Switched back to monitoring

Marcus Noble 2021-06-14 14:35:06 +01:00
parent d96095535e
commit 85569644f2
10 changed files with 103 additions and 999 deletions

View File

@@ -1,24 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: loki
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: logging
name: cluster-fun (scaleway)
source:
path: manifests/loki_chart
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
syncOptions:
- CreateNamespace=true
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
# name: loki
# namespace: argocd
# finalizers:
# - resources-finalizer.argocd.argoproj.io
# spec:
# project: cluster.fun
# destination:
# namespace: logging
# name: cluster-fun (scaleway)
# source:
# path: manifests/loki_chart
# repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
# targetRevision: HEAD
# syncPolicy:
# syncOptions:
# - CreateNamespace=true
# automated: {}
# ignoreDifferences:
# - kind: Secret
# jsonPointers:
# - /data

View File

@@ -1,24 +1,24 @@
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
# name: monitoring
# namespace: argocd
# finalizers:
# - resources-finalizer.argocd.argoproj.io
# spec:
# project: cluster.fun
# destination:
# namespace: monitoring
# name: cluster-fun (scaleway)
# source:
# path: manifests/monitoring
# repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
# targetRevision: HEAD
# syncPolicy:
# automated: {}
# syncOptions:
# - CreateNamespace=true
# ignoreDifferences:
# - kind: Secret
# jsonPointers:
# - /data
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: monitoring
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: monitoring
name: cluster-fun (scaleway)
source:
path: manifests/monitoring
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data

View File

@@ -202,21 +202,7 @@ spec:
kind: Service
apiVersion: v1
metadata:
name: prometheus-local
namespace: inlets
spec:
type: ClusterIP
ports:
- port: 80
protocol: TCP
targetPort: 8000
selector:
app: inlets
---
kind: Service
apiVersion: v1
metadata:
name: loki-local
name: loki
namespace: inlets
spec:
type: ClusterIP
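
For context, the renamed loki Service presumably ends up mirroring the removed prometheus-local one above. A sketch of the likely full object, with ports and selector assumed from that pattern and from the promtail client URL later in this commit (http://loki.inlets.svc:80):

kind: Service
apiVersion: v1
metadata:
  name: loki
  namespace: inlets
spec:
  type: ClusterIP
  ports:
    - port: 80          # assumed: matches the loki.inlets.svc:80 URL promtail pushes to
      protocol: TCP
      targetPort: 8000  # assumption only; the real port depends on the inlets tunnel setup
  selector:
    app: inlets         # assumed: same inlets client pods that back prometheus-local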

View File

@@ -1,182 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: blackbox-exporter-psp
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
spec:
privileged: false
allowPrivilegeEscalation: false
volumes:
- configMap
- secret
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: true
allowedCapabilities:
- NET_RAW
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: blackbox-exporter
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
---
apiVersion: v1
kind: ConfigMap
metadata:
name: blackbox-exporter
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
data:
blackbox.yaml: |
modules:
http_2xx:
http:
follow_redirects: true
preferred_ip_protocol: ip4
tls_config:
insecure_skip_verify: true
valid_http_versions:
- HTTP/1.1
- HTTP/2.0
prober: http
timeout: 5s
icmp_ping:
icmp:
preferred_ip_protocol: ip4
source_ip_address: 127.0.0.1
prober: icmp
timeout: 5s
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
name: blackbox-exporter
namespace: monitoring
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- blackbox-exporter-psp
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
name: blackbox-exporter
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: blackbox-exporter
subjects:
- kind: ServiceAccount
name: blackbox-exporter
---
kind: Service
apiVersion: v1
metadata:
name: blackbox-exporter
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
spec:
type: ClusterIP
ports:
- name: http
port: 9115
targetPort: http
protocol: TCP
selector:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: blackbox-exporter
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: blackbox-exporter
spec:
serviceAccountName: blackbox-exporter
restartPolicy: Always
containers:
- name: blackbox-exporter
image: "prom/blackbox-exporter:v0.19.0"
imagePullPolicy: IfNotPresent
securityContext:
readOnlyRootFilesystem: true
capabilities:
add: ["NET_RAW"]
args:
- "--config.file=/config/blackbox.yaml"
ports:
- containerPort: 9115
name: http
livenessProbe:
httpGet:
path: /health
port: http
readinessProbe:
httpGet:
path: /health
port: http
volumeMounts:
- mountPath: /config
name: config
volumes:
- name: config
configMap:
name: blackbox-exporter
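
The deleted manifest above only deploys the exporter; the probing job itself lived in the Prometheus scrape configuration (not shown in this diff). For reference, the usual blackbox-exporter pattern looks roughly like the sketch below — the job name is arbitrary and the target URL is a made-up example, not taken from this repo:

- job_name: blackbox-http
  metrics_path: /probe
  params:
    module: [http_2xx]          # one of the modules defined in blackbox.yaml above
  static_configs:
    - targets:
        - https://cluster.fun   # example target only
  relabel_configs:
    - source_labels: [__address__]
      target_label: __param_target
    - source_labels: [__param_target]
      target_label: instance
    - target_label: __address__
      replacement: blackbox-exporter.monitoring.svc:9115   # the Service defined above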

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,51 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: grafana
namespace: inlets
labels:
app: grafana
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- grafana.cluster.fun
secretName: grafana-ingress
rules:
- host: grafana.cluster.fun
http:
paths:
- path: /
backend:
serviceName: auth-proxy
servicePort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: prometheus
namespace: inlets
labels:
app: prometheus
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- prometheus.cluster.fun
secretName: prometheus-ingress
rules:
- host: prometheus.cluster.fun
http:
paths:
- path: /
backend:
serviceName: auth-proxy
servicePort: 80
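
These two new Ingresses use the extensions/v1beta1 API, which is deprecated and removed in Kubernetes 1.22, while the Ingresses deleted later in this commit already use networking.k8s.io/v1. For reference, a sketch of the grafana Ingress in the newer form (same metadata; only the path backend syntax and the added pathType differ):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: inlets
  labels:
    app: grafana
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
    traefik.ingress.kubernetes.io/redirect-entry-point: https
    traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
  tls:
    - hosts:
        - grafana.cluster.fun
      secretName: grafana-ingress
  rules:
    - host: grafana.cluster.fun
      http:
        paths:
          - path: /
            pathType: Prefix      # required field in networking.k8s.io/v1
            backend:
              service:
                name: auth-proxy
                port:
                  number: 80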

View File

@@ -1,222 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: proxy-auth
namespace: monitoring
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus-auth
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
template:
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
spec:
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://prometheus-server.monitoring.svc.cluster.local
- --http-address=$(HOST_IP):8080
- --redirect-url=https://prometheus.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQN
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: proxy-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: proxy-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.1
name: oauth-proxy
ports:
- containerPort: 8080
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: prometheus-auth
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prometheus-auth
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server-auth
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- prometheus.cluster.fun
secretName: prometheus-ingress
rules:
- host: prometheus.cluster.fun
http:
paths:
- backend:
service:
name: prometheus-auth
port:
number: 80
path: /
pathType: ImplementationSpecific
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana-auth
namespace: monitoring
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
template:
metadata:
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
spec:
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://grafana.monitoring.svc.cluster.local
- --http-address=$(HOST_IP):8080
- --redirect-url=https://grafana.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQN
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: proxy-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: proxy-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.1
name: oauth-proxy
ports:
- containerPort: 8080
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: grafana-auth
namespace: monitoring
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana-auth
namespace: monitoring
labels:
app.kubernetes.io/name: grafana
app.kubernetes.io/component: server-auth
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- grafana.cluster.fun
secretName: grafana-ingress
rules:
- host: grafana.cluster.fun
http:
paths:
- backend:
service:
name: grafana-auth
port:
number: 80
path: /
pathType: ImplementationSpecific

View File

@@ -1,232 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: loki
labels:
app.kubernetes.io/name: loki
spec:
privileged: false
allowPrivilegeEscalation: false
volumes:
- 'configMap'
- 'emptyDir'
- 'persistentVolumeClaim'
- 'secret'
- 'projected'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
---
apiVersion: v1
kind: ConfigMap
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
data:
loki.yaml: |
auth_enabled: false
chunk_store_config:
max_look_back_period: 0s
compactor:
shared_store: filesystem
working_directory: /data/loki/boltdb-shipper-compactor
ingester:
chunk_block_size: 262144
chunk_idle_period: 3m
chunk_retain_period: 1m
lifecycler:
ring:
kvstore:
store: inmemory
replication_factor: 1
max_transfer_retries: 0
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
schema_config:
configs:
- from: "2020-10-24"
index:
period: 24h
prefix: index_
object_store: filesystem
schema: v11
store: boltdb-shipper
server:
http_listen_port: 3100
storage_config:
boltdb_shipper:
active_index_directory: /data/loki/boltdb-shipper-active
cache_location: /data/loki/boltdb-shipper-cache
cache_ttl: 24h
shared_store: filesystem
filesystem:
directory: /data/loki/chunks
table_manager:
retention_deletes_enabled: true
retention_period: 720h
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [loki]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: loki
subjects:
- kind: ServiceAccount
name: loki
---
apiVersion: v1
kind: Service
metadata:
name: loki-headless
namespace: monitoring
labels:
app.kubernetes.io/name: loki
variant: headless
spec:
clusterIP: None
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app.kubernetes.io/name: loki
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
spec:
type: ClusterIP
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app.kubernetes.io/name: loki
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: loki
namespace: monitoring
labels:
app.kubernetes.io/name: loki
spec:
podManagementPolicy: OrderedReady
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: loki
serviceName: loki-headless
template:
metadata:
labels:
app.kubernetes.io/name: loki
annotations:
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: loki
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
containers:
- name: loki
image: "grafana/loki:2.2.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/loki/loki.yaml"
volumeMounts:
- name: config
mountPath: /etc/loki
- name: storage
mountPath: "/data"
subPath:
ports:
- name: http-metrics
containerPort: 3100
protocol: TCP
livenessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
securityContext:
readOnlyRootFilesystem: true
terminationGracePeriodSeconds: 4800
volumes:
- name: config
configMap:
name: loki
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
storageClassName: scw-bssd

View File

@@ -306,20 +306,6 @@ data:
- source_labels: [__meta_kubernetes_endpoints_name]
regex: 'node-exporter'
action: keep
- job_name: 'federated-clusters'
scrape_interval: 15s
honor_labels: true
params:
'match[]':
- '{job="prometheus"}'
- '{job="node"}'
- '{job="node_exporter"}'
- '{job="zfs_exporter"}'
- '{job=~"kubernetes.*"}'
metrics_path: '/federate'
static_configs:
- targets:
- 'prometheus-local.inlets.svc:80'
recording_rules.yml: |
{}
rules: |

View File

@@ -56,6 +56,8 @@ data:
batchwait: 1s
external_labels: {}
timeout: 10s
external_labels:
kubernetes_cluster: scaleway
positions:
filename: /run/promtail/positions.yaml
server:
@@ -396,7 +398,7 @@
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
- "-client.url=http://loki:3100/loki/api/v1/push"
- "-client.url=http://loki.inlets.svc:80/loki/api/v1/push"
volumeMounts:
- name: config
mountPath: /etc/promtail
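
Taken together, the two promtail changes route logs through the inlets tunnel and tag each stream with its source cluster. A sketch of the effective client settings once the chart renders them (assumed layout following promtail's clients config block; in practice the URL comes from the -client.url flag shown above):

clients:
  - url: http://loki.inlets.svc:80/loki/api/v1/push
    batchwait: 1s
    timeout: 10s
    external_labels:
      kubernetes_cluster: scaleway   # lets federated logs be filtered per cluster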