diff --git a/manifests/11-year-anniversary/vpa.yaml b/manifests/11-year-anniversary/vpa.yaml
new file mode 100644
index 0000000..579af95
--- /dev/null
+++ b/manifests/11-year-anniversary/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: anniversary
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: anniversary
+ updatePolicy:
+ updateMode: "Auto"
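+ # With updateMode "Auto" the VPA updater evicts pods whose requests drift
+ # from the recommendation so new values are applied on recreation; for a
+ # single-replica Deployment this means occasional brief restarts.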
diff --git a/manifests/_apps/calendso.yaml b/manifests/_apps/calendso.yaml
new file mode 100644
index 0000000..79debbb
--- /dev/null
+++ b/manifests/_apps/calendso.yaml
@@ -0,0 +1,24 @@
+# apiVersion: argoproj.io/v1alpha1
+# kind: Application
+# metadata:
+# name: calendso
+# namespace: argocd
+# finalizers:
+# - resources-finalizer.argocd.argoproj.io
+# spec:
+# project: cluster.fun
+# destination:
+# namespace: calendso
+# name: cluster-fun (scaleway)
+# source:
+# path: manifests/calendso
+# repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
+# targetRevision: HEAD
+# syncPolicy:
+# automated: {}
+# syncOptions:
+# - CreateNamespace=true
+# ignoreDifferences:
+# - kind: Secret
+# jsonPointers:
+# - /data
diff --git a/manifests/_apps/harbor_chart.yaml b/manifests/_apps/harbor_chart.yaml
index 1811dde..3e960ac 100644
--- a/manifests/_apps/harbor_chart.yaml
+++ b/manifests/_apps/harbor_chart.yaml
@@ -16,6 +16,8 @@ spec:
targetRevision: HEAD
syncPolicy:
automated: {}
+ syncOptions:
+ - CreateNamespace=true
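+ # CreateNamespace has Argo CD create the destination namespace itself,
+ # replacing the explicit Namespace object removed from the Harbor manifests below.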
ignoreDifferences:
- kind: Secret
jsonPointers:
diff --git a/manifests/_apps/vpa.yaml b/manifests/_apps/vpa.yaml
new file mode 100644
index 0000000..5d31145
--- /dev/null
+++ b/manifests/_apps/vpa.yaml
@@ -0,0 +1,27 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: vpa
+ namespace: argocd
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io
+spec:
+ project: cluster.fun
+ destination:
+ namespace: kube-system
+ name: cluster-fun (scaleway)
+ source:
+ repoURL: 'https://charts.fairwinds.com/stable'
+ targetRevision: 0.5.0
+ chart: vpa
+ helm:
+ version: v3
+ values: |-
+ recommender:
+ extraArgs:
+ prometheus-address: "http://prometheus-server.monitoring.svc:80"
+ storage: prometheus
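+ # storage=prometheus makes the recommender rebuild its usage history from
+ # the Prometheus server above after a restart, rather than starting from
+ # an empty in-memory state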
+ admissionController:
+ enabled: true
+ syncPolicy:
+ automated: {}
diff --git a/manifests/base64/vpa.yaml b/manifests/base64/vpa.yaml
new file mode 100644
index 0000000..b3f07cc
--- /dev/null
+++ b/manifests/base64/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: base64
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: base64
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/blog/vpa.yaml b/manifests/blog/vpa.yaml
new file mode 100644
index 0000000..06acd36
--- /dev/null
+++ b/manifests/blog/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: blog
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: blog
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/calendso/calendso.yaml b/manifests/calendso/calendso.yaml
new file mode 100644
index 0000000..c74446f
--- /dev/null
+++ b/manifests/calendso/calendso.yaml
@@ -0,0 +1,120 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: calendso
+ namespace: calendso
+ annotations:
+ kube-1password: shgjmetsq7fcizmzzdn5ishzxu
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
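+# The kube-1password annotations are presumably picked up by a 1Password
+# operator that fills this Secret's data from the referenced vault item,
+# keeping the actual secret values out of git.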
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: calendso
+ namespace: calendso
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: web
+ name: web
+ selector:
+ app: calendso
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calendso
+ namespace: calendso
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: calendso
+ template:
+ metadata:
+ labels:
+ app: calendso
+ spec:
+ containers:
+ - name: web
+ image: calendso/calendso:latest
+ imagePullPolicy: Always
+ command:
+ - sh
+ - -c
+ - |
+ apt update && apt install -y netcat
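+ # netcat is presumably needed by start.sh to wait for the database
+ # sidecar before booting the app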
+ /app/scripts/start.sh
+ env:
+ - name: NEXT_PUBLIC_LICENSE_CONSENT
+ value: agree
+ - name: NEXT_PUBLIC_TELEMETRY_KEY
+ value: ""
+ - name: BASE_URL
+ value: "https://meet.marcusnoble.co.uk"
+ - name: NEXT_PUBLIC_APP_URL
+ value: "https://meet.marcusnoble.co.uk"
+ - name: NODE_ENV
+ value: production
+ - name: POSTGRES_DB
+ value: calendso
+ - name: DATABASE_HOST
+ value: localhost:5432
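+ # the postgres sidecar below shares the pod's network namespace, so the
+ # app reaches it via localhost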
+ envFrom:
+ - secretRef:
+ name: calendso
+ ports:
+ - containerPort: 3000
+ name: web
+ - name: postgres
+ image: postgres:9-alpine
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 5432
+ name: db
+ env:
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: calendso
+ key: POSTGRES_USER
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: calendso
+ key: POSTGRES_PASSWORD
+ - name: POSTGRES_DB
+ value: calendso
+ - name: PGDATA
+ value: /var/lib/postgresql/data/calendso
+
+---
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: calendso
+ namespace: calendso
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt
+ nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+spec:
+ ingressClassName: nginx
+ tls:
+ - hosts:
+ - meet.marcusnoble.co.uk
+ secretName: calendso-ingress
+ rules:
+ - host: meet.marcusnoble.co.uk
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ service:
+ name: calendso
+ port:
+ number: 80
diff --git a/manifests/cors-proxy/vpa.yaml b/manifests/cors-proxy/vpa.yaml
new file mode 100644
index 0000000..7da85a6
--- /dev/null
+++ b/manifests/cors-proxy/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: cors-proxy
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: cors-proxy
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/cv/vpa.yaml b/manifests/cv/vpa.yaml
new file mode 100644
index 0000000..308fd84
--- /dev/null
+++ b/manifests/cv/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: cv
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: cv
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/dashboard/vpa.yaml b/manifests/dashboard/vpa.yaml
new file mode 100644
index 0000000..a742498
--- /dev/null
+++ b/manifests/dashboard/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: dashboard
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: dashboard
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/feed-fetcher/vpa.yaml b/manifests/feed-fetcher/vpa.yaml
new file mode 100644
index 0000000..efeef37
--- /dev/null
+++ b/manifests/feed-fetcher/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: feed-fetcher
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: feed-fetcher
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/goplayground/vpa.yaml b/manifests/goplayground/vpa.yaml
new file mode 100644
index 0000000..bbb8f96
--- /dev/null
+++ b/manifests/goplayground/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: goplayground
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: goplayground
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/harbor_chart/harbor_chart.yaml b/manifests/harbor_chart/harbor_chart.yaml
deleted file mode 100644
index 8fc22ec..0000000
--- a/manifests/harbor_chart/harbor_chart.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
- name: harbor
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: harbor-values
- namespace: harbor
- annotations:
- kube-1password: igey7vjjiqmj25v64eck7cyj34
- kube-1password/vault: Kubernetes
- kube-1password/secret-text-key: values.yaml
-type: Opaque
----
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
- name: harbor
- namespace: harbor
-spec:
- chart:
- repository: https://helm.goharbor.io
- name: harbor
- version: 1.7.0
- maxHistory: 4
- skipCRDs: false
- valuesFrom:
- - secretKeyRef:
- name: harbor-values
- namespace: harbor
- key: values.yaml
- optional: false
- values:
- fullnameOverride: harbor-harbor-harbor
- externalURL: https://docker.cluster.fun
- updateStrategy:
- type: Recreate
- expose:
- type: ingress
- tls:
- enabled: true
- certSource: secret
- secret:
- secretName: harbor-harbor-ingress
- ingress:
- hosts:
- core: docker.cluster.fun
- annotations:
- cert-manager.io/cluster-issuer: letsencrypt
- nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
- nginx.ingress.kubernetes.io/proxy-body-size: "0"
- portal:
- replicas: 2
- priorityClassName: system-cluster-critical
- resources:
- requests:
- memory: 64Mi
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: component
- operator: In
- values:
- - portal
- - key: app
- operator: In
- values:
- - harbor
- topologyKey: kubernetes.io/hostname
- core:
- replicas: 2
- priorityClassName: system-cluster-critical
- resources:
- requests:
- memory: 64Mi
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: component
- operator: In
- values:
- - core
- - key: app
- operator: In
- values:
- - harbor
- topologyKey: kubernetes.io/hostname
- jobservice:
- replicas: 1
- resources:
- requests:
- memory: 64Mi
- jobLoggers:
- - stdout
- registry:
- replicas: 2
- priorityClassName: system-cluster-critical
- registry:
- resources:
- requests:
- memory: 64Mi
- controller:
- resources:
- requests:
- memory: 64Mi
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: component
- operator: In
- values:
- - registry
- - key: app
- operator: In
- values:
- - harbor
- topologyKey: kubernetes.io/hostname
- chartmuseum:
- enabled: false
- notary:
- enabled: false
- trivy:
- enabled: false
- metrics:
- enabled: true
diff --git a/manifests/harbor_chart/manifest.yaml b/manifests/harbor_chart/manifest.yaml
new file mode 100644
index 0000000..3b6e7f7
--- /dev/null
+++ b/manifests/harbor_chart/manifest.yaml
@@ -0,0 +1,1306 @@
+---
+# Source: harbor/templates/core/core-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: harbor-harbor-harbor-core
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: nzrnkmyueqyr7qantfnizndoni
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: harbor/templates/database/database-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "harbor-harbor-harbor-database"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: fyedoxemaq6ro7mxh5espv4ynu
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: harbor/templates/exporter/exporter-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: harbor-harbor-harbor-exporter
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: z2zxjpo26imlov3dxoq3ruwvw4
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+---
+# Source: harbor/templates/jobservice/jobservice-secrets.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "harbor-harbor-harbor-jobservice"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: fx6f6bomevldvtuuffvifuwm74
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: harbor/templates/registry/registry-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "harbor-harbor-harbor-registry"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: p2mdm5s7kmkffjk5ttakhvjru4
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: harbor/templates/registry/registry-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "harbor-harbor-harbor-registry-htpasswd"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ kube-1password: qfmged45pt5jsytf2zz5dgaii4
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: harbor/templates/core/core-cm.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: harbor-harbor-harbor-core
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ app.conf: |+
+ appname = Harbor
+ runmode = prod
+ enablegzip = true
+
+ [prod]
+ httpport = 8080
+ PORT: "8080"
+ DATABASE_TYPE: "postgresql"
+ POSTGRESQL_HOST: "harbor-harbor-harbor-database"
+ POSTGRESQL_PORT: "5432"
+ POSTGRESQL_USERNAME: "postgres"
+ POSTGRESQL_DATABASE: "registry"
+ POSTGRESQL_SSLMODE: "disable"
+ POSTGRESQL_MAX_IDLE_CONNS: "100"
+ POSTGRESQL_MAX_OPEN_CONNS: "900"
+ EXT_ENDPOINT: "https://docker.cluster.fun"
+ CORE_URL: "http://harbor-harbor-harbor-core:80"
+ JOBSERVICE_URL: "http://harbor-harbor-harbor-jobservice"
+ REGISTRY_URL: "http://harbor-harbor-harbor-registry:5000"
+ TOKEN_SERVICE_URL: "http://harbor-harbor-harbor-core:80/service/token"
+ WITH_NOTARY: "false"
+ NOTARY_URL: "http://harbor-harbor-harbor-notary-server:4443"
+ CORE_LOCAL_URL: "http://127.0.0.1:8080"
+ WITH_TRIVY: "false"
+ TRIVY_ADAPTER_URL: "http://harbor-harbor-harbor-trivy:8080"
+ REGISTRY_STORAGE_PROVIDER_NAME: "s3"
+ WITH_CHARTMUSEUM: "false"
+ CHART_REPOSITORY_URL: "http://harbor-harbor-harbor-chartmuseum"
+ LOG_LEVEL: "info"
+ CONFIG_PATH: "/etc/core/app.conf"
+ CHART_CACHE_DRIVER: "redis"
+ _REDIS_URL_CORE: "redis://harbor-harbor-harbor-redis:6379/0?idle_timeout_seconds=30"
+ _REDIS_URL_REG: "redis://harbor-harbor-harbor-redis:6379/2?idle_timeout_seconds=30"
+ PORTAL_URL: "http://harbor-harbor-harbor-portal"
+ REGISTRY_CONTROLLER_URL: "http://harbor-harbor-harbor-registry:8080"
+ REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
+ HTTP_PROXY: ""
+ HTTPS_PROXY: ""
+ NO_PROXY: "harbor-harbor-harbor-core,harbor-harbor-harbor-jobservice,harbor-harbor-harbor-database,harbor-harbor-harbor-chartmuseum,harbor-harbor-harbor-notary-server,harbor-harbor-harbor-notary-signer,harbor-harbor-harbor-registry,harbor-harbor-harbor-portal,harbor-harbor-harbor-trivy,harbor-harbor-harbor-exporter,127.0.0.1,localhost,.local,.internal"
+ PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry"
+ METRIC_ENABLE: "true"
+ METRIC_PATH: "/metrics"
+ METRIC_PORT: "8001"
+ METRIC_NAMESPACE: harbor
+ METRIC_SUBSYSTEM: core
+---
+# Source: harbor/templates/exporter/exporter-cm-env.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "harbor-harbor-harbor-exporter-env"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ HTTP_PROXY: ""
+ HTTPS_PROXY: ""
+ NO_PROXY: "harbor-harbor-harbor-core,harbor-harbor-harbor-jobservice,harbor-harbor-harbor-database,harbor-harbor-harbor-chartmuseum,harbor-harbor-harbor-notary-server,harbor-harbor-harbor-notary-signer,harbor-harbor-harbor-registry,harbor-harbor-harbor-portal,harbor-harbor-harbor-trivy,harbor-harbor-harbor-exporter,127.0.0.1,localhost,.local,.internal"
+ LOG_LEVEL: "info"
+ HARBOR_EXPORTER_PORT: "8001"
+ HARBOR_EXPORTER_METRICS_PATH: "/metrics"
+ HARBOR_EXPORTER_METRICS_ENABLED: "true"
+ HARBOR_EXPORTER_CACHE_TIME: "23"
+ HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL: "14400"
+ HARBOR_METRIC_NAMESPACE: harbor
+ HARBOR_METRIC_SUBSYSTEM: exporter
+ HARBOR_REDIS_URL: "redis://harbor-harbor-harbor-redis:6379/1"
+ HARBOR_REDIS_NAMESPACE: harbor_job_service_namespace
+ HARBOR_REDIS_TIMEOUT: "3600"
+ HARBOR_SERVICE_SCHEME: "http"
+ HARBOR_SERVICE_HOST: "harbor-harbor-harbor-core"
+ HARBOR_SERVICE_PORT: "80"
+ HARBOR_DATABASE_HOST: "harbor-harbor-harbor-database"
+ HARBOR_DATABASE_PORT: "5432"
+ HARBOR_DATABASE_USERNAME: "postgres"
+ HARBOR_DATABASE_DBNAME: "registry"
+ HARBOR_DATABASE_SSLMODE: "disable"
+ HARBOR_DATABASE_MAX_IDLE_CONNS: "100"
+ HARBOR_DATABASE_MAX_OPEN_CONNS: "900"
+---
+# Source: harbor/templates/jobservice/jobservice-cm-env.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "harbor-harbor-harbor-jobservice-env"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ CORE_URL: "http://harbor-harbor-harbor-core:80"
+ TOKEN_SERVICE_URL: "http://harbor-harbor-harbor-core:80/service/token"
+ REGISTRY_URL: "http://harbor-harbor-harbor-registry:5000"
+ REGISTRY_CONTROLLER_URL: "http://harbor-harbor-harbor-registry:8080"
+ REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
+ HTTP_PROXY: ""
+ HTTPS_PROXY: ""
+ NO_PROXY: "harbor-harbor-harbor-core,harbor-harbor-harbor-jobservice,harbor-harbor-harbor-database,harbor-harbor-harbor-chartmuseum,harbor-harbor-harbor-notary-server,harbor-harbor-harbor-notary-signer,harbor-harbor-harbor-registry,harbor-harbor-harbor-portal,harbor-harbor-harbor-trivy,harbor-harbor-harbor-exporter,127.0.0.1,localhost,.local,.internal"
+ METRIC_NAMESPACE: harbor
+ METRIC_SUBSYSTEM: jobservice
+---
+# Source: harbor/templates/jobservice/jobservice-cm.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "harbor-harbor-harbor-jobservice"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ config.yml: |+
+ # Server listening port
+ protocol: "http"
+ port: 8080
+ worker_pool:
+ workers: 10
+ backend: "redis"
+ redis_pool:
+ redis_url: "redis://harbor-harbor-harbor-redis:6379/1"
+ namespace: "harbor_job_service_namespace"
+ idle_timeout_second: 3600
+ job_loggers:
+ - name: "STD_OUTPUT"
+ level: INFO
+ metric:
+ enabled: true
+ path: /metrics
+ port: 8001
+ # Loggers for the job service
+ loggers:
+ - name: "STD_OUTPUT"
+ level: INFO
+---
+# Source: harbor/templates/portal/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "harbor-harbor-harbor-portal"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ nginx.conf: |+
+ worker_processes auto;
+ pid /tmp/nginx.pid;
+ events {
+ worker_connections 1024;
+ }
+ http {
+ client_body_temp_path /tmp/client_body_temp;
+ proxy_temp_path /tmp/proxy_temp;
+ fastcgi_temp_path /tmp/fastcgi_temp;
+ uwsgi_temp_path /tmp/uwsgi_temp;
+ scgi_temp_path /tmp/scgi_temp;
+ server {
+ listen 8080;
+ listen [::]:8080;
+ server_name localhost;
+ root /usr/share/nginx/html;
+ index index.html index.htm;
+ include /etc/nginx/mime.types;
+ gzip on;
+ gzip_min_length 1000;
+ gzip_proxied expired no-cache no-store private auth;
+ gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+ location / {
+ try_files $uri $uri/ /index.html;
+ }
+ location = /index.html {
+ add_header Cache-Control "no-store, no-cache, must-revalidate";
+ }
+ }
+ }
+---
+# Source: harbor/templates/registry/registry-cm.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: "harbor-harbor-harbor-registry"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+data:
+ config.yml: |+
+ version: 0.1
+ log:
+ level: info
+ fields:
+ service: registry
+ storage:
+ s3:
+ region: fr-par
+ bucket: cluster.fun
+ regionendpoint: https://s3.fr-par.scw.cloud
+ rootdirectory: /harbor
+ cache:
+ layerinfo: redis
+ maintenance:
+ uploadpurging:
+ enabled: false
+ delete:
+ enabled: true
+ redirect:
+ disable: true
+ redis:
+ addr: harbor-harbor-harbor-redis:6379
+ db: 2
+ readtimeout: 10s
+ writetimeout: 10s
+ dialtimeout: 10s
+ pool:
+ maxidle: 100
+ maxactive: 500
+ idletimeout: 60s
+ http:
+ addr: :5000
+ relativeurls: false
+ # set via environment variable
+ # secret: placeholder
+ debug:
+ addr: :8001
+ prometheus:
+ enabled: true
+ path: /metrics
+ auth:
+ htpasswd:
+ realm: harbor-registry-basic-realm
+ path: /etc/registry/passwd
+ validation:
+ disabled: true
+ compatibility:
+ schema1:
+ enabled: true
+ ctl-config.yml: |+
+ ---
+ protocol: "http"
+ port: 8080
+ log_level: info
+ registry_config: "/etc/registry/config.yml"
+---
+# Source: harbor/templates/core/core-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: harbor-harbor-harbor-core
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - name: http-web
+ port: 80
+ targetPort: 8080
+ - name: http-metrics
+ port: 8001
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: core
+---
+# Source: harbor/templates/database/database-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: "harbor-harbor-harbor-database"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - port: 5432
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: database
+---
+# Source: harbor/templates/exporter/exporter-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: "harbor-harbor-harbor-exporter"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - name: http-metrics
+ port: 8001
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: exporter
+---
+# Source: harbor/templates/jobservice/jobservice-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: "harbor-harbor-harbor-jobservice"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - name: http-jobservice
+ port: 80
+ targetPort: 8080
+ - name: http-metrics
+ port: 8001
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: jobservice
+---
+# Source: harbor/templates/portal/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: "harbor-harbor-harbor-portal"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: portal
+---
+# Source: harbor/templates/redis/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: harbor-harbor-harbor-redis
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - port: 6379
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: redis
+---
+# Source: harbor/templates/registry/registry-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: "harbor-harbor-harbor-registry"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+spec:
+ ports:
+ - name: http-registry
+ port: 5000
+
+ - name: http-controller
+ port: 8080
+ - name: http-metrics
+ port: 8001
+ selector:
+ release: harbor-harbor
+ app: "harbor"
+ component: registry
+---
+# Source: harbor/templates/core/core-dpl.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: harbor-harbor-harbor-core
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: core
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: core
+ template:
+ metadata:
+ labels:
+ release: harbor-harbor
+ app: "harbor"
+ component: core
+ annotations:
+ checksum/configmap: 167dd1a6053c18e7ef228ae34b781b938287b997345e41d919b64437cd59721d
+ checksum/secret: 3267ef0049cdfabcdaa15a4e87e8624bbdd3e69acb51169d3cec91c6c321dda5
+ checksum/secret-jobservice: 87b4ffaead27c455b8f39f9223543537f996895e5042c22fc1e579d308726d6b
+ spec:
+ securityContext:
+ runAsUser: 10000
+ fsGroup: 10000
+ automountServiceAccountToken: false
+ terminationGracePeriodSeconds: 120
+ containers:
+ - name: core
+ image: goharbor/harbor-core:v2.3.0
+ imagePullPolicy: IfNotPresent
+ startupProbe:
+ httpGet:
+ path: /api/v2.0/ping
+ scheme: HTTP
+ port: 8080
+ failureThreshold: 360
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /api/v2.0/ping
+ scheme: HTTP
+ port: 8080
+ failureThreshold: 2
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /api/v2.0/ping
+ scheme: HTTP
+ port: 8080
+ failureThreshold: 2
+ periodSeconds: 10
+ envFrom:
+ - configMapRef:
+ name: "harbor-harbor-harbor-core"
+ - secretRef:
+ name: "harbor-harbor-harbor-core"
+ env:
+ - name: CORE_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: harbor-harbor-harbor-core
+ key: secret
+ - name: JOBSERVICE_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: "harbor-harbor-harbor-jobservice"
+ key: JOBSERVICE_SECRET
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: config
+ mountPath: /etc/core/app.conf
+ subPath: app.conf
+ - name: secret-key
+ mountPath: /etc/core/key
+ subPath: key
+ - name: token-service-private-key
+ mountPath: /etc/core/private_key.pem
+ subPath: tls.key
+ - name: ca-download
+ mountPath: /etc/core/ca
+ - name: psc
+ mountPath: /etc/core/token
+ resources:
+ requests:
+ memory: 64Mi
+ volumes:
+ - name: config
+ configMap:
+ name: harbor-harbor-harbor-core
+ items:
+ - key: app.conf
+ path: app.conf
+ - name: secret-key
+ secret:
+ secretName: harbor-harbor-harbor-core
+ items:
+ - key: secretKey
+ path: key
+ - name: token-service-private-key
+ secret:
+ secretName: harbor-harbor-harbor-core
+ - name: ca-download
+ secret:
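+ # no secretName is rendered here because no custom CA bundle is configured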
+ - name: psc
+ emptyDir: {}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: component
+ operator: In
+ values:
+ - core
+ - key: app
+ operator: In
+ values:
+ - harbor
+ topologyKey: kubernetes.io/hostname
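+ # required anti-affinity on the hostname key keeps the two core replicas
+ # on separate nodes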
+ priorityClassName: system-cluster-critical
+---
+# Source: harbor/templates/exporter/exporter-dpl.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: harbor-harbor-harbor-exporter
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: exporter
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: exporter
+ template:
+ metadata:
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: exporter
+ annotations:
+ spec:
+ securityContext:
+ runAsUser: 10000
+ fsGroup: 10000
+ automountServiceAccountToken: false
+ containers:
+ - name: exporter
+ image: goharbor/harbor-exporter:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 8001
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 8001
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ args: ["-log-level", "info"]
+ envFrom:
+ - configMapRef:
+ name: "harbor-harbor-harbor-exporter-env"
+ - secretRef:
+ name: "harbor-harbor-harbor-exporter"
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ volumes:
+ - name: config
+ secret:
+ secretName: "harbor-harbor-harbor-exporter"
+---
+# Source: harbor/templates/jobservice/jobservice-dpl.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: "harbor-harbor-harbor-jobservice"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: jobservice
+spec:
+ replicas: 1
+ strategy:
+ type: Recreate
+ rollingUpdate: null
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: jobservice
+ template:
+ metadata:
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: jobservice
+ annotations:
+ checksum/configmap: af6da052830476467f006a29d110274d1764756243b20e51aebdecd7d677b19a
+ checksum/configmap-env: ba3a529d03e0d0f9dbaab1bd37a3c43f3f914a0af9339b2c49311b37c7aec049
+ checksum/secret: 86d4dd7172a17e4ee3a7b0d58930056a0787e6244e97ac6362a1434e96bffd64
+ checksum/secret-core: 6f1def0912bfbb511b8d3ff055c3f95d998cd7e7c1432417b53cf5f4a4c289b5
+ spec:
+ securityContext:
+ runAsUser: 10000
+ fsGroup: 10000
+ automountServiceAccountToken: false
+ terminationGracePeriodSeconds: 120
+ containers:
+ - name: jobservice
+ image: goharbor/harbor-jobservice:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /api/v1/stats
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /api/v1/stats
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ memory: 64Mi
+ env:
+ - name: CORE_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: harbor-harbor-harbor-core
+ key: secret
+ envFrom:
+ - configMapRef:
+ name: "harbor-harbor-harbor-jobservice-env"
+ - secretRef:
+ name: "harbor-harbor-harbor-jobservice"
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: jobservice-config
+ mountPath: /etc/jobservice/config.yml
+ subPath: config.yml
+ - name: job-logs
+ mountPath: /var/log/jobs
+ subPath:
+ volumes:
+ - name: jobservice-config
+ configMap:
+ name: "harbor-harbor-harbor-jobservice"
+ - name: job-logs
+ emptyDir: {}
+---
+# Source: harbor/templates/portal/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: "harbor-harbor-harbor-portal"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: portal
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: portal
+ template:
+ metadata:
+ labels:
+ release: harbor-harbor
+ app: "harbor"
+ component: portal
+ annotations:
+ spec:
+ securityContext:
+ runAsUser: 10000
+ fsGroup: 10000
+ automountServiceAccountToken: false
+ containers:
+ - name: portal
+ image: goharbor/harbor-portal:v2.3.0
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ memory: 64Mi
+ livenessProbe:
+ httpGet:
+ path: /
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 1
+ periodSeconds: 10
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: portal-config
+ mountPath: /etc/nginx/nginx.conf
+ subPath: nginx.conf
+ volumes:
+ - name: portal-config
+ configMap:
+ name: "harbor-harbor-harbor-portal"
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: component
+ operator: In
+ values:
+ - portal
+ - key: app
+ operator: In
+ values:
+ - harbor
+ topologyKey: kubernetes.io/hostname
+ priorityClassName: system-cluster-critical
+---
+# Source: harbor/templates/registry/registry-dpl.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: "harbor-harbor-harbor-registry"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: registry
+spec:
+ replicas: 2
+ strategy:
+ type: Recreate
+ rollingUpdate: null
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: registry
+ template:
+ metadata:
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: registry
+ annotations:
+ checksum/configmap: 9556d2769d48cf3b1a5b97cb95ab15a184e156c38d4d25f1b5b6290f36e3a592
+ checksum/secret: fdbd36eee535adc702ff39bc7f483c5ce5e40ca5cd35bde8b83614383411efe8
+ checksum/secret-jobservice: 2c9a6a2532bb1b532b831db3a7e3b562cdd2829abe94188f5eb3a3f8ab4908fc
+ checksum/secret-core: 6201925d9501d8469ca1fef56e13a62e76b0fc525761760aa1b1a4488d79a221
+ spec:
+ securityContext:
+ runAsUser: 10000
+ fsGroup: 10000
+ automountServiceAccountToken: false
+ terminationGracePeriodSeconds: 120
+ containers:
+ - name: registry
+ image: goharbor/registry-photon:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /
+ scheme: HTTP
+ port: 5000
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /
+ scheme: HTTP
+ port: 5000
+ initialDelaySeconds: 1
+ periodSeconds: 10
+ resources:
+ requests:
+ memory: 64Mi
+ args: ["serve", "/etc/registry/config.yml"]
+ envFrom:
+ - secretRef:
+ name: "harbor-harbor-harbor-registry"
+ env:
+ ports:
+ - containerPort: 5000
+ - containerPort: 5001
+ volumeMounts:
+ - name: registry-data
+ mountPath: /storage
+ subPath:
+ - name: registry-htpasswd
+ mountPath: /etc/registry/passwd
+ subPath: passwd
+ - name: registry-config
+ mountPath: /etc/registry/config.yml
+ subPath: config.yml
+ - name: registryctl
+ image: goharbor/harbor-registryctl:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ httpGet:
+ path: /api/health
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /api/health
+ scheme: HTTP
+ port: 8080
+ initialDelaySeconds: 1
+ periodSeconds: 10
+ resources:
+ requests:
+ memory: 64Mi
+ envFrom:
+ - secretRef:
+ name: "harbor-harbor-harbor-registry"
+ env:
+ - name: CORE_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: harbor-harbor-harbor-core
+ key: secret
+ - name: JOBSERVICE_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: harbor-harbor-harbor-jobservice
+ key: JOBSERVICE_SECRET
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: registry-data
+ mountPath: /storage
+ subPath:
+ - name: registry-config
+ mountPath: /etc/registry/config.yml
+ subPath: config.yml
+ - name: registry-config
+ mountPath: /etc/registryctl/config.yml
+ subPath: ctl-config.yml
+ volumes:
+ - name: registry-htpasswd
+ secret:
+ secretName: harbor-harbor-harbor-registry-htpasswd
+ items:
+ - key: REGISTRY_HTPASSWD
+ path: passwd
+ - name: registry-config
+ configMap:
+ name: "harbor-harbor-harbor-registry"
+ - name: registry-data
+ emptyDir: {}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: component
+ operator: In
+ values:
+ - registry
+ - key: app
+ operator: In
+ values:
+ - harbor
+ topologyKey: kubernetes.io/hostname
+ priorityClassName: system-cluster-critical
+---
+# Source: harbor/templates/database/database-ss.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: "harbor-harbor-harbor-database"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: database
+spec:
+ replicas: 1
+ serviceName: "harbor-harbor-harbor-database"
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: database
+ template:
+ metadata:
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: database
+ annotations:
+ checksum/secret: 7a382608359a04f6943a40781d4010c95b076ef1dc524f02dfdbbe1f1d4b0615
+ spec:
+ securityContext:
+ runAsUser: 999
+ fsGroup: 999
+ automountServiceAccountToken: false
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ # As the data directory was changed to a sub-folder to support PSPs, this
+ # init container migrates any existing data. See
+ # https://github.com/goharbor/harbor-helm/issues/756 for more detail.
+ # It may be removed after several releases.
+ - name: "data-migrator"
+ image: goharbor/harbor-db:v2.3.0
+ imagePullPolicy: IfNotPresent
+ command: ["/bin/sh"]
+ args: ["-c", "[ -e /var/lib/postgresql/data/postgresql.conf ] && [ ! -d /var/lib/postgresql/data/pgdata ] && mkdir -m 0700 /var/lib/postgresql/data/pgdata && mv /var/lib/postgresql/data/* /var/lib/postgresql/data/pgdata/ || true"]
+ volumeMounts:
+ - name: database-data
+ mountPath: /var/lib/postgresql/data
+ subPath:
+ # with "fsGroup" set, each time a volume is mounted, Kubernetes must recursively chown() and chmod() all the files and directories inside the volume
+ # this causes the postgresql reports the "data directory /var/lib/postgresql/data/pgdata has group or world access" issue when using some CSIs e.g. Ceph
+ # use this init container to correct the permission
+ # as "fsGroup" applied before the init container running, the container has enough permission to execute the command
+ - name: "data-permissions-ensurer"
+ image: goharbor/harbor-db:v2.3.0
+ imagePullPolicy: IfNotPresent
+ command: ["/bin/sh"]
+ args: ["-c", "chmod -R 700 /var/lib/postgresql/data/pgdata || true"]
+ volumeMounts:
+ - name: database-data
+ mountPath: /var/lib/postgresql/data
+ subPath:
+ containers:
+ - name: database
+ image: goharbor/harbor-db:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ exec:
+ command:
+ - /docker-healthcheck.sh
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - /docker-healthcheck.sh
+ initialDelaySeconds: 1
+ periodSeconds: 10
+ envFrom:
+ - secretRef:
+ name: "harbor-harbor-harbor-database"
+ env:
+ # Put the data into a subdirectory to avoid the permission issue on k8s
+ # clusters with a restricted PSP enabled; see
+ # https://github.com/goharbor/harbor-helm/issues/756 for more detail.
+ - name: PGDATA
+ value: "/var/lib/postgresql/data/pgdata"
+ volumeMounts:
+ - name: database-data
+ mountPath: /var/lib/postgresql/data
+ subPath:
+ - name: shm-volume
+ mountPath: /dev/shm
+ volumes:
+ - name: shm-volume
+ emptyDir:
+ medium: Memory
+ sizeLimit: 512Mi
+ volumeClaimTemplates:
+ - metadata:
+ name: "database-data"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "1Gi"
+---
+# Source: harbor/templates/redis/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: harbor-harbor-harbor-redis
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: redis
+spec:
+ replicas: 1
+ serviceName: harbor-harbor-harbor-redis
+ selector:
+ matchLabels:
+ release: harbor-harbor
+ app: "harbor"
+ component: redis
+ template:
+ metadata:
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ component: redis
+ spec:
+ securityContext:
+ runAsUser: 999
+ fsGroup: 999
+ automountServiceAccountToken: false
+ terminationGracePeriodSeconds: 120
+ containers:
+ - name: redis
+ image: goharbor/redis-photon:v2.3.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ readinessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 1
+ periodSeconds: 10
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/redis
+ subPath:
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "1Gi"
+---
+# Source: harbor/templates/ingress/ingress.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: "harbor-harbor-harbor-ingress"
+ labels:
+ heritage: Helm
+ release: harbor-harbor
+ chart: harbor
+ app: "harbor"
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt
+ ingress.kubernetes.io/proxy-body-size: "0"
+ ingress.kubernetes.io/ssl-redirect: "true"
+ nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+ nginx.ingress.kubernetes.io/proxy-body-size: "0"
+ nginx.ingress.kubernetes.io/ssl-redirect: "true"
+spec:
+ tls:
+ - secretName: harbor-harbor-ingress
+ hosts:
+ - harbor.cluster.fun
+ - docker.cluster.fun
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-portal
+ port:
+ number: 80
+ - path: /api/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /service/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /v2
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /chartrepo/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /c/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ host: harbor.cluster.fun
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-portal
+ port:
+ number: 80
+ - path: /api/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /service/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /v2
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /chartrepo/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ - path: /c/
+ pathType: Prefix
+ backend:
+ service:
+ name: harbor-harbor-harbor-core
+ port:
+ number: 80
+ host: docker.cluster.fun
diff --git a/manifests/monitoring/promtail.yaml b/manifests/monitoring/promtail.yaml
index fd612be..d576bce 100644
--- a/manifests/monitoring/promtail.yaml
+++ b/manifests/monitoring/promtail.yaml
@@ -70,6 +70,7 @@ data:
- job_name: kubernetes-pods
pipeline_stages:
- docker: {}
+ - cri: {}
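+ # the cri stage parses the CRI log format written by containerd-based
+ # runtimes; keeping the docker stage alongside lets promtail handle
+ # logs from either container runtime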
- match:
selector: '{app="weave-net"}'
action: drop
@@ -171,417 +172,6 @@ data:
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- # - job_name: kubernetes-pods-name
- # pipeline_stages:
- # - docker: {}
- # - match:
- # selector: '{name="weave-net"}'
- # action: drop
- # - match:
- # selector: '{filename=~".*konnectivity.*"}'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*/healthz.*"'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*kube-probe/.*"'
- # action: drop
- # - match:
- # selector: '{app="internal-proxy"}'
- # action: drop
- # # - match:
- # # selector: '{k8s_app="traefik-ingress-lb"}'
- # # stages:
- # # - json:
- # # expressions:
- # # request_host: RequestHost
- # # request_path: RequestPath
- # # error: error
- # # - drop:
- # # source: "request_path"
- # # value: "/healthz"
- # # - template:
- # # source: has_error
- # # template: '{{ if .error }}true{{ else }}false{{ end }}'
- # # - labels:
- # # request_host:
- # # has_error:
- # kubernetes_sd_configs:
- # - role: pod
- # relabel_configs:
- # - source_labels:
- # - __meta_kubernetes_pod_label_name
- # target_label: __service__
- # - source_labels:
- # - __meta_kubernetes_pod_node_name
- # target_label: __host__
- # - action: drop
- # regex: ''
- # source_labels:
- # - __service__
- # - action: labelmap
- # regex: __meta_kubernetes_pod_label_(.+)
- # - action: replace
- # replacement: $1
- # separator: /
- # source_labels:
- # - __meta_kubernetes_namespace
- # - __service__
- # target_label: job
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_namespace
- # target_label: namespace
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_name
- # target_label: pod
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_container_name
- # target_label: container
- # - replacement: /var/log/pods/*$1/*.log
- # separator: /
- # source_labels:
- # - __meta_kubernetes_pod_uid
- # - __meta_kubernetes_pod_container_name
- # target_label: __path__
-
- # - job_name: kubernetes-pods-app
- # pipeline_stages:
- # - docker: {}
- # - match:
- # selector: '{name="weave-net"}'
- # action: drop
- # - match:
- # selector: '{filename=~".*konnectivity.*"}'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*/healthz.*"'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*kube-probe/.*"'
- # action: drop
- # - match:
- # selector: '{app="internal-proxy"}'
- # action: drop
- # # - match:
- # # selector: '{k8s_app="traefik-ingress-lb"}'
- # # stages:
- # # - json:
- # # expressions:
- # # request_host: RequestHost
- # # request_path: RequestPath
- # # error: error
- # # - drop:
- # # source: "request_path"
- # # value: "/healthz"
- # # - template:
- # # source: has_error
- # # template: '{{ if .error }}true{{ else }}false{{ end }}'
- # # - labels:
- # # request_host:
- # # has_error:
- # kubernetes_sd_configs:
- # - role: pod
- # relabel_configs:
- # - action: drop
- # regex: .+
- # source_labels:
- # - __meta_kubernetes_pod_label_name
- # - source_labels:
- # - __meta_kubernetes_pod_label_app
- # target_label: __service__
- # - source_labels:
- # - __meta_kubernetes_pod_node_name
- # target_label: __host__
- # - action: drop
- # regex: ''
- # source_labels:
- # - __service__
- # - action: labelmap
- # regex: __meta_kubernetes_pod_label_(.+)
- # - action: replace
- # replacement: $1
- # separator: /
- # source_labels:
- # - __meta_kubernetes_namespace
- # - __service__
- # target_label: job
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_namespace
- # target_label: namespace
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_name
- # target_label: pod
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_container_name
- # target_label: container
- # - replacement: /var/log/pods/*$1/*.log
- # separator: /
- # source_labels:
- # - __meta_kubernetes_pod_uid
- # - __meta_kubernetes_pod_container_name
- # target_label: __path__
-
- # - job_name: kubernetes-pods-direct-controllers
- # pipeline_stages:
- # - docker: {}
- # - match:
- # selector: '{name="weave-net"}'
- # action: drop
- # - match:
- # selector: '{filename=~".*konnectivity.*"}'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*/healthz.*"'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*kube-probe/.*"'
- # action: drop
- # - match:
- # selector: '{app="internal-proxy"}'
- # action: drop
- # # - match:
- # # selector: '{k8s_app="traefik-ingress-lb"}'
- # # stages:
- # # - json:
- # # expressions:
- # # request_host: RequestHost
- # # request_path: RequestPath
- # # error: error
- # # - drop:
- # # source: "request_path"
- # # value: "/healthz"
- # # - template:
- # # source: has_error
- # # template: '{{ if .error }}true{{ else }}false{{ end }}'
- # # - labels:
- # # request_host:
- # # has_error:
- # kubernetes_sd_configs:
- # - role: pod
- # relabel_configs:
- # - action: drop
- # regex: .+
- # separator: ''
- # source_labels:
- # - __meta_kubernetes_pod_label_name
- # - __meta_kubernetes_pod_label_app
- # - action: drop
- # regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
- # source_labels:
- # - __meta_kubernetes_pod_controller_name
- # - source_labels:
- # - __meta_kubernetes_pod_controller_name
- # target_label: __service__
- # - source_labels:
- # - __meta_kubernetes_pod_node_name
- # target_label: __host__
- # - action: drop
- # regex: ''
- # source_labels:
- # - __service__
- # - action: labelmap
- # regex: __meta_kubernetes_pod_label_(.+)
- # - action: replace
- # replacement: $1
- # separator: /
- # source_labels:
- # - __meta_kubernetes_namespace
- # - __service__
- # target_label: job
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_namespace
- # target_label: namespace
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_name
- # target_label: pod
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_container_name
- # target_label: container
- # - replacement: /var/log/pods/*$1/*.log
- # separator: /
- # source_labels:
- # - __meta_kubernetes_pod_uid
- # - __meta_kubernetes_pod_container_name
- # target_label: __path__
-
- # - job_name: kubernetes-pods-indirect-controller
- # pipeline_stages:
- # - docker: {}
- # - match:
- # selector: '{name="weave-net"}'
- # action: drop
- # - match:
- # selector: '{filename=~".*konnectivity.*"}'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*/healthz.*"'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*kube-probe/.*"'
- # action: drop
- # - match:
- # selector: '{app="internal-proxy"}'
- # action: drop
- # # - match:
- # # selector: '{k8s_app="traefik-ingress-lb"}'
- # # stages:
- # # - json:
- # # expressions:
- # # request_host: RequestHost
- # # request_path: RequestPath
- # # error: error
- # # - drop:
- # # source: "request_path"
- # # value: "/healthz"
- # # - template:
- # # source: has_error
- # # template: '{{ if .error }}true{{ else }}false{{ end }}'
- # # - labels:
- # # request_host:
- # # has_error:
- # kubernetes_sd_configs:
- # - role: pod
- # relabel_configs:
- # - action: drop
- # regex: .+
- # separator: ''
- # source_labels:
- # - __meta_kubernetes_pod_label_name
- # - __meta_kubernetes_pod_label_app
- # - action: keep
- # regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
- # source_labels:
- # - __meta_kubernetes_pod_controller_name
- # - action: replace
- # regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}'
- # source_labels:
- # - __meta_kubernetes_pod_controller_name
- # target_label: __service__
- # - source_labels:
- # - __meta_kubernetes_pod_node_name
- # target_label: __host__
- # - action: drop
- # regex: ''
- # source_labels:
- # - __service__
- # - action: labelmap
- # regex: __meta_kubernetes_pod_label_(.+)
- # - action: replace
- # replacement: $1
- # separator: /
- # source_labels:
- # - __meta_kubernetes_namespace
- # - __service__
- # target_label: job
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_namespace
- # target_label: namespace
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_name
- # target_label: pod
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_container_name
- # target_label: container
- # - replacement: /var/log/pods/*$1/*.log
- # separator: /
- # source_labels:
- # - __meta_kubernetes_pod_uid
- # - __meta_kubernetes_pod_container_name
- # target_label: __path__
-
- # - job_name: kubernetes-pods-static
- # pipeline_stages:
- # - docker: {}
- # - match:
- # selector: '{name="weave-net"}'
- # action: drop
- # - match:
- # selector: '{filename=~".*konnectivity.*"}'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*/healthz.*"'
- # action: drop
- # - match:
- # selector: '{name=~".*"} |~ ".*kube-probe/.*"'
- # action: drop
- # - match:
- # selector: '{app="internal-proxy"}'
- # action: drop
- # # - match:
- # # selector: '{k8s_app="traefik-ingress-lb"}'
- # # stages:
- # # - json:
- # # expressions:
- # # request_host: RequestHost
- # # request_path: RequestPath
- # # error: error
- # # - drop:
- # # source: "request_path"
- # # value: "/healthz"
- # # - template:
- # # source: has_error
- # # template: '{{ if .error }}true{{ else }}false{{ end }}'
- # # - labels:
- # # request_host:
- # # has_error:
- # kubernetes_sd_configs:
- # - role: pod
- # relabel_configs:
- # - action: drop
- # regex: ''
- # source_labels:
- # - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_label_component
- # target_label: __service__
- # - source_labels:
- # - __meta_kubernetes_pod_node_name
- # target_label: __host__
- # - action: drop
- # regex: ''
- # source_labels:
- # - __service__
- # - action: labelmap
- # regex: __meta_kubernetes_pod_label_(.+)
- # - action: replace
- # replacement: $1
- # separator: /
- # source_labels:
- # - __meta_kubernetes_namespace
- # - __service__
- # target_label: job
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_namespace
- # target_label: namespace
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_name
- # target_label: pod
- # - action: replace
- # source_labels:
- # - __meta_kubernetes_pod_container_name
- # target_label: container
- # - replacement: /var/log/pods/*$1/*.log
- # separator: /
- # source_labels:
- # - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
- # - __meta_kubernetes_pod_container_name
- # target_label: __path__
-
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
@@ -667,7 +257,7 @@ spec:
serviceAccountName: promtail
containers:
- name: promtail
- image: "grafana/promtail:2.2.1"
+ image: "grafana/promtail:2.4.1"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
diff --git a/manifests/nextcloud_chart/manifest.yaml b/manifests/nextcloud_chart/manifest.yaml
new file mode 100644
index 0000000..192b9d6
--- /dev/null
+++ b/manifests/nextcloud_chart/manifest.yaml
@@ -0,0 +1,926 @@
+---
+# Source: nextcloud/charts/redis/templates/secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: nextcloud-nextcloud-redis
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: "nextcloud-nextcloud"
+ heritage: "Helm"
+ annotations:
+ kube-1password: u54jxidod7tlnpwva37f5hcu5y
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+---
+# Source: nextcloud/templates/secrets.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: nextcloud-nextcloud
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ kube-1password: iaz4xmtr2czpsjl6xirhryzfia
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-parse: "true"
+type: Opaque
+
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: nextcloud-s3
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ kube-1password: 7zanxzbyzfctc5d2yqfq6e5zcy
+ kube-1password/vault: Kubernetes
+ kube-1password/secret-text-key: s3.config.php
+type: Opaque
+
+---
+# Source: nextcloud/charts/redis/templates/configmap-scripts.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nextcloud-nextcloud-redis-scripts
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ heritage: Helm
+ release: nextcloud-nextcloud
+data:
+ start-master.sh: |
+ #!/bin/bash
+ if [[ -n $REDIS_PASSWORD_FILE ]]; then
+ password_aux=`cat ${REDIS_PASSWORD_FILE}`
+ export REDIS_PASSWORD=$password_aux
+ fi
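+ # the *_FILE indirection lets the password come from a mounted secret
+ # file instead of an environment variable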
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec /run.sh "${ARGS[@]}"
+ start-slave.sh: |
+ #!/bin/bash
+ if [[ -n $REDIS_PASSWORD_FILE ]]; then
+ password_aux=`cat ${REDIS_PASSWORD_FILE}`
+ export REDIS_PASSWORD=$password_aux
+ fi
+ if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then
+ password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`
+ export REDIS_MASTER_PASSWORD=$password_aux
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec /run.sh "${ARGS[@]}"
+---
+# Source: nextcloud/charts/redis/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nextcloud-nextcloud-redis
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ heritage: Helm
+ release: nextcloud-nextcloud
+data:
+ redis.conf: |-
+ # User-supplied configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ master.conf: |-
+ dir /data
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+---
+# Source: nextcloud/charts/redis/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nextcloud-nextcloud-redis-health
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ heritage: Helm
+ release: nextcloud-nextcloud
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+ no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -a $REDIS_PASSWORD $no_auth_warning \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+ no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -a $REDIS_PASSWORD $no_auth_warning \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+ no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -a $REDIS_MASTER_PASSWORD $no_auth_warning \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+ no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -a $REDIS_MASTER_PASSWORD $no_auth_warning \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: nextcloud/templates/config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nextcloud-nextcloud-config
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+data:
+  general.config.php: |-
+    <?php
+    $CONFIG = array (
+      'overwriteprotocol' => 'https'
+    );
+  .htaccess: |-
+    # line below is for Apache 2.4
+    <IfModule mod_authz_core.c>
+    Require all denied
+    </IfModule>
+    # line below is for Apache 2.2
+    <IfModule !mod_authz_core.c>
+    deny from all
+    </IfModule>
+    # section for Apache 2.2 and 2.4
+    <IfModule mod_autoindex.c>
+    IndexIgnore *
+    </IfModule>
+  redis.config.php: |-
+    <?php
+    if (getenv('REDIS_HOST')) {
+      $CONFIG = array (
+        'memcache.distributed' => '\\OC\\Memcache\\Redis',
+        'memcache.locking' => '\\OC\\Memcache\\Redis',
+        'redis' => array(
+          'host' => getenv('REDIS_HOST'),
+          'port' => getenv('REDIS_HOST_PORT') ?: 6379,
+          'password' => getenv('REDIS_HOST_PASSWORD'),
+        ),
+      );
+    }
+  apache-pretty-urls.config.php: |-
+    <?php
+    $CONFIG = array (
+      'htaccess.RewriteBase' => '/',
+    );
+  apcu.config.php: |-
+    <?php
+    $CONFIG = array (
+      'memcache.local' => '\\OC\\Memcache\\APCu',
+    );
+  apps.config.php: |-
+    <?php
+    $CONFIG = array (
+      "apps_paths" => array (
+        0 => array (
+          "path" => OC::$SERVERROOT."/apps",
+          "url" => "/apps",
+          "writable" => false,
+        ),
+        1 => array (
+          "path" => OC::$SERVERROOT."/custom_apps",
+          "url" => "/custom_apps",
+          "writable" => true,
+        ),
+      ),
+    );
+  autoconfig.php: |-
+    <?php
+  smtp.config.php: |-
+    <?php
+    if (getenv('SMTP_HOST')) {
+      $CONFIG = array (
+        'mail_smtpmode' => 'smtp',
+        'mail_smtphost' => getenv('SMTP_HOST'),
+        'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),
+        'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',
+        'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),
+        'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',
+        'mail_smtpname' => getenv('SMTP_NAME') ?: '',
+        'mail_smtppassword' => getenv('SMTP_PASSWORD') ?: '',
+        'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),
+        'mail_domain' => getenv('MAIL_DOMAIN'),
+      );
+    }
+---
+# Source: nextcloud/templates/nextcloud-pvc.yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: nextcloud-nextcloud-nextcloud
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: app
+spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "5Gi"
+ storageClassName: "scw-bssd-retain"
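+  # Presumably a Scaleway block-SSD class with reclaimPolicy Retain, so the
+  # underlying volume survives even if the PVC is deleted.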
+---
+# Source: nextcloud/charts/redis/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: nextcloud-nextcloud-redis-headless
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ heritage: Helm
+spec:
+ type: ClusterIP
+ clusterIP: None
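+  # Headless: gives each Redis StatefulSet pod a stable DNS name, used below
+  # as REDIS_MASTER_HOST by the replicas.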
+ ports:
+ - name: redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app: redis
+ release: nextcloud-nextcloud
+---
+# Source: nextcloud/charts/redis/templates/redis-master-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: nextcloud-nextcloud-redis-master
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ heritage: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - name: redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app: redis
+ release: nextcloud-nextcloud
+ role: master
+---
+# Source: nextcloud/charts/redis/templates/redis-slave-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: nextcloud-nextcloud-redis-slave
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ heritage: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - name: redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app: redis
+ release: nextcloud-nextcloud
+ role: slave
+---
+# Source: nextcloud/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: nextcloud-nextcloud
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: app
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8080
+ targetPort: http
+ protocol: TCP
+ name: http
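+      # Exposes the pod's named "http" port (containerPort 80) as 8080 in-cluster.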
+ selector:
+ app.kubernetes.io/name: nextcloud
+ app.kubernetes.io/component: app
+---
+# Source: nextcloud/templates/deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nextcloud-nextcloud
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: app
+spec:
+ replicas: 1
+ strategy:
+ type: Recreate
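+  # Recreate rather than RollingUpdate: old and new pods would otherwise
+  # contend for the same ReadWriteOnce data volume during a rollout.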
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: nextcloud
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/component: app
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: nextcloud
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/component: app
+ nextcloud-nextcloud-redis-client: "true"
+ spec:
+ containers:
+ - name: nextcloud
+ image: "nextcloud:21.0.1-apache"
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SQLITE_DATABASE
+ value: "nextcloud"
+ - name: NEXTCLOUD_ADMIN_USER
+ valueFrom:
+ secretKeyRef:
+ name: nextcloud-nextcloud
+ key: nextcloud-username
+ - name: NEXTCLOUD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: nextcloud-nextcloud
+ key: nextcloud-password
+ - name: NEXTCLOUD_TRUSTED_DOMAINS
+ value: nextcloud.cluster.fun
+ - name: NEXTCLOUD_DATA_DIR
+ value: "/var/www/html/data"
+ - name: REDIS_HOST
+ value: nextcloud-nextcloud-redis-master
+ - name: REDIS_HOST_PORT
+ value: "6379"
+ - name: REDIS_HOST_PASSWORD
+ value: changeme
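+          # NOTE: plain-text placeholder password; it must match the
+          # redis-password value in the nextcloud-nextcloud-redis secret,
+          # otherwise the cache connection fails auth.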
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /status.php
+ port: http
+ httpHeaders:
+ - name: Host
+ value: "nextcloud.cluster.fun"
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /status.php
+ port: http
+ httpHeaders:
+ - name: Host
+ value: "nextcloud.cluster.fun"
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ requests:
+ memory: 500Mi
+ volumeMounts:
+ - name: nextcloud-data
+ mountPath: /var/www/
+ subPath: root
+ - name: nextcloud-data
+ mountPath: /var/www/html
+ subPath: html
+ - name: nextcloud-data
+ mountPath: /var/www/html/data
+ subPath: data
+ - name: nextcloud-data
+ mountPath: /var/www/html/config
+ subPath: config
+ - name: nextcloud-data
+ mountPath: /var/www/html/custom_apps
+ subPath: custom_apps
+ - name: nextcloud-data
+ mountPath: /var/www/tmp
+ subPath: tmp
+ - name: nextcloud-data
+ mountPath: /var/www/html/themes
+ subPath: themes
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/general.config.php
+ subPath: general.config.php
+ - name: nextcloud-s3
+ mountPath: /var/www/html/config/s3.config.php
+ subPath: s3.config.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/.htaccess
+ subPath: .htaccess
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/apache-pretty-urls.config.php
+ subPath: apache-pretty-urls.config.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/apcu.config.php
+ subPath: apcu.config.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/apps.config.php
+ subPath: apps.config.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/autoconfig.php
+ subPath: autoconfig.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/redis.config.php
+ subPath: redis.config.php
+ - name: nextcloud-config
+ mountPath: /var/www/html/config/smtp.config.php
+ subPath: smtp.config.php
+ volumes:
+ - name: nextcloud-data
+ persistentVolumeClaim:
+ claimName: nextcloud-nextcloud-nextcloud
+ - name: nextcloud-config
+ configMap:
+ name: nextcloud-nextcloud-config
+ - name: nextcloud-s3
+ secret:
+ secretName: nextcloud-s3
+ # Will mount configuration files as www-data (id: 33) for nextcloud
+ securityContext:
+ fsGroup: 33
+---
+# Source: nextcloud/charts/redis/templates/redis-master-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: nextcloud-nextcloud-redis-master
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ heritage: Helm
+spec:
+ selector:
+ matchLabels:
+ app: redis
+ release: nextcloud-nextcloud
+ role: master
+ serviceName: nextcloud-nextcloud-redis-headless
+ template:
+ metadata:
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ role: master
+ annotations:
+ checksum/health: c0aae3fbf6b70535e576f3897c60cf19bbfa814f584e599380329bda59b56da1
+ checksum/configmap: f8ab8ce93e6b4e78f477182c06db788d39b372cbb49261bf85c85cdfea869df5
+ checksum/secret: 79779a23e0c21d77248d142206b297f89fa5241bb156f83be3705dbb0de0d6e8
+ spec:
+
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: default
+ containers:
+ - name: redis
+ image: docker.io/bitnami/redis:6.0.8-debian-10-r0
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: nextcloud-nextcloud-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ null
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ subPath:
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: nextcloud-nextcloud-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: nextcloud-nextcloud-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: nextcloud-nextcloud-redis
+ - name: redis-tmp-conf
+ emptyDir: {}
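+      # redis-tmp-conf is a writable scratch dir; start-master.sh copies the
+      # read-only mounted config here before launching Redis.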
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels:
+ app: redis
+ release: nextcloud-nextcloud
+ heritage: Helm
+ component: master
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "8Gi"
+
+ selector:
+ updateStrategy:
+ type: RollingUpdate
+---
+# Source: nextcloud/charts/redis/templates/redis-slave-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: nextcloud-nextcloud-redis-slave
+ namespace: nextcloud
+ labels:
+ app: redis
+ chart: redis-11.0.5
+ release: nextcloud-nextcloud
+ heritage: Helm
+spec:
+ replicas: 2
+ serviceName: nextcloud-nextcloud-redis-headless
+ selector:
+ matchLabels:
+ app: redis
+ release: nextcloud-nextcloud
+ role: slave
+ template:
+ metadata:
+ labels:
+ app: redis
+ release: nextcloud-nextcloud
+ chart: redis-11.0.5
+ role: slave
+ annotations:
+ checksum/health: c0aae3fbf6b70535e576f3897c60cf19bbfa814f584e599380329bda59b56da1
+ checksum/configmap: f8ab8ce93e6b4e78f477182c06db788d39b372cbb49261bf85c85cdfea869df5
+ checksum/secret: 79779a23e0c21d77248d142206b297f89fa5241bb156f83be3705dbb0de0d6e8
+ spec:
+
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: default
+ containers:
+ - name: redis
+ image: docker.io/bitnami/redis:6.0.8-debian-10-r0
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-slave.sh
+ env:
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: nextcloud-nextcloud-redis-master-0.nextcloud-nextcloud-redis-headless.nextcloud.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: nextcloud-nextcloud-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: nextcloud-nextcloud-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 11
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 10
+ resources:
+ null
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: nextcloud-nextcloud-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: nextcloud-nextcloud-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: nextcloud-nextcloud-redis
+ - name: redis-tmp-conf
+ emptyDir: {}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels:
+ app: redis
+ release: nextcloud-nextcloud
+ heritage: Helm
+ component: slave
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "8Gi"
+
+ selector:
+ updateStrategy:
+ type: RollingUpdate
+---
+# Source: nextcloud/templates/cronjob.yaml
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: nextcloud-nextcloud-cron
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ {}
+spec:
+ schedule: "*/5 * * * *"
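+  # Webcron mode: Nextcloud background jobs are triggered by fetching cron.php
+  # over the public ingress every five minutes.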
+ concurrencyPolicy: Forbid
+ failedJobsHistoryLimit: 5
+ successfulJobsHistoryLimit: 2
+ jobTemplate:
+ metadata:
+ labels:
+ app.kubernetes.io/name: nextcloud
+ app.kubernetes.io/managed-by: Helm
+ spec:
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: nextcloud
+ app.kubernetes.io/managed-by: Helm
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: nextcloud
+ image: "nextcloud:21.0.1-apache"
+ imagePullPolicy: IfNotPresent
+ command: [ "curl" ]
+ args:
+ - "--fail"
+ - "-L"
+ - "https://nextcloud.cluster.fun/cron.php"
+ resources:
+ requests:
+ memory: 500Mi
+---
+# Source: nextcloud/templates/ingress.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: nextcloud-nextcloud
+ labels:
+ app.kubernetes.io/name: nextcloud
+ helm.sh/chart: nextcloud-2.6.3
+ app.kubernetes.io/instance: nextcloud-nextcloud
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: app
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt
+ nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
+ nginx.ingress.kubernetes.io/proxy-body-size: "0"
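+    # A proxy-body-size of "0" disables nginx's upload limit so large files can be synced.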
+spec:
+ rules:
+ - host: nextcloud.cluster.fun
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: nextcloud-nextcloud
+ port:
+ number: 8080
+ tls:
+ - hosts:
+ - nextcloud.cluster.fun
+ secretName: nextcloud-ingress
diff --git a/manifests/nextcloud_chart/nextcloud_chart.yaml b/manifests/nextcloud_chart/nextcloud_chart.yaml
deleted file mode 100644
index 265af65..0000000
--- a/manifests/nextcloud_chart/nextcloud_chart.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: nextcloud-values
- namespace: nextcloud
- annotations:
- kube-1password: v32a4zpuvhmxxrwmtmmv6526ry
- kube-1password/vault: Kubernetes
- kube-1password/secret-text-key: values.yaml
-type: Opaque
----
-
-apiVersion: helm.fluxcd.io/v1
-kind: HelmRelease
-metadata:
- name: nextcloud
- namespace: nextcloud
-spec:
- chart:
- repository: https://nextcloud.github.io/helm/
- name: nextcloud
- version: 2.6.3
- maxHistory: 5
- valuesFrom:
- - secretKeyRef:
- name: nextcloud-values
- namespace: nextcloud
- key: values.yaml
- optional: false
- values:
- image:
- tag: 21.0.1-apache
- pullPolicy: IfNotPresent
- replicaCount: 1
- ingress:
- enabled: true
- annotations:
- cert-manager.io/cluster-issuer: letsencrypt
- nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
- nginx.ingress.kubernetes.io/proxy-body-size: "0"
- tls:
- - hosts:
- - nextcloud.cluster.fun
- secretName: nextcloud-ingress
- nextcloud:
- host: nextcloud.cluster.fun
- persistence:
- enabled: true
- storageClass: scw-bssd-retain
- size: 5Gi
- redis:
- enabled: true
- cronjob:
- enabled: true
- resources:
- requests:
- memory: 500Mi
-
diff --git a/manifests/nodered/vpa.yaml b/manifests/nodered/vpa.yaml
new file mode 100644
index 0000000..a02c4ae
--- /dev/null
+++ b/manifests/nodered/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: node-red
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: node-red
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/opengraph/vpa.yaml b/manifests/opengraph/vpa.yaml
new file mode 100644
index 0000000..32c8fc4
--- /dev/null
+++ b/manifests/opengraph/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: opengraph
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: opengraph
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/outline/vpa.yaml b/manifests/outline/vpa.yaml
new file mode 100644
index 0000000..8e172c7
--- /dev/null
+++ b/manifests/outline/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: outline
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: StatefulSet
+ name: outline
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/paradoxfox/vpa.yaml b/manifests/paradoxfox/vpa.yaml
new file mode 100644
index 0000000..eb0d06d
--- /dev/null
+++ b/manifests/paradoxfox/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: paradoxfox
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: paradoxfox
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/qr/vpa.yaml b/manifests/qr/vpa.yaml
new file mode 100644
index 0000000..15a9bab
--- /dev/null
+++ b/manifests/qr/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: qr
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: qr
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/rss/vpa.yaml b/manifests/rss/vpa.yaml
new file mode 100644
index 0000000..439d3fb
--- /dev/null
+++ b/manifests/rss/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: rss
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: rss
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/skooner/manifest.yaml b/manifests/skooner/manifest.yaml
index 04dddfd..4a0c828 100644
--- a/manifests/skooner/manifest.yaml
+++ b/manifests/skooner/manifest.yaml
@@ -43,6 +43,7 @@ spec:
containers:
- name: skooner
image: ghcr.io/skooner-k8s/skooner:stable
+ imagePullPolicy: Always
ports:
- containerPort: 4654
livenessProbe:
diff --git a/manifests/svg-to-dxf/vpa.yaml b/manifests/svg-to-dxf/vpa.yaml
new file mode 100644
index 0000000..26e7246
--- /dev/null
+++ b/manifests/svg-to-dxf/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: svg-to-dxf
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: svg-to-dxf
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/talks/vpa.yaml b/manifests/talks/vpa.yaml
new file mode 100644
index 0000000..42a368e
--- /dev/null
+++ b/manifests/talks/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: talks
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: talks
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/tank/vpa.yaml b/manifests/tank/vpa.yaml
new file mode 100644
index 0000000..2169a12
--- /dev/null
+++ b/manifests/tank/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: tank
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: tank
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/text-to-dxf/vpa.yaml b/manifests/text-to-dxf/vpa.yaml
new file mode 100644
index 0000000..6f05b07
--- /dev/null
+++ b/manifests/text-to-dxf/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: text-to-dxf
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: text-to-dxf
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/til/vpa.yaml b/manifests/til/vpa.yaml
new file mode 100644
index 0000000..8fc4f07
--- /dev/null
+++ b/manifests/til/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: til
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: til
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/tweetsvg/vpa.yaml b/manifests/tweetsvg/vpa.yaml
new file mode 100644
index 0000000..cd05e6b
--- /dev/null
+++ b/manifests/tweetsvg/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: tweetsvg
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: tweetsvg
+ updatePolicy:
+ updateMode: "Auto"
diff --git a/manifests/twitter-profile-pic/vpa.yaml b/manifests/twitter-profile-pic/vpa.yaml
new file mode 100644
index 0000000..1fdbe24
--- /dev/null
+++ b/manifests/twitter-profile-pic/vpa.yaml
@@ -0,0 +1,11 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ name: twitter-profile-pic
+spec:
+ targetRef:
+ apiVersion: "apps/v1"
+ kind: Deployment
+ name: twitter-profile-pic
+ updatePolicy:
+ updateMode: "Auto"