commit 3a0ace71a6
Marcus Noble 2021-12-31 18:54:00 +00:00
31 changed files with 2639 additions and 603 deletions


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: anniversary
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: anniversary
  updatePolicy:
    updateMode: "Auto"
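
Note: the same VerticalPodAutoscaler stanza is added for each workload in this
commit, differing only in the target name (and, for outline, the target kind).
A quick way to sanity-check what the recommender has decided once it has
usage data — a sketch, assuming kubectl is pointed at this cluster:

kubectl get vpa --all-namespaces
kubectl describe vpa anniversary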


@ -0,0 +1,24 @@
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
#   name: calendso
#   namespace: argocd
#   finalizers:
#   - resources-finalizer.argocd.argoproj.io
# spec:
#   project: cluster.fun
#   destination:
#     namespace: calendso
#     name: cluster-fun (scaleway)
#   source:
#     path: manifests/calendso
#     repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
#     targetRevision: HEAD
#   syncPolicy:
#     automated: {}
#     syncOptions:
#     - CreateNamespace=true
#   ignoreDifferences:
#   - kind: Secret
#     jsonPointers:
#     - /data


@ -16,6 +16,8 @@ spec:
     targetRevision: HEAD
   syncPolicy:
     automated: {}
+    syncOptions:
+    - CreateNamespace=true
   ignoreDifferences:
   - kind: Secret
     jsonPointers:
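
Note: CreateNamespace=true tells Argo CD to create the destination namespace
during sync rather than failing when it is missing. The same option can be
set per app from the CLI — a sketch, assuming a logged-in argocd CLI:

argocd app set calendso --sync-option CreateNamespace=true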

manifests/_apps/vpa.yaml (new file, +27)

@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vpa
  namespace: argocd
  finalizers:
  - resources-finalizer.argocd.argoproj.io
spec:
  project: cluster.fun
  destination:
    namespace: kube-system
    name: cluster-fun (scaleway)
  source:
    repoURL: 'https://charts.fairwinds.com/stable'
    targetRevision: 0.5.0
    chart: vpa
    helm:
      version: v3
      values: |-
        recommender:
          extraArgs:
            prometheus-address: "http://prometheus-server.monitoring.svc:80"
            storage: prometheus
        admissionController:
          enabled: true
  syncPolicy:
    automated: {}
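
Note: the Fairwinds chart installs the VPA recommender (here reading usage
history from Prometheus) plus the admission controller. A sketch for checking
the components came up, assuming the chart's default labels — they are not
pinned down anywhere in this commit:

kubectl -n kube-system get pods -l app.kubernetes.io/name=vpa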

manifests/base64/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: base64
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: base64
  updatePolicy:
    updateMode: "Auto"

manifests/blog/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: blog
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: blog
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,120 @@
apiVersion: v1
kind: Secret
metadata:
  name: calendso
  namespace: calendso
  annotations:
    kube-1password: shgjmetsq7fcizmzzdn5ishzxu
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
  name: calendso
  namespace: calendso
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: web
    name: web
  selector:
    app: calendso
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calendso
  namespace: calendso
spec:
  replicas: 1
  selector:
    matchLabels:
      app: calendso
  template:
    metadata:
      labels:
        app: calendso
    spec:
      containers:
      - name: web
        image: calendso/calendso:latest
        imagePullPolicy: Always
        command:
        - sh
        - -c
        - |
          apt update && apt install -y netcat
          /app/scripts/start.sh
        env:
        - name: NEXT_PUBLIC_LICENSE_CONSENT
          value: agree
        - name: NEXT_PUBLIC_TELEMETRY_KEY
          value: ""
        - name: BASE_URL
          value: "https://meet.marcusnoble.co.uk"
        - name: NEXT_PUBLIC_APP_URL
          value: "https://meet.marcusnoble.co.uk"
        - name: NODE_ENV
          value: production
        - name: POSTGRES_DB
          value: calendso
        - name: DATABASE_HOST
          value: localhost:5432
        envFrom:
        - secretRef:
            name: calendso
        ports:
        - containerPort: 3000
          name: web
      - name: postgres
        image: postgres:9-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 5432
          name: db
        env:
        - name: POSTGRES_USER
          valueFrom:
            secretKeyRef:
              name: calendso
              key: POSTGRES_USER
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: calendso
              key: POSTGRES_PASSWORD
        - name: POSTGRES_DB
          value: calendso
        - name: PGDATA
          value: /var/lib/postgresql/data/calendso
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: calendso
  namespace: calendso
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - meet.marcusnoble.co.uk
    secretName: calendso-ingress
  rules:
  - host: meet.marcusnoble.co.uk
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: calendso
            port:
              number: 80
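
Note: the pod runs Calendso and its PostgreSQL database side by side, with the
app reaching the database over localhost:5432 and credentials injected from
the 1Password-synced Secret. A sketch for smoke-testing the Service before
relying on the Ingress, assuming kubectl access to the cluster (the Host
header matches the Ingress rule):

kubectl -n calendso port-forward svc/calendso 8080:80
curl -H "Host: meet.marcusnoble.co.uk" http://localhost:8080/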


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: cors-proxy
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: cors-proxy
  updatePolicy:
    updateMode: "Auto"

manifests/cv/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: cv
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: cv
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: dashboard
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: dashboard
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: feed-fetcher
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: feed-fetcher
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: goplayground
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: goplayground
  updatePolicy:
    updateMode: "Auto"


@ -1,133 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: harbor
---
apiVersion: v1
kind: Secret
metadata:
  name: harbor-values
  namespace: harbor
  annotations:
    kube-1password: igey7vjjiqmj25v64eck7cyj34
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-key: values.yaml
type: Opaque
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: harbor
  namespace: harbor
spec:
  chart:
    repository: https://helm.goharbor.io
    name: harbor
    version: 1.7.0
  maxHistory: 4
  skipCRDs: false
  valuesFrom:
  - secretKeyRef:
      name: harbor-values
      namespace: harbor
      key: values.yaml
      optional: false
  values:
    fullnameOverride: harbor-harbor-harbor
    externalURL: https://docker.cluster.fun
    updateStrategy:
      type: Recreate
    expose:
      type: ingress
      tls:
        enabled: true
        certSource: secret
        secret:
          secretName: harbor-harbor-ingress
      ingress:
        hosts:
          core: docker.cluster.fun
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt
          nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
          nginx.ingress.kubernetes.io/proxy-body-size: "0"
    portal:
      replicas: 2
      priorityClassName: system-cluster-critical
      resources:
        requests:
          memory: 64Mi
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: component
                operator: In
                values:
                - portal
              - key: app
                operator: In
                values:
                - harbor
            topologyKey: kubernetes.io/hostname
    core:
      replicas: 2
      priorityClassName: system-cluster-critical
      resources:
        requests:
          memory: 64Mi
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: component
                operator: In
                values:
                - core
              - key: app
                operator: In
                values:
                - harbor
            topologyKey: kubernetes.io/hostname
    jobservice:
      replicas: 1
      resources:
        requests:
          memory: 64Mi
      jobLoggers:
      - stdout
    registry:
      replicas: 2
      priorityClassName: system-cluster-critical
      registry:
        resources:
          requests:
            memory: 64Mi
      controller:
        resources:
          requests:
            memory: 64Mi
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: component
                operator: In
                values:
                - registry
              - key: app
                operator: In
                values:
                - harbor
            topologyKey: kubernetes.io/hostname
    chartmuseum:
      enabled: false
    notary:
      enabled: false
    trivy:
      enabled: false
    metrics:
      enabled: true

File diff suppressed because it is too large


@ -70,6 +70,7 @@ data:
     - job_name: kubernetes-pods
       pipeline_stages:
       - docker: {}
+      - cri: {}
       - match:
           selector: '{app="weave-net"}'
           action: drop
@ -171,417 +172,6 @@ data:
       - action: labelmap
         regex: __meta_kubernetes_pod_label_(.+)
# - job_name: kubernetes-pods-name
# pipeline_stages:
# - docker: {}
# - match:
# selector: '{name="weave-net"}'
# action: drop
# - match:
# selector: '{filename=~".*konnectivity.*"}'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*/healthz.*"'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*kube-probe/.*"'
# action: drop
# - match:
# selector: '{app="internal-proxy"}'
# action: drop
# # - match:
# # selector: '{k8s_app="traefik-ingress-lb"}'
# # stages:
# # - json:
# # expressions:
# # request_host: RequestHost
# # request_path: RequestPath
# # error: error
# # - drop:
# # source: "request_path"
# # value: "/healthz"
# # - template:
# # source: has_error
# # template: '{{ if .error }}true{{ else }}false{{ end }}'
# # - labels:
# # request_host:
# # has_error:
# kubernetes_sd_configs:
# - role: pod
# relabel_configs:
# - source_labels:
# - __meta_kubernetes_pod_label_name
# target_label: __service__
# - source_labels:
# - __meta_kubernetes_pod_node_name
# target_label: __host__
# - action: drop
# regex: ''
# source_labels:
# - __service__
# - action: labelmap
# regex: __meta_kubernetes_pod_label_(.+)
# - action: replace
# replacement: $1
# separator: /
# source_labels:
# - __meta_kubernetes_namespace
# - __service__
# target_label: job
# - action: replace
# source_labels:
# - __meta_kubernetes_namespace
# target_label: namespace
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_name
# target_label: pod
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_container_name
# target_label: container
# - replacement: /var/log/pods/*$1/*.log
# separator: /
# source_labels:
# - __meta_kubernetes_pod_uid
# - __meta_kubernetes_pod_container_name
# target_label: __path__
# - job_name: kubernetes-pods-app
# pipeline_stages:
# - docker: {}
# - match:
# selector: '{name="weave-net"}'
# action: drop
# - match:
# selector: '{filename=~".*konnectivity.*"}'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*/healthz.*"'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*kube-probe/.*"'
# action: drop
# - match:
# selector: '{app="internal-proxy"}'
# action: drop
# # - match:
# # selector: '{k8s_app="traefik-ingress-lb"}'
# # stages:
# # - json:
# # expressions:
# # request_host: RequestHost
# # request_path: RequestPath
# # error: error
# # - drop:
# # source: "request_path"
# # value: "/healthz"
# # - template:
# # source: has_error
# # template: '{{ if .error }}true{{ else }}false{{ end }}'
# # - labels:
# # request_host:
# # has_error:
# kubernetes_sd_configs:
# - role: pod
# relabel_configs:
# - action: drop
# regex: .+
# source_labels:
# - __meta_kubernetes_pod_label_name
# - source_labels:
# - __meta_kubernetes_pod_label_app
# target_label: __service__
# - source_labels:
# - __meta_kubernetes_pod_node_name
# target_label: __host__
# - action: drop
# regex: ''
# source_labels:
# - __service__
# - action: labelmap
# regex: __meta_kubernetes_pod_label_(.+)
# - action: replace
# replacement: $1
# separator: /
# source_labels:
# - __meta_kubernetes_namespace
# - __service__
# target_label: job
# - action: replace
# source_labels:
# - __meta_kubernetes_namespace
# target_label: namespace
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_name
# target_label: pod
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_container_name
# target_label: container
# - replacement: /var/log/pods/*$1/*.log
# separator: /
# source_labels:
# - __meta_kubernetes_pod_uid
# - __meta_kubernetes_pod_container_name
# target_label: __path__
# - job_name: kubernetes-pods-direct-controllers
# pipeline_stages:
# - docker: {}
# - match:
# selector: '{name="weave-net"}'
# action: drop
# - match:
# selector: '{filename=~".*konnectivity.*"}'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*/healthz.*"'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*kube-probe/.*"'
# action: drop
# - match:
# selector: '{app="internal-proxy"}'
# action: drop
# # - match:
# # selector: '{k8s_app="traefik-ingress-lb"}'
# # stages:
# # - json:
# # expressions:
# # request_host: RequestHost
# # request_path: RequestPath
# # error: error
# # - drop:
# # source: "request_path"
# # value: "/healthz"
# # - template:
# # source: has_error
# # template: '{{ if .error }}true{{ else }}false{{ end }}'
# # - labels:
# # request_host:
# # has_error:
# kubernetes_sd_configs:
# - role: pod
# relabel_configs:
# - action: drop
# regex: .+
# separator: ''
# source_labels:
# - __meta_kubernetes_pod_label_name
# - __meta_kubernetes_pod_label_app
# - action: drop
# regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
# source_labels:
# - __meta_kubernetes_pod_controller_name
# - source_labels:
# - __meta_kubernetes_pod_controller_name
# target_label: __service__
# - source_labels:
# - __meta_kubernetes_pod_node_name
# target_label: __host__
# - action: drop
# regex: ''
# source_labels:
# - __service__
# - action: labelmap
# regex: __meta_kubernetes_pod_label_(.+)
# - action: replace
# replacement: $1
# separator: /
# source_labels:
# - __meta_kubernetes_namespace
# - __service__
# target_label: job
# - action: replace
# source_labels:
# - __meta_kubernetes_namespace
# target_label: namespace
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_name
# target_label: pod
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_container_name
# target_label: container
# - replacement: /var/log/pods/*$1/*.log
# separator: /
# source_labels:
# - __meta_kubernetes_pod_uid
# - __meta_kubernetes_pod_container_name
# target_label: __path__
# - job_name: kubernetes-pods-indirect-controller
# pipeline_stages:
# - docker: {}
# - match:
# selector: '{name="weave-net"}'
# action: drop
# - match:
# selector: '{filename=~".*konnectivity.*"}'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*/healthz.*"'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*kube-probe/.*"'
# action: drop
# - match:
# selector: '{app="internal-proxy"}'
# action: drop
# # - match:
# # selector: '{k8s_app="traefik-ingress-lb"}'
# # stages:
# # - json:
# # expressions:
# # request_host: RequestHost
# # request_path: RequestPath
# # error: error
# # - drop:
# # source: "request_path"
# # value: "/healthz"
# # - template:
# # source: has_error
# # template: '{{ if .error }}true{{ else }}false{{ end }}'
# # - labels:
# # request_host:
# # has_error:
# kubernetes_sd_configs:
# - role: pod
# relabel_configs:
# - action: drop
# regex: .+
# separator: ''
# source_labels:
# - __meta_kubernetes_pod_label_name
# - __meta_kubernetes_pod_label_app
# - action: keep
# regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
# source_labels:
# - __meta_kubernetes_pod_controller_name
# - action: replace
# regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}'
# source_labels:
# - __meta_kubernetes_pod_controller_name
# target_label: __service__
# - source_labels:
# - __meta_kubernetes_pod_node_name
# target_label: __host__
# - action: drop
# regex: ''
# source_labels:
# - __service__
# - action: labelmap
# regex: __meta_kubernetes_pod_label_(.+)
# - action: replace
# replacement: $1
# separator: /
# source_labels:
# - __meta_kubernetes_namespace
# - __service__
# target_label: job
# - action: replace
# source_labels:
# - __meta_kubernetes_namespace
# target_label: namespace
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_name
# target_label: pod
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_container_name
# target_label: container
# - replacement: /var/log/pods/*$1/*.log
# separator: /
# source_labels:
# - __meta_kubernetes_pod_uid
# - __meta_kubernetes_pod_container_name
# target_label: __path__
# - job_name: kubernetes-pods-static
# pipeline_stages:
# - docker: {}
# - match:
# selector: '{name="weave-net"}'
# action: drop
# - match:
# selector: '{filename=~".*konnectivity.*"}'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*/healthz.*"'
# action: drop
# - match:
# selector: '{name=~".*"} |~ ".*kube-probe/.*"'
# action: drop
# - match:
# selector: '{app="internal-proxy"}'
# action: drop
# # - match:
# # selector: '{k8s_app="traefik-ingress-lb"}'
# # stages:
# # - json:
# # expressions:
# # request_host: RequestHost
# # request_path: RequestPath
# # error: error
# # - drop:
# # source: "request_path"
# # value: "/healthz"
# # - template:
# # source: has_error
# # template: '{{ if .error }}true{{ else }}false{{ end }}'
# # - labels:
# # request_host:
# # has_error:
# kubernetes_sd_configs:
# - role: pod
# relabel_configs:
# - action: drop
# regex: ''
# source_labels:
# - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_label_component
# target_label: __service__
# - source_labels:
# - __meta_kubernetes_pod_node_name
# target_label: __host__
# - action: drop
# regex: ''
# source_labels:
# - __service__
# - action: labelmap
# regex: __meta_kubernetes_pod_label_(.+)
# - action: replace
# replacement: $1
# separator: /
# source_labels:
# - __meta_kubernetes_namespace
# - __service__
# target_label: job
# - action: replace
# source_labels:
# - __meta_kubernetes_namespace
# target_label: namespace
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_name
# target_label: pod
# - action: replace
# source_labels:
# - __meta_kubernetes_pod_container_name
# target_label: container
# - replacement: /var/log/pods/*$1/*.log
# separator: /
# source_labels:
# - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
# - __meta_kubernetes_pod_container_name
# target_label: __path__
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
@ -667,7 +257,7 @@ spec:
       serviceAccountName: promtail
       containers:
       - name: promtail
-        image: "grafana/promtail:2.2.1"
+        image: "grafana/promtail:2.4.1"
         imagePullPolicy: IfNotPresent
         args:
         - "-config.file=/etc/promtail/promtail.yaml"


@ -0,0 +1,926 @@
---
# Source: nextcloud/charts/redis/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: nextcloud-nextcloud-redis
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: "nextcloud-nextcloud"
    heritage: "Helm"
  annotations:
    kube-1password: u54jxidod7tlnpwva37f5hcu5y
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-parse: "true"
type: Opaque
---
# Source: nextcloud/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
  annotations:
    kube-1password: iaz4xmtr2czpsjl6xirhryzfia
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  name: nextcloud-s3
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
  annotations:
    kube-1password: 7zanxzbyzfctc5d2yqfq6e5zcy
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-key: s3.config.php
type: Opaque
---
# Source: nextcloud/charts/redis/templates/configmap-scripts.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nextcloud-nextcloud-redis-scripts
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    heritage: Helm
    release: nextcloud-nextcloud
data:
  start-master.sh: |
    #!/bin/bash
    if [[ -n $REDIS_PASSWORD_FILE ]]; then
      password_aux=`cat ${REDIS_PASSWORD_FILE}`
      export REDIS_PASSWORD=$password_aux
    fi
    if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
    exec /run.sh "${ARGS[@]}"
  start-slave.sh: |
    #!/bin/bash
    if [[ -n $REDIS_PASSWORD_FILE ]]; then
      password_aux=`cat ${REDIS_PASSWORD_FILE}`
      export REDIS_PASSWORD=$password_aux
    fi
    if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then
      password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`
      export REDIS_MASTER_PASSWORD=$password_aux
    fi
    if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
    exec /run.sh "${ARGS[@]}"
---
# Source: nextcloud/charts/redis/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nextcloud-nextcloud-redis
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    heritage: Helm
    release: nextcloud-nextcloud
data:
  redis.conf: |-
    # User-supplied configuration:
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly yes
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
  master.conf: |-
    dir /data
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
  replica.conf: |-
    dir /data
    slave-read-only yes
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
---
# Source: nextcloud/charts/redis/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nextcloud-nextcloud-redis-health
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    heritage: Helm
    release: nextcloud-nextcloud
data:
  ping_readiness_local.sh: |-
    #!/bin/bash
    no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -a $REDIS_PASSWORD $no_auth_warning \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_local.sh: |-
    #!/bin/bash
    no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -a $REDIS_PASSWORD $no_auth_warning \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_master.sh: |-
    #!/bin/bash
    no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -a $REDIS_MASTER_PASSWORD $no_auth_warning \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_master.sh: |-
    #!/bin/bash
    no_auth_warning=$([[ "$(redis-cli --version)" =~ (redis-cli 5.*) ]] && echo --no-auth-warning)
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -a $REDIS_MASTER_PASSWORD $no_auth_warning \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_liveness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
    exit $exit_status
---
# Source: nextcloud/templates/config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nextcloud-nextcloud-config
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
data:
  general.config.php: |-
    <?php
    $CONFIG = array (
      'overwriteprotocol' => 'https'
    );
  .htaccess: |-
    # line below if for Apache 2.4
    <ifModule mod_authz_core.c>
      Require all denied
    </ifModule>
    # line below if for Apache 2.2
    <ifModule !mod_authz_core.c>
      deny from all
    </ifModule>
    # section for Apache 2.2 and 2.4
    <ifModule mod_autoindex.c>
      IndexIgnore *
    </ifModule>
  redis.config.php: |-
    <?php
    if (getenv('REDIS_HOST')) {
      $CONFIG = array (
        'memcache.distributed' => '\\OC\\Memcache\\Redis',
        'memcache.locking' => '\\OC\\Memcache\\Redis',
        'redis' => array(
          'host' => getenv('REDIS_HOST'),
          'port' => getenv('REDIS_HOST_PORT') ?: 6379,
          'password' => getenv('REDIS_HOST_PASSWORD'),
        ),
      );
    }
  apache-pretty-urls.config.php: |-
    <?php
    $CONFIG = array (
      'htaccess.RewriteBase' => '/',
    );
  apcu.config.php: |-
    <?php
    $CONFIG = array (
      'memcache.local' => '\\OC\\Memcache\\APCu',
    );
  apps.config.php: |-
    <?php
    $CONFIG = array (
      "apps_paths" => array (
        0 => array (
          "path" => OC::$SERVERROOT."/apps",
          "url" => "/apps",
          "writable" => false,
        ),
        1 => array (
          "path" => OC::$SERVERROOT."/custom_apps",
          "url" => "/custom_apps",
          "writable" => true,
        ),
      ),
    );
  autoconfig.php: |-
    <?php
    $autoconfig_enabled = false;
    if (getenv('SQLITE_DATABASE')) {
      $AUTOCONFIG["dbtype"] = "sqlite";
      $AUTOCONFIG["dbname"] = getenv('SQLITE_DATABASE');
      $autoconfig_enabled = true;
    } elseif (getenv('MYSQL_DATABASE') && getenv('MYSQL_USER') && getenv('MYSQL_PASSWORD') && getenv('MYSQL_HOST')) {
      $AUTOCONFIG["dbtype"] = "mysql";
      $AUTOCONFIG["dbname"] = getenv('MYSQL_DATABASE');
      $AUTOCONFIG["dbuser"] = getenv('MYSQL_USER');
      $AUTOCONFIG["dbpass"] = getenv('MYSQL_PASSWORD');
      $AUTOCONFIG["dbhost"] = getenv('MYSQL_HOST');
      $autoconfig_enabled = true;
    } elseif (getenv('POSTGRES_DB') && getenv('POSTGRES_USER') && getenv('POSTGRES_PASSWORD') && getenv('POSTGRES_HOST')) {
      $AUTOCONFIG["dbtype"] = "pgsql";
      $AUTOCONFIG["dbname"] = getenv('POSTGRES_DB');
      $AUTOCONFIG["dbuser"] = getenv('POSTGRES_USER');
      $AUTOCONFIG["dbpass"] = getenv('POSTGRES_PASSWORD');
      $AUTOCONFIG["dbhost"] = getenv('POSTGRES_HOST');
      $autoconfig_enabled = true;
    }
    if ($autoconfig_enabled) {
      $AUTOCONFIG["directory"] = getenv('NEXTCLOUD_DATA_DIR') ?: "/var/www/html/data";
    }
  smtp.config.php: |-
    <?php
    if (getenv('SMTP_HOST') && getenv('MAIL_FROM_ADDRESS') && getenv('MAIL_DOMAIN')) {
      $CONFIG = array (
        'mail_smtpmode' => 'smtp',
        'mail_smtphost' => getenv('SMTP_HOST'),
        'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),
        'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',
        'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),
        'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',
        'mail_smtpname' => getenv('SMTP_NAME') ?: '',
        'mail_smtppassword' => getenv('SMTP_PASSWORD') ?: '',
        'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),
        'mail_domain' => getenv('MAIL_DOMAIN'),
      );
    }
---
# Source: nextcloud/templates/nextcloud-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nextcloud-nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  accessModes:
  - "ReadWriteOnce"
  resources:
    requests:
      storage: "5Gi"
  storageClassName: "scw-bssd-retain"
---
# Source: nextcloud/charts/redis/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nextcloud-nextcloud-redis-headless
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: nextcloud-nextcloud
    heritage: Helm
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: redis
    port: 6379
    targetPort: redis
  selector:
    app: redis
    release: nextcloud-nextcloud
---
# Source: nextcloud/charts/redis/templates/redis-master-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nextcloud-nextcloud-redis-master
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: nextcloud-nextcloud
    heritage: Helm
spec:
  type: ClusterIP
  ports:
  - name: redis
    port: 6379
    targetPort: redis
  selector:
    app: redis
    release: nextcloud-nextcloud
    role: master
---
# Source: nextcloud/charts/redis/templates/redis-slave-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nextcloud-nextcloud-redis-slave
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: nextcloud-nextcloud
    heritage: Helm
spec:
  type: ClusterIP
  ports:
  - name: redis
    port: 6379
    targetPort: redis
  selector:
    app: redis
    release: nextcloud-nextcloud
    role: slave
---
# Source: nextcloud/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  type: ClusterIP
  ports:
  - port: 8080
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app.kubernetes.io/name: nextcloud
    app.kubernetes.io/component: app
---
# Source: nextcloud/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: nextcloud
      app.kubernetes.io/instance: nextcloud-nextcloud
      app.kubernetes.io/component: app
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nextcloud
        app.kubernetes.io/instance: nextcloud-nextcloud
        app.kubernetes.io/component: app
        nextcloud-nextcloud-redis-client: "true"
    spec:
      containers:
      - name: nextcloud
        image: "nextcloud:21.0.1-apache"
        imagePullPolicy: IfNotPresent
        env:
        - name: SQLITE_DATABASE
          value: "nextcloud"
        - name: NEXTCLOUD_ADMIN_USER
          valueFrom:
            secretKeyRef:
              name: nextcloud-nextcloud
              key: nextcloud-username
        - name: NEXTCLOUD_ADMIN_PASSWORD
          valueFrom:
            secretKeyRef:
              name: nextcloud-nextcloud
              key: nextcloud-password
        - name: NEXTCLOUD_TRUSTED_DOMAINS
          value: nextcloud.cluster.fun
        - name: NEXTCLOUD_DATA_DIR
          value: "/var/www/html/data"
        - name: REDIS_HOST
          value: nextcloud-nextcloud-redis-master
        - name: REDIS_HOST_PORT
          value: "6379"
        - name: REDIS_HOST_PASSWORD
          value: changeme
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /status.php
            port: http
            httpHeaders:
            - name: Host
              value: "nextcloud.cluster.fun"
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /status.php
            port: http
            httpHeaders:
            - name: Host
              value: "nextcloud.cluster.fun"
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        resources:
          requests:
            memory: 500Mi
        volumeMounts:
        - name: nextcloud-data
          mountPath: /var/www/
          subPath: root
        - name: nextcloud-data
          mountPath: /var/www/html
          subPath: html
        - name: nextcloud-data
          mountPath: /var/www/html/data
          subPath: data
        - name: nextcloud-data
          mountPath: /var/www/html/config
          subPath: config
        - name: nextcloud-data
          mountPath: /var/www/html/custom_apps
          subPath: custom_apps
        - name: nextcloud-data
          mountPath: /var/www/tmp
          subPath: tmp
        - name: nextcloud-data
          mountPath: /var/www/html/themes
          subPath: themes
        - name: nextcloud-config
          mountPath: /var/www/html/config/general.config.php
          subPath: general.config.php
        - name: nextcloud-s3
          mountPath: /var/www/html/config/s3.config.php
          subPath: s3.config.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/.htaccess
          subPath: .htaccess
        - name: nextcloud-config
          mountPath: /var/www/html/config/apache-pretty-urls.config.php
          subPath: apache-pretty-urls.config.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/apcu.config.php
          subPath: apcu.config.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/apps.config.php
          subPath: apps.config.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/autoconfig.php
          subPath: autoconfig.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/redis.config.php
          subPath: redis.config.php
        - name: nextcloud-config
          mountPath: /var/www/html/config/smtp.config.php
          subPath: smtp.config.php
      volumes:
      - name: nextcloud-data
        persistentVolumeClaim:
          claimName: nextcloud-nextcloud-nextcloud
      - name: nextcloud-config
        configMap:
          name: nextcloud-nextcloud-config
      - name: nextcloud-s3
        secret:
          secretName: nextcloud-s3
      # Will mount configuration files as www-data (id: 33) for nextcloud
      securityContext:
        fsGroup: 33
---
# Source: nextcloud/charts/redis/templates/redis-master-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nextcloud-nextcloud-redis-master
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: nextcloud-nextcloud
    heritage: Helm
spec:
  selector:
    matchLabels:
      app: redis
      release: nextcloud-nextcloud
      role: master
  serviceName: nextcloud-nextcloud-redis-headless
  template:
    metadata:
      labels:
        app: redis
        chart: redis-11.0.5
        release: nextcloud-nextcloud
        role: master
      annotations:
        checksum/health: c0aae3fbf6b70535e576f3897c60cf19bbfa814f584e599380329bda59b56da1
        checksum/configmap: f8ab8ce93e6b4e78f477182c06db788d39b372cbb49261bf85c85cdfea869df5
        checksum/secret: 79779a23e0c21d77248d142206b297f89fa5241bb156f83be3705dbb0de0d6e8
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: default
      containers:
      - name: redis
        image: docker.io/bitnami/redis:6.0.8-debian-10-r0
        imagePullPolicy: "IfNotPresent"
        securityContext:
          runAsUser: 1001
        command:
        - /bin/bash
        - -c
        - /opt/bitnami/scripts/start-scripts/start-master.sh
        env:
        - name: REDIS_REPLICATION_MODE
          value: master
        - name: REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              name: nextcloud-nextcloud-redis
              key: redis-password
        - name: REDIS_TLS_ENABLED
          value: "no"
        - name: REDIS_PORT
          value: "6379"
        ports:
        - name: redis
          containerPort: 6379
        livenessProbe:
          initialDelaySeconds: 5
          periodSeconds: 5
          # One second longer than command timeout should prevent generation of zombie processes.
          timeoutSeconds: 6
          successThreshold: 1
          failureThreshold: 5
          exec:
            command:
            - sh
            - -c
            - /health/ping_liveness_local.sh 5
        readinessProbe:
          initialDelaySeconds: 5
          periodSeconds: 5
          timeoutSeconds: 2
          successThreshold: 1
          failureThreshold: 5
          exec:
            command:
            - sh
            - -c
            - /health/ping_readiness_local.sh 1
        resources:
          null
        volumeMounts:
        - name: start-scripts
          mountPath: /opt/bitnami/scripts/start-scripts
        - name: health
          mountPath: /health
        - name: redis-data
          mountPath: /data
          subPath:
        - name: config
          mountPath: /opt/bitnami/redis/mounted-etc
        - name: redis-tmp-conf
          mountPath: /opt/bitnami/redis/etc/
      volumes:
      - name: start-scripts
        configMap:
          name: nextcloud-nextcloud-redis-scripts
          defaultMode: 0755
      - name: health
        configMap:
          name: nextcloud-nextcloud-redis-health
          defaultMode: 0755
      - name: config
        configMap:
          name: nextcloud-nextcloud-redis
      - name: redis-tmp-conf
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: redis-data
      labels:
        app: redis
        release: nextcloud-nextcloud
        heritage: Helm
        component: master
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: "8Gi"
      selector:
  updateStrategy:
    type: RollingUpdate
---
# Source: nextcloud/charts/redis/templates/redis-slave-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nextcloud-nextcloud-redis-slave
  namespace: nextcloud
  labels:
    app: redis
    chart: redis-11.0.5
    release: nextcloud-nextcloud
    heritage: Helm
spec:
  replicas: 2
  serviceName: nextcloud-nextcloud-redis-headless
  selector:
    matchLabels:
      app: redis
      release: nextcloud-nextcloud
      role: slave
  template:
    metadata:
      labels:
        app: redis
        release: nextcloud-nextcloud
        chart: redis-11.0.5
        role: slave
      annotations:
        checksum/health: c0aae3fbf6b70535e576f3897c60cf19bbfa814f584e599380329bda59b56da1
        checksum/configmap: f8ab8ce93e6b4e78f477182c06db788d39b372cbb49261bf85c85cdfea869df5
        checksum/secret: 79779a23e0c21d77248d142206b297f89fa5241bb156f83be3705dbb0de0d6e8
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: default
      containers:
      - name: redis
        image: docker.io/bitnami/redis:6.0.8-debian-10-r0
        imagePullPolicy: "IfNotPresent"
        securityContext:
          runAsUser: 1001
        command:
        - /bin/bash
        - -c
        - /opt/bitnami/scripts/start-scripts/start-slave.sh
        env:
        - name: REDIS_REPLICATION_MODE
          value: slave
        - name: REDIS_MASTER_HOST
          value: nextcloud-nextcloud-redis-master-0.nextcloud-nextcloud-redis-headless.nextcloud.svc.cluster.local
        - name: REDIS_MASTER_PORT_NUMBER
          value: "6379"
        - name: REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              name: nextcloud-nextcloud-redis
              key: redis-password
        - name: REDIS_MASTER_PASSWORD
          valueFrom:
            secretKeyRef:
              name: nextcloud-nextcloud-redis
              key: redis-password
        - name: REDIS_TLS_ENABLED
          value: "no"
        - name: REDIS_PORT
          value: "6379"
        ports:
        - name: redis
          containerPort: 6379
        livenessProbe:
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 6
          successThreshold: 1
          failureThreshold: 5
          exec:
            command:
            - sh
            - -c
            - /health/ping_liveness_local_and_master.sh 5
        readinessProbe:
          initialDelaySeconds: 5
          periodSeconds: 10
          timeoutSeconds: 11
          successThreshold: 1
          failureThreshold: 5
          exec:
            command:
            - sh
            - -c
            - /health/ping_readiness_local_and_master.sh 10
        resources:
          null
        volumeMounts:
        - name: start-scripts
          mountPath: /opt/bitnami/scripts/start-scripts
        - name: health
          mountPath: /health
        - name: redis-data
          mountPath: /data
        - name: config
          mountPath: /opt/bitnami/redis/mounted-etc
        - name: redis-tmp-conf
          mountPath: /opt/bitnami/redis/etc
      volumes:
      - name: start-scripts
        configMap:
          name: nextcloud-nextcloud-redis-scripts
          defaultMode: 0755
      - name: health
        configMap:
          name: nextcloud-nextcloud-redis-health
          defaultMode: 0755
      - name: config
        configMap:
          name: nextcloud-nextcloud-redis
      - name: redis-tmp-conf
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: redis-data
      labels:
        app: redis
        release: nextcloud-nextcloud
        heritage: Helm
        component: slave
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: "8Gi"
      selector:
  updateStrategy:
    type: RollingUpdate
---
# Source: nextcloud/templates/cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: nextcloud-nextcloud-cron
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
  annotations:
    {}
spec:
  schedule: "*/5 * * * *"
  concurrencyPolicy: Forbid
  failedJobsHistoryLimit: 5
  successfulJobsHistoryLimit: 2
  jobTemplate:
    metadata:
      labels:
        app.kubernetes.io/name: nextcloud
        app.kubernetes.io/managed-by: Helm
    spec:
      template:
        metadata:
          labels:
            app.kubernetes.io/name: nextcloud
            app.kubernetes.io/managed-by: Helm
        spec:
          restartPolicy: Never
          containers:
          - name: nextcloud
            image: "nextcloud:21.0.1-apache"
            imagePullPolicy: IfNotPresent
            command: [ "curl" ]
            args:
            - "--fail"
            - "-L"
            - "https://nextcloud.cluster.fun/cron.php"
            resources:
              requests:
                memory: 500Mi
---
# Source: nextcloud/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nextcloud-nextcloud
  labels:
    app.kubernetes.io/name: nextcloud
    helm.sh/chart: nextcloud-2.6.3
    app.kubernetes.io/instance: nextcloud-nextcloud
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: app
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
  rules:
  - host: nextcloud.cluster.fun
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nextcloud-nextcloud
            port:
              number: 8080
  tls:
  - hosts:
    - nextcloud.cluster.fun
    secretName: nextcloud-ingress
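
Note: judging by the "# Source:" headers, this file is the rendered output of
the nextcloud chart 2.6.3, committed directly in place of the HelmRelease
removed below. A sketch for regenerating it on a chart bump — the release
name, values file, and output path are assumptions, not taken from this
commit:

helm repo add nextcloud https://nextcloud.github.io/helm/
helm template nextcloud-nextcloud nextcloud/nextcloud --version 2.6.3 \
  --namespace nextcloud -f values.yaml > nextcloud.yaml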


@ -1,58 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: nextcloud-values
  namespace: nextcloud
  annotations:
    kube-1password: v32a4zpuvhmxxrwmtmmv6526ry
    kube-1password/vault: Kubernetes
    kube-1password/secret-text-key: values.yaml
type: Opaque
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: nextcloud
  namespace: nextcloud
spec:
  chart:
    repository: https://nextcloud.github.io/helm/
    name: nextcloud
    version: 2.6.3
  maxHistory: 5
  valuesFrom:
  - secretKeyRef:
      name: nextcloud-values
      namespace: nextcloud
      key: values.yaml
      optional: false
  values:
    image:
      tag: 21.0.1-apache
      pullPolicy: IfNotPresent
    replicaCount: 1
    ingress:
      enabled: true
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt
        nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
        nginx.ingress.kubernetes.io/proxy-body-size: "0"
      tls:
      - hosts:
        - nextcloud.cluster.fun
        secretName: nextcloud-ingress
    nextcloud:
      host: nextcloud.cluster.fun
    persistence:
      enabled: true
      storageClass: scw-bssd-retain
      size: 5Gi
    redis:
      enabled: true
    cronjob:
      enabled: true
      resources:
        requests:
          memory: 500Mi


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: node-red
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: node-red
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: opengraph
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: opengraph
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: outline
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: StatefulSet
    name: outline
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: paradoxfox
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: paradoxfox
  updatePolicy:
    updateMode: "Auto"

manifests/qr/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: qr
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: qr
  updatePolicy:
    updateMode: "Auto"

manifests/rss/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: rss
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: rss
  updatePolicy:
    updateMode: "Auto"


@ -43,6 +43,7 @@ spec:
       containers:
       - name: skooner
         image: ghcr.io/skooner-k8s/skooner:stable
+        imagePullPolicy: Always
         ports:
         - containerPort: 4654
         livenessProbe:


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: svg-to-dxf
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: svg-to-dxf
  updatePolicy:
    updateMode: "Auto"

manifests/talks/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: talks
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: talks
  updatePolicy:
    updateMode: "Auto"

manifests/tank/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: tank
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: tank
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: text-to-dxf
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: text-to-dxf
  updatePolicy:
    updateMode: "Auto"

manifests/til/vpa.yaml (new file, +11)

@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: til
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: til
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: tweetsvg
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: tweetsvg
  updatePolicy:
    updateMode: "Auto"


@ -0,0 +1,11 @@
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: twitter-profile-pic
spec:
  targetRef:
    apiVersion: "apps/v1"
    kind: Deployment
    name: twitter-profile-pic
  updatePolicy:
    updateMode: "Auto"
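
Note: with updateMode "Auto" the VPA updater evicts pods so they restart with
the recommended resource requests. To inspect what the recommender would
apply to one of these targets — a sketch; the field path is the standard VPA
status:

kubectl get vpa twitter-profile-pic \
  -o jsonpath='{.status.recommendation.containerRecommendations}'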