Compare commits


No commits in common. "master" and "increase_cluster_max" have entirely different histories.

156 changed files with 5842 additions and 9267 deletions


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-auth-proxy
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: auth-proxy
name: cluster-fun (v2)
source:
path: manifests/auth-proxy
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: base64
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: base64
name: civo
source:
path: manifests/base64
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-blackhole
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: kube-system
name: cluster-fun (v2)
source:
path: manifests/blackhole
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-blog
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: blog
name: cluster-fun (v2)
source:
path: manifests/blog
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cel-tester
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: cel-tester
name: civo
source:
path: manifests/cel-tester
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data


@ -1,76 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager-civo
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: cert-manager
name: civo
source:
path: manifests/certmanager-civo
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-cert-manager-issuer
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: cert-manager
name: cluster-fun (v2)
source:
path: manifests/certmanager_chart
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-cert-manager-chart
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: cert-manager
name: cluster-fun (v2)
source:
repoURL: 'https://charts.jetstack.io'
targetRevision: 1.11.0
chart: cert-manager
helm:
version: v3
values: |-
installCRDs: "true"
resources:
requests:
memory: 32Mi
limits:
memory: 64Mi
syncPolicy:
automated: {}
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: civo-versions
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: civo-versions
name: civo
source:
path: manifests/civo-versions
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cv
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: cv
name: civo
source:
path: manifests/cv
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-dashboard
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: dashboard
name: cluster-fun (v2)
source:
path: manifests/dashboard
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-devstats-viewer
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: devstats-viewer
name: cluster-fun (v2)
source:
path: manifests/devstats-viewer
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
syncOptions:
- CreateNamespace=true
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: feed-fetcher
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: feed-fetcher
name: civo
source:
path: manifests/feed-fetcher
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-git-sync
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: git-sync
name: cluster-fun (v2)
source:
path: manifests/git-sync
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-gitea
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: gitea
name: cluster-fun (v2)
source:
path: manifests/gitea
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: goplayground
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: goplayground
name: civo
source:
path: manifests/goplayground
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data


@ -1,20 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: link
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: link
name: civo
source:
path: manifests/link
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-marcusnoble
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: marcusnoble
name: cluster-fun (v2)
source:
path: manifests/marcusnoble
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-mastodon-digest
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: mastodon-digest
name: cluster-fun (v2)
source:
path: manifests/mastodon-digest
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mastodon-to-airtable
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: mastodon-to-airtable
name: civo
source:
path: manifests/mastodon-to-airtable
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-matrix
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: chat
name: cluster-fun (v2)
source:
path: manifests/matrix_chart
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
syncOptions:
- CreateNamespace=true
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-mealie
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: mealie
name: cluster-fun (v2)
source:
path: manifests/mealie
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: monitoring-civo
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: monitoring
name: civo
source:
path: manifests/monitoring-civo
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-monitoring
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: monitoring
name: cluster-fun (v2)
source:
path: manifests/monitoring
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-nextcloud
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: nextcloud
name: cluster-fun (v2)
source:
path: manifests/nextcloud_chart
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
syncOptions:
- CreateNamespace=true
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-nginx-lb
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: kube-system
name: cluster-fun (v2)
source:
path: manifests/nginx-lb
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-nodered
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: node-red
name: cluster-fun (v2)
source:
path: manifests/nodered
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: opengraph
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: opengraph
name: civo
source:
path: manifests/opengraph
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-outline
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: outline
name: cluster-fun (v2)
source:
path: manifests/outline
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: proxy-civo
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: proxy-civo
name: civo
source:
path: manifests/proxy-civo
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: qr
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: qr
name: civo
source:
path: manifests/qr
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-redis
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: redis
name: cluster-fun (v2)
source:
path: manifests/redis
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,23 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-reloader
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: kube-system
name: cluster-fun (v2)
source:
repoURL: 'https://stakater.github.io/stakater-charts'
targetRevision: v0.0.89
chart: reloader
syncPolicy:
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-rss
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: rss
name: cluster-fun (v2)
source:
path: manifests/rss
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-starling
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: starling
name: cluster-fun (v2)
source:
path: manifests/starling
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: svg-to-dxf
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: svg-to-dxf
name: civo
source:
path: manifests/svg-to-dxf
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: talks
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: talks
name: civo
source:
path: manifests/talks
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-tank
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: tank
name: cluster-fun (v2)
source:
path: manifests/tank
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: text-to-dxf
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: text-to-dxf
name: civo
source:
path: manifests/text-to-dxf
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: til
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: til
name: civo
source:
path: manifests/til
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,24 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: traefik-civo
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: kube-system
name: civo
source:
path: manifests/traefik
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: tweetsvg
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: tweetsvg
name: civo
source:
path: manifests/tweetsvg
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-twitter-profile-pic
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: twitter-profile-pic
name: cluster-fun (v2)
source:
path: manifests/twitter-profile-pic
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image
---


@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: twitter-to-airtable
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: twitter-to-airtable
name: civo
source:
path: manifests/twitter-to-airtable
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
automated: {}
syncOptions:
- CreateNamespace=true
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
- group: apps
kind: Deployment
jqPathExpressions:
- .spec.template.spec.containers[]?.image


@ -1,25 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cluster-fun-wallabag
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: cluster.fun
destination:
namespace: wallabag
name: cluster-fun (v2)
source:
path: manifests/wallabag
repoURL: "https://git.cluster.fun/AverageMarcus/cluster.fun.git"
targetRevision: HEAD
syncPolicy:
syncOptions:
- CreateNamespace=true
automated: {}
ignoreDifferences:
- kind: Secret
jsonPointers:
- /data
---


@ -1,201 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: auth-proxy
namespace: auth-proxy
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- downloads.cluster.fun
- argo.cluster.fun
- code.cluster.fun
- jackett.cluster.fun
- printer.cluster.fun
- ender3pro.printer.cluster.fun
- flsunq5.printer.cluster.fun
- elegoomars2.printer.cluster.fun
- radarr.cluster.fun
- readarr.cluster.fun
- sonarr.cluster.fun
- lidarr.cluster.fun
- prowlarr.cluster.fun
- transmission.cluster.fun
- tekton.cluster.fun
- changedetection.cluster.fun
- grafana.cluster.fun
secretName: auth-proxy-ingress
rules:
- host: downloads.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: argo.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: code.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: jackett.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: printer.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: ender3pro.printer.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: flsunq5.printer.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: elegoomars2.printer.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: radarr.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: readarr.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: sonarr.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: lidarr.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: prowlarr.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: transmission.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: tekton.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: changedetection.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth
- host: grafana.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: auth


@ -1,85 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: tekton-el
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki-distributed
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: vmcluster
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---


@ -1,5 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: auth-proxy
---


@ -1,25 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: non-auth-proxy
namespace: auth-proxy
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- hello-world.cluster.fun
secretName: non-auth-proxy-ingress
rules:
- host: hello-world.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: tailscale-proxy
port:
name: non-auth


@ -1,132 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: host-mappings
namespace: auth-proxy
labels:
app: proxy
data:
mapping.json: |
{
"tekton-el.auth-proxy.svc": "tekton-el.cluster.local",
"vmcluster.auth-proxy.svc": "vmcluster.cluster.local",
"loki.auth-proxy.svc": "loki-write.cluster.local",
"loki.auth-proxy.svc:80": "loki-write.cluster.local",
"loki-distributed.auth-proxy.svc": "loki-loki.cluster.local",
"loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local"
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: internal-proxy
namespace: auth-proxy
labels:
app: internal-proxy
annotations:
configmap.reloader.stakater.com/reload: "host-mappings"
secret.reloader.stakater.com/reload: "tailscale-auth"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: internal-proxy
template:
metadata:
labels:
app: internal-proxy
spec:
serviceAccountName: default
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
- 100.100.100.100
containers:
- name: proxy
image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest
imagePullPolicy: Always
env:
- name: PROXY_DESTINATION
value: talos.tail4dfb.ts.net
- name: PORT
value: "8080"
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_HOSTNAME
value: auth-proxy-internal-proxy
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- name: host-mappings
mountPath: /config/
- name: oauth-proxy
image: quay.io/oauth2-proxy/oauth2-proxy:v7.7.1
args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://localhost:8080
- --http-address=0.0.0.0:8181
- --email-domain=*
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQNFT
- --cookie-expire=336h0m0s
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: auth-proxy
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: auth-proxy
ports:
- containerPort: 8181
protocol: TCP
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
volumes:
- name: host-mappings
configMap:
name: host-mappings
---
apiVersion: v1
kind: Service
metadata:
name: tailscale-proxy
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: non-auth
port: 80
protocol: TCP
targetPort: 8080
- name: auth
port: 81
protocol: TCP
targetPort: 8181
selector:
app: internal-proxy
type: ClusterIP
---


@ -1,20 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: auth-proxy
namespace: auth-proxy
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: tailscale-auth
namespace: auth-proxy
annotations:
kube-1password: 2cqycmsgv5r7vcyvjpblcl2l4y
kube-1password/vault: Kubernetes
type: Opaque
---


@ -1,71 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: base64
namespace: base64
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: base64
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: base64
namespace: base64
spec:
replicas: 1
selector:
matchLabels:
app: base64
template:
metadata:
labels:
app: base64
spec:
imagePullSecrets:
- name: docker-config
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/base64:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 5Mi
requests:
memory: 5Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: base64
namespace: base64
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- base64.cluster.fun
secretName: base64-ingress
rules:
- host: base64.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: base64
port:
number: 80


@ -37,11 +37,12 @@ spec:
resources:
limits:
memory: 10Mi
requests:
memory: 10Mi
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: black-hole
@ -51,9 +52,6 @@ spec:
- http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: black-hole
port:
number: 80
serviceName: black-hole
servicePort: 80


@ -1,4 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
name: blog
---
apiVersion: v1
kind: Service
metadata:
name: blog
@ -29,7 +34,7 @@ spec:
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/blog:latest
image: docker.cluster.fun/averagemarcus/blog:latest
imagePullPolicy: Always
ports:
- containerPort: 8000
@ -39,27 +44,18 @@ spec:
memory: 200Mi
requests:
memory: 200Mi
livenessProbe:
httpGet:
path: /healthz
port: web
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /healthz
port: web
initialDelaySeconds: 10
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: blog
namespace: blog
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- marcusnoble.co.uk
@ -69,24 +65,22 @@ spec:
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: blog
port:
number: 80
serviceName: blog
servicePort: 80
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: blog-www
namespace: blog
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- www.marcusnoble.co.uk
@ -96,24 +90,22 @@ spec:
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: blog
port:
number: 80
serviceName: blog
servicePort: 80
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: blog-blog
namespace: blog
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- blog.marcusnoble.co.uk
@ -123,10 +115,7 @@ spec:
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: blog
port:
number: 80
serviceName: blog
servicePort: 80

manifests/buzzers.yaml (Normal file, 70 additions)

@ -0,0 +1,70 @@
apiVersion: v1
kind: Namespace
metadata:
name: buzzers
---
apiVersion: v1
kind: Service
metadata:
name: buzzers
namespace: buzzers
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: buzzers
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: buzzers
namespace: buzzers
spec:
replicas: 1
selector:
matchLabels:
app: buzzers
template:
metadata:
labels:
app: buzzers
spec:
containers:
- name: web
image: docker.cluster.fun/averagemarcus/buzzers:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 283Mi
requests:
memory: 283Mi
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: buzzers
namespace: buzzers
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- buzzers.cluster.fun
secretName: buzzers-ingress
rules:
- host: buzzers.cluster.fun
http:
paths:
- path: /
backend:
serviceName: buzzers
servicePort: 80

manifests/cctv.yaml (Normal file, 114 additions)

@ -0,0 +1,114 @@
apiVersion: v1
kind: Namespace
metadata:
name: cctv
---
apiVersion: v1
kind: Secret
metadata:
name: cctv-auth
namespace: cctv
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cctv-auth
namespace: cctv
labels:
app: cctv-auth
spec:
replicas: 1
selector:
matchLabels:
app: cctv-auth
template:
metadata:
labels:
app: cctv-auth
spec:
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://inlets.inlets.svc.cluster.local
- --http-address=$(HOST_IP):8080
- --redirect-url=https://cctv.cluster.fun/oauth2/callback
- --email-domain=*
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQN
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: cctv-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: cctv-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.1
name: oauth-proxy
ports:
- containerPort: 8080
protocol: TCP
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
---
apiVersion: v1
kind: Service
metadata:
name: cctv-auth
namespace: cctv
labels:
app: cctv-auth
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: cctv-auth
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cctv-auth
namespace: cctv
labels:
app: cctv-auth
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- cctv.cluster.fun
secretName: cctv-ingress
rules:
- host: cctv.cluster.fun
http:
paths:
- path: /
backend:
serviceName: cctv-auth
servicePort: 80


@ -1,70 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: cel-tester
namespace: cel-tester
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: cel-tester
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cel-tester
namespace: cel-tester
spec:
replicas: 1
selector:
matchLabels:
app: cel-tester
template:
metadata:
labels:
app: cel-tester
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/cel-tester:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 20Mi
requests:
memory: 20Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: cel-tester
namespace: cel-tester
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- cel-tester.cluster.fun
secretName: cel-tester-ingress
rules:
- host: cel-tester.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cel-tester
port:
number: 80


@ -1,23 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
certmanager.k8s.io/disable-validation: "true"
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: letsencrypt@marcusnoble.co.uk
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: traefik


@ -0,0 +1,47 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
certmanager.k8s.io/disable-validation: "true"
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: cert-manager
namespace: cert-manager
spec:
chart:
repository: https://charts.jetstack.io
name: cert-manager
version: v0.15.0
maxHistory: 5
values:
installCRDs: "true"
resources:
requests:
memory: 32Mi
limits:
memory: 64Mi
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: letsencrypt@marcusnoble.co.uk
privateKeySecretRef:
name: letsencrypt
solvers:
- selector: {}
http01:
ingress:
class: traefik


@ -1,23 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
certmanager.k8s.io/disable-validation: "true"
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: letsencrypt@marcusnoble.co.uk
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx


@ -1,88 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: civo-versions
namespace: civo-versions
annotations:
kube-1password: ybo7axn7wpks4z3u3gjhibnu5i
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: civo-versions
namespace: civo-versions
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: civo-versions
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: civo-versions
namespace: civo-versions
spec:
replicas: 1
selector:
matchLabels:
app: civo-versions
template:
metadata:
labels:
app: civo-versions
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/civo-versions:latest
imagePullPolicy: Always
ports:
- containerPort: 8000
name: web
env:
- name: PORT
value: "8000"
- name: API_KEY
valueFrom:
secretKeyRef:
name: civo-versions
key: API_KEY
resources:
limits:
memory: 30Mi
requests:
memory: 30Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: civo-versions
namespace: civo-versions
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- civo-versions.cluster.fun
secretName: civo-versions-ingress
rules:
- host: civo-versions.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: civo-versions
port:
number: 80

manifests/cors-proxy.yaml (Normal file, 90 additions)

@ -0,0 +1,90 @@
apiVersion: v1
kind: Namespace
metadata:
name: cors-proxy
---
apiVersion: v1
kind: Service
metadata:
name: cors-proxy
namespace: cors-proxy
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8000
name: web
selector:
app: cors-proxy
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cors-proxy
namespace: cors-proxy
spec:
replicas: 1
selector:
matchLabels:
app: cors-proxy
template:
metadata:
labels:
app: cors-proxy
spec:
containers:
- name: web
image: docker.cluster.fun/averagemarcus/cors-proxy:latest
imagePullPolicy: Always
ports:
- containerPort: 8000
name: web
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cors-proxy
namespace: cors-proxy
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- cors-proxy.cluster.fun
secretName: cors-proxy-ingress
rules:
- host: cors-proxy.cluster.fun
http:
paths:
- path: /
backend:
serviceName: cors-proxy
servicePort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cors-proxy-mn
namespace: cors-proxy
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- cors-proxy.marcusnoble.co.uk
secretName: cors-proxy-mn-ingress
rules:
- host: cors-proxy.marcusnoble.co.uk
http:
paths:
- path: /
backend:
serviceName: cors-proxy
servicePort: 80


@ -1,8 +1,13 @@
apiVersion: v1
kind: Namespace
metadata:
name: dashboard
---
apiVersion: v1
kind: Secret
metadata:
name: docker-config
namespace: cv
namespace: dashboard
annotations:
kube-1password: i6ngbk5zf4k52xgwdwnfup5bby
kube-1password/vault: Kubernetes
@ -14,8 +19,8 @@ data:
apiVersion: v1
kind: Service
metadata:
name: cv
namespace: cv
name: dashboard
namespace: dashboard
spec:
type: ClusterIP
ports:
@ -23,62 +28,58 @@ spec:
targetPort: web
name: web
selector:
app: cv
app: dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cv
namespace: cv
name: dashboard
namespace: dashboard
spec:
replicas: 1
selector:
matchLabels:
app: cv
app: dashboard
template:
metadata:
labels:
app: cv
app: dashboard
spec:
imagePullSecrets:
- name: docker-config
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus-private/cv:latest
image: docker.cluster.fun/private/dashboard:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 10Mi
memory: 50Mi
requests:
memory: 10Mi
memory: 50Mi
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cv
namespace: cv
name: dashboard
namespace: dashboard
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- cv.marcusnoble.co.uk
secretName: cv-ingress
- dash.cluster.fun
secretName: dashboard-ingress
rules:
- host: cv.marcusnoble.co.uk
- host: dash.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cv
port:
number: 80
serviceName: dashboard
servicePort: 80


@ -1,131 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: docker-config
namespace: dashboard
annotations:
kube-1password: i6ngbk5zf4k52xgwdwnfup5bby
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: .dockerconfigjson
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: e30=
---
apiVersion: v1
kind: Secret
metadata:
name: dashboard-auth
namespace: dashboard
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: dashboard
namespace: dashboard
spec:
type: ClusterIP
ports:
- port: 80
targetPort: auth
name: web
selector:
app: dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dashboard
namespace: dashboard
spec:
replicas: 1
selector:
matchLabels:
app: dashboard
template:
metadata:
labels:
app: dashboard
spec:
imagePullSecrets:
- name: docker-config
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://localhost:80
- --http-address=$(HOST_IP):8000
- --redirect-url=https://dash.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQNFT
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: dashboard-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: dashboard-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v7.7.1
name: oauth-proxy
ports:
- containerPort: 8000
protocol: TCP
name: auth
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
- name: web
image: rg.fr-par.scw.cloud/averagemarcus-private/dashboard:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dashboard
namespace: dashboard
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- dash.cluster.fun
secretName: dashboard-ingress
rules:
- host: dash.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: dashboard
port:
number: 80


@ -1,69 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: devstats-viewer
namespace: devstats-viewer
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: devstats-viewer
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: devstats-viewer
namespace: devstats-viewer
spec:
replicas: 2
selector:
matchLabels:
app: devstats-viewer
template:
metadata:
labels:
app: devstats-viewer
spec:
imagePullSecrets:
- name: docker-config
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/devstats-viewer:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: web
resources:
limits:
memory: 10Mi
requests:
memory: 10Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: devstats-viewer
namespace: devstats-viewer
annotations:
cert-manager.io/cluster-issuer: letsencrypt
ingress.kubernetes.io/ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- devstats.cluster.fun
secretName: devstats-viewer-ingress
rules:
- host: devstats.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: devstats-viewer
port:
number: 80

manifests/downloads.yaml (Normal file, 115 additions)

@ -0,0 +1,115 @@
apiVersion: v1
kind: Namespace
metadata:
name: downloads
---
apiVersion: v1
kind: Secret
metadata:
name: downloads-auth
namespace: downloads
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: downloads-auth
namespace: downloads
labels:
app: downloads-auth
spec:
replicas: 1
selector:
matchLabels:
app: downloads-auth
template:
metadata:
labels:
app: downloads-auth
spec:
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://inlets.inlets.svc.cluster.local
- --http-address=$(HOST_IP):8080
- --redirect-url=https://downloads.cluster.fun/oauth2/callback
- --email-domain=*
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQN
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: downloads-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: downloads-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.1
name: oauth-proxy
ports:
- containerPort: 8080
protocol: TCP
resources:
limits:
memory: 250Mi
requests:
memory: 250Mi
---
apiVersion: v1
kind: Service
metadata:
name: downloads-auth
namespace: downloads
labels:
app: downloads-auth
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: downloads-auth
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: downloads-auth
namespace: downloads
labels:
app: downloads-auth
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- downloads.cluster.fun
secretName: downloads-ingress
rules:
- host: downloads.cluster.fun
http:
paths:
- path: /
backend:
serviceName: downloads-auth
servicePort: 80


@ -1,65 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: feed-fetcher
namespace: feed-fetcher
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: feed-fetcher
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: feed-fetcher
namespace: feed-fetcher
spec:
replicas: 1
selector:
matchLabels:
app: feed-fetcher
template:
metadata:
labels:
app: feed-fetcher
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/feed-fetcher:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
name: web
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: feed-fetcher
namespace: feed-fetcher
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- feed-fetcher.cluster.fun
secretName: feed-fetcher-ingress
rules:
- host: feed-fetcher.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: feed-fetcher
port:
number: 80


@ -1,109 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: git-sync-github
namespace: git-sync
annotations:
kube-1password: cfo2ufhgem57clbscxetxgevue
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
data:
---
apiVersion: v1
kind: Secret
metadata:
name: git-sync-gitea
namespace: git-sync
annotations:
kube-1password: b7kpdlcvt7y63bozu3i4j4lojm
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
data:
---
apiVersion: v1
kind: Secret
metadata:
name: git-sync-gitlab
namespace: git-sync
annotations:
kube-1password: t47v3xdgadiifgoi4wmqibrlty
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
data:
---
apiVersion: v1
kind: Secret
metadata:
name: git-sync-bitbucket
namespace: git-sync
annotations:
kube-1password: adrki45krr2tq34sug7dhdk5iy
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
data:
---
apiVersion: v1
kind: Secret
metadata:
name: git-sync-codeberg
namespace: git-sync
annotations:
kube-1password: 5ynzgk6qcgshztkjbddwalixfq
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
data:
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: git-sync
namespace: git-sync
spec:
schedule: "0 */1 * * *"
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 1
successfulJobsHistoryLimit: 1
jobTemplate:
metadata:
labels:
cronjob: git-sync
spec:
backoffLimit: 1
template:
spec:
containers:
- name: sync
image: rg.fr-par.scw.cloud/averagemarcus/git-sync:latest
imagePullPolicy: Always
env:
- name: GITHUB_TOKEN
valueFrom:
secretKeyRef:
name: git-sync-github
key: token
- name: GITEA_TOKEN
valueFrom:
secretKeyRef:
name: git-sync-gitea
key: token
- name: GITLAB_TOKEN
valueFrom:
secretKeyRef:
name: git-sync-gitlab
key: token
- name: BITBUCKET_TOKEN
valueFrom:
secretKeyRef:
name: git-sync-bitbucket
key: token
- name: CODEBERG_TOKEN
valueFrom:
secretKeyRef:
name: git-sync-codeberg
key: token
restartPolicy: Never


@ -1,4 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
name: gitea
---
apiVersion: v1
kind: Secret
metadata:
name: gitea-secret-key
@ -42,7 +47,7 @@ spec:
spec:
containers:
- name: git
image: gitea/gitea:1.22.3
image: gitea/gitea:1.11
env:
- name: APP_NAME
value: "Git"
@ -64,8 +69,6 @@ spec:
value: "20"
- name: DEFAULT_THEME
value: arc-green
- name: ALLOWED_HOST_LIST
value: "*"
- name: SECRET_KEY
valueFrom:
secretKeyRef:
@ -77,6 +80,7 @@ spec:
resources:
requests:
memory: 400Mi
volumeMounts:
- mountPath: /data
name: git-data
@ -90,17 +94,17 @@ spec:
requests:
storage: 20Gi
---
apiVersion: networking.k8s.io/v1
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: git
namespace: gitea
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- git.cluster.fun
@ -110,9 +114,6 @@ spec:
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: git
port:
number: 80
serviceName: git
servicePort: 80


@ -1,14 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: git
name: git-data-git-0
namespace: gitea
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: sbs-default-retain


@ -1,70 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: goplayground
namespace: goplayground
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: goplayground
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: goplayground
namespace: goplayground
spec:
replicas: 1
selector:
matchLabels:
app: goplayground
template:
metadata:
labels:
app: goplayground
spec:
containers:
- name: web
image: x1unix/go-playground:2.3.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8000
name: web
resources:
limits:
memory: 20Mi
requests:
memory: 20Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: goplayground
namespace: goplayground
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- go.cluster.fun
secretName: goplayground-ingress
rules:
- host: go.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: goplayground
port:
number: 80


@ -0,0 +1,57 @@
apiVersion: v1
kind: Namespace
metadata:
name: harbor
---
apiVersion: v1
kind: Secret
metadata:
name: harbor-values
namespace: harbor
annotations:
kube-1password: igey7vjjiqmj25v64eck7cyj34
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: values.yaml
type: Opaque
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: harbor
namespace: harbor
spec:
chart:
repository: https://helm.goharbor.io
name: harbor
version: 1.3.2
maxHistory: 4
skipCRDs: false
valuesFrom:
- secretKeyRef:
name: harbor-values
namespace: harbor
key: values.yaml
optional: false
values:
portal:
resources:
requests:
memory: 64Mi
core:
resources:
requests:
memory: 64Mi
jobservice:
resources:
requests:
memory: 64Mi
registry:
registry:
resources:
requests:
memory: 64Mi
controller:
resources:
requests:
memory: 64Mi

103
manifests/inlets.yaml Normal file

@ -0,0 +1,103 @@
apiVersion: v1
kind: Namespace
metadata:
name: inlets
---
apiVersion: v1
kind: Secret
metadata:
name: inlets
namespace: inlets
annotations:
kube-1password: podju6t2s2osc3vbkimyce25ti
kube-1password/vault: Kubernetes
kube-1password/password-key: token
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: inlets
namespace: inlets
labels:
app: inlets
spec:
type: ClusterIP
ports:
- port: 80
protocol: TCP
targetPort: 8000
selector:
app: inlets
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: inlets
namespace: inlets
labels:
app: inlets
spec:
replicas: 1
selector:
matchLabels:
app: inlets
template:
metadata:
labels:
app: inlets
spec:
containers:
- name: inlets
image: inlets/inlets:2.7.0
imagePullPolicy: Always
command: ["inlets"]
args:
- "server"
- "--token-from=/var/inlets/token"
volumeMounts:
- name: inlets-token-volume
mountPath: /var/inlets/
volumes:
- name: inlets-token-volume
secret:
secretName: inlets
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: inlets
namespace: inlets
spec:
rules:
- host: inlets.cluster.fun
http:
paths:
- path: /
backend:
serviceName: inlets
servicePort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: pyload
namespace: inlets
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- pyload.cluster.fun
secretName: pyload-ingress
rules:
- host: pyload.cluster.fun
http:
paths:
- path: /
backend:
serviceName: inlets
servicePort: 80

107
manifests/kube-janitor.yaml Normal file

@ -0,0 +1,107 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-janitor
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-janitor
namespace: kube-janitor
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-janitor
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- apiGroups:
- "*"
resources:
- "*"
verbs:
- get
- watch
- list
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-janitor
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-janitor
subjects:
- kind: ServiceAccount
name: kube-janitor
namespace: kube-janitor
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-janitor
namespace: kube-janitor
data:
rules.yaml: |-
rules:
- id: tekton-tasks
resources:
- pods
- pipelineruns
jmespath: "(metadata.labels.\"tekton.dev/pipeline\")"
ttl: 3h
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
application: kube-janitor
version: v20.4.1
name: kube-janitor
namespace: kube-janitor
spec:
replicas: 1
selector:
matchLabels:
application: kube-janitor
template:
metadata:
labels:
application: kube-janitor
version: v20.4.1
spec:
serviceAccountName: kube-janitor
containers:
- name: janitor
image: hjacobs/kube-janitor:20.4.1
args:
- --interval=15
- --rules-file=/config/rules.yaml
- --include-namespaces=tekton-pipelines
- --include-resources=pods
resources:
limits:
memory: 100Mi
requests:
memory: 100Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- name: config-volume
mountPath: /config
volumes:
- name: config-volume
configMap:
name: kube-janitor


@ -1,105 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: urls-map
namespace: link
labels:
app: link
data:
urls.yaml: |
mn: https://marcusnoble.co.uk
whites: https://twitter.com/whites11/status/1484053621448785920
devopsnotts22: https://noti.st/averagemarcus/E8Ldoh/managing-kubernetes-without-losing-your-cool
kubernetes-cool: https://noti.st/averagemarcus/E8Ldoh/managing-kubernetes-without-losing-your-cool
klustered: https://gist.githubusercontent.com/AverageMarcus/e58301ecf3455caa1638c3ffe70ed138/raw/klustered.sh
wonders-and-woes: https://noti.st/averagemarcus/sWywEJ/the-wonders-and-woes-of-webhooks
kubehuddle: https://noti.st/averagemarcus/TqCEd4/the-wonders-and-woes-of-webhooks
kcduk: https://noti.st/averagemarcus/fxN4gl/managing-kubernetes-without-losing-your-cool
wonders-and-woes-webinar: https://noti.st/averagemarcus/Hw2IXG/the-wonders-and-woes-of-webhooks
kcdukraine: https://noti.st/averagemarcus/quuysq/managing-kubernetes-without-losing-your-cool
devopsox23: https://noti.st/averagemarcus/quuysq/managing-kubernetes-without-losing-your-cool
dddem23: https://noti.st/averagemarcus/Rt4hFh/managing-kubernetes-without-losing-your-cool
kube-london: https://noti.st/averagemarcus/SFD1bY/the-wonders-and-woes-of-webhooks
kcduk23: https://noti.st/averagemarcus/4YvpTx/webhooks-whats-the-worst-that-could-happen
rejekts23: https://noti.st/averagemarcus/Bi7qLP/webhooks-whats-the-worst-that-could-happen
rejekts24: https://speaking.marcusnoble.co.uk/pg46DB/from-fragile-to-resilient-validatingadmissionpolicies-strengthen-kubernetes
lopug24: https://speaking.marcusnoble.co.uk/I6dyx4/webhooks-whats-the-worst-that-could-happen
kcduk24: https://speaking.marcusnoble.co.uk/0qcuN9/from-fragile-to-resilient-validatingadmissionpolicies-strengthen-kubernetes
rejektsna24: https://speaking.marcusnoble.co.uk/dALiFY/from-fragile-to-resilient-validatingadmissionpolicies-strengthen-kubernetes
kcddk24: https://speaking.marcusnoble.co.uk/FU4W7x/from-fragile-to-resilient-validatingadmissionpolicies-strengthen-kubernetes
---
apiVersion: v1
kind: Service
metadata:
name: link
namespace: link
labels:
app: link
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: link
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: link
namespace: link
labels:
app: link
spec:
replicas: 1
selector:
matchLabels:
app: link
template:
metadata:
labels:
app: link
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/link:latest
imagePullPolicy: Always
ports:
- containerPort: 5050
name: web
volumeMounts:
- name: config
mountPath: /config
volumes:
- name: config
configMap:
name: urls-map
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: link
namespace: link
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- go-get.link
secretName: link-ingress
rules:
- host: go-get.link
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: link
port:
number: 80

114
manifests/linx-server.yaml Normal file

@ -0,0 +1,114 @@
apiVersion: v1
kind: Namespace
metadata:
name: linx-server
---
apiVersion: v1
kind: ConfigMap
metadata:
name: linx-server
namespace: linx-server
data:
linx-server.conf: |-
sitename = share
maxsize = 524288000
maxexpiry = 0
selifpath = f
nologs = false
force-random-filename = false
s3-endpoint = https://s3.fr-par.scw.cloud
s3-region = fr-par
s3-bucket = cluster.fun-linx
---
apiVersion: v1
kind: Secret
metadata:
name: linx-server-s3
namespace: linx-server
annotations:
kube-1password: d5dgclm3qrxd4fntivv26ec3ee
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: linx-server
namespace: linx-server
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: linx-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: linx-server
namespace: linx-server
spec:
replicas: 2
selector:
matchLabels:
app: linx-server
template:
metadata:
labels:
app: linx-server
spec:
containers:
- name: web
image: andreimarcu/linx-server:version-2.3.5
imagePullPolicy: Always
args:
- -config
- /config/linx-server.conf
ports:
- containerPort: 8080
name: web
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: linx-server-s3
key: username
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: linx-server-s3
key: password
volumeMounts:
- name: config
mountPath: /config
volumes:
- name: config
configMap:
name: linx-server
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: linx-server
namespace: linx-server
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- share.cluster.fun
secretName: linx-server-ingress
rules:
- host: share.cluster.fun
http:
paths:
- path: /
backend:
serviceName: linx-server
servicePort: 80

175
manifests/loki_chart.yaml Normal file

@ -0,0 +1,175 @@
apiVersion: v1
kind: Namespace
metadata:
name: logging
---
apiVersion: v1
kind: Secret
metadata:
name: grafana-credentials
namespace: logging
annotations:
kube-1password: wpynfxkdipeeacyfxkvtdsuj54
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: loki
namespace: logging
spec:
chart:
repository: https://grafana.github.io/loki/charts
name: loki-stack
version: 0.36.2
maxHistory: 4
skipCRDs: false
values:
fluent-bit:
enabled: "true"
promtail:
enabled: "true"
loki:
persistence:
enabled: "true"
size: 10Gi
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: grafana
namespace: logging
spec:
chart:
repository: https://kubernetes-charts.storage.googleapis.com
name: grafana
version: 5.0.22
maxHistory: 4
skipCRDs: false
values:
image:
tag: 7.0.0
admin:
existingSecret: "grafana-credentials"
userKey: username
passwordKey: password
persistence:
enabled: "false"
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Loki
type: loki
url: http://logging-loki.logging:3100
access: proxy
jsonData:
maxLines: 1000
---
apiVersion: v1
kind: Secret
metadata:
name: grafana-auth
namespace: logging
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana-auth
namespace: logging
labels:
app: grafana-auth
spec:
replicas: 1
selector:
matchLabels:
app: grafana-auth
template:
metadata:
labels:
app: grafana-auth
spec:
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://logging-grafana.logging.svc.cluster.local
- --http-address=$(HOST_IP):8080
- --redirect-url=https://grafana.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQN
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: grafana-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: grafana-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.1
name: oauth-proxy
ports:
- containerPort: 8080
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: grafana-auth
namespace: logging
labels:
app: grafana-auth
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: grafana-auth
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: grafana-auth
namespace: logging
labels:
app: grafana-auth
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- grafana.cluster.fun
secretName: grafana-ingress
rules:
- host: grafana.cluster.fun
http:
paths:
- path: /
backend:
serviceName: grafana-auth
servicePort: 80


@ -1,90 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: marcusnoble
namespace: marcusnoble
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
name: web
selector:
app: marcusnoble
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: marcusnoble
namespace: marcusnoble
spec:
replicas: 1
selector:
matchLabels:
app: marcusnoble
template:
metadata:
labels:
app: marcusnoble
spec:
containers:
- name: web
image: rg.fr-par.scw.cloud/averagemarcus/marcusnoble:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
name: web
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
# livenessProbe:
# httpGet:
# path: /healthz
# port: web
# initialDelaySeconds: 10
# readinessProbe:
# httpGet:
# path: /healthz
# port: web
# initialDelaySeconds: 10
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: marcusnoble
namespace: marcusnoble
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- marcusnoble.com
- www.marcusnoble.com
secretName: marcusnoble-ingress
rules:
- host: marcusnoble.com
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: marcusnoble
port:
number: 80
- host: www.marcusnoble.com
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: marcusnoble
port:
number: 80
---


@ -1,229 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: docker-config
namespace: mastodon-digest
annotations:
kube-1password: i6ngbk5zf4k52xgwdwnfup5bby
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: .dockerconfigjson
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: e30=
---
apiVersion: v1
kind: Secret
metadata:
name: mastodon-digest-auth
namespace: mastodon-digest
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: mastodon-digest
namespace: mastodon-digest
annotations:
kube-1password: bfklz3yi3dn4e7xtsbttcvhata
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config
namespace: mastodon-digest
labels:
app: mastodon-digest
data:
config.json: |
[
{
"timeline": "home",
"hours": 12,
"scorer": "ExtendedSimpleWeighted",
"threshold": "lax",
"output": "/usr/share/nginx/html/home/"
},
{
"timeline": "federated",
"hours": 12,
"scorer": "ExtendedSimpleWeighted",
"threshold": "lax",
"output": "/usr/share/nginx/html/federated/"
}
]
---
apiVersion: v1
kind: ConfigMap
metadata:
name: index
namespace: mastodon-digest
labels:
app: mastodon-digest
data:
index.html: |
<!DOCTYPE html>
<html lang="en">
<head>
          <meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Mastodon Digest</title>
<style>
body { background-color: #292c36; font-family: "Arial", sans-serif; }
div#container { margin: auto; max-width: 640px; padding: 10px; text-align: center; margin: 0 auto; }
          .links { text-align: center; }
h1 { color: white; }
          a.button { background: #595aff; color: #fff; line-height: 1.2; min-height: 38px; min-width: 88px; padding: 0 30px; border: 0; border-radius: 6px; display: inline-flex; justify-content: center; align-items: center; }
</style>
</head>
<body>
<div id="container">
<h1>Mastodon Digest</h1>
<section class="links">
<a href="home/" class="button">Home</a>
<a href="federated/" class="button">Federated</a>
</section>
</div>
</body>
</html>
---
apiVersion: v1
kind: Service
metadata:
name: mastodon-digest
namespace: mastodon-digest
spec:
type: ClusterIP
ports:
- port: 80
targetPort: auth
name: web
selector:
app: mastodon-digest
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mastodon-digest
namespace: mastodon-digest
spec:
replicas: 1
selector:
matchLabels:
app: mastodon-digest
template:
metadata:
labels:
app: mastodon-digest
spec:
imagePullSecrets:
- name: docker-config
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://localhost:80
- --http-address=$(HOST_IP):8000
- --redirect-url=https://mastodon-digest.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQNFT
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: mastodon-digest-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: mastodon-digest-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v7.7.1
name: oauth-proxy
ports:
- containerPort: 8000
protocol: TCP
name: auth
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
- name: web
image: nginx:stable
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: web
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
- name: index
mountPath: /usr/share/nginx/html/index.html
subPath: index.html
- name: digest
image: rg.fr-par.scw.cloud/averagemarcus-private/mastodon-digest:latest
imagePullPolicy: Always
env:
- name: CONFIG_FILE
value: /config.json
envFrom:
- secretRef:
name: mastodon-digest
volumeMounts:
- name: config
mountPath: /config.json
subPath: config.json
- name: html
mountPath: /usr/share/nginx/html
volumes:
- name: html
emptyDir: {}
- name: config
configMap:
name: config
- name: index
configMap:
name: index
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mastodon-digest
namespace: mastodon-digest
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- mastodon-digest.cluster.fun
secretName: mastodon-digest-ingress
rules:
- host: mastodon-digest.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: mastodon-digest
port:
number: 80


@ -1,151 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: docker-config
namespace: mastodon-to-airtable
annotations:
kube-1password: i6ngbk5zf4k52xgwdwnfup5bby
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: .dockerconfigjson
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: e30=
---
apiVersion: v1
kind: Secret
metadata:
name: mastodon-to-airtable-auth
namespace: mastodon-to-airtable
annotations:
kube-1password: mr6spkkx7n3memkbute6ojaarm
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: mastodon-to-airtable
namespace: mastodon-to-airtable
annotations:
kube-1password: kizmkmbndgu3ryrox3csev4mim
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: mastodon-to-airtable
namespace: mastodon-to-airtable
spec:
type: ClusterIP
ports:
- port: 80
targetPort: auth
name: web
selector:
app: mastodon-to-airtable
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mastodon-to-airtable
namespace: mastodon-to-airtable
spec:
replicas: 1
selector:
matchLabels:
app: mastodon-to-airtable
template:
metadata:
labels:
app: mastodon-to-airtable
spec:
imagePullSecrets:
- name: docker-config
containers:
- args:
- --cookie-secure=false
- --provider=oidc
- --provider-display-name=Auth0
- --upstream=http://localhost:8080
- --http-address=$(HOST_IP):8000
- --redirect-url=https://mastodon-to-airtable.cluster.fun/oauth2/callback
- --email-domain=marcusnoble.co.uk
- --pass-basic-auth=false
- --pass-access-token=false
- --oidc-issuer-url=https://marcusnoble.eu.auth0.com/
- --cookie-secret=KDGD6rrK6cBmryyZ4wcJ9xAUNW9AQNFT
env:
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
key: username
name: mastodon-to-airtable-auth
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: password
name: mastodon-to-airtable-auth
image: quay.io/oauth2-proxy/oauth2-proxy:v7.7.1
name: oauth-proxy
ports:
- containerPort: 8000
protocol: TCP
name: auth
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
- name: web
image: rg.fr-par.scw.cloud/averagemarcus-private/mastodon-to-airtable:latest
imagePullPolicy: Always
env:
- name: PORT
value: "8080"
envFrom:
- secretRef:
name: "mastodon-to-airtable"
ports:
- containerPort: 8080
name: web
resources:
limits:
memory: 50Mi
requests:
memory: 50Mi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mastodon-to-airtable
namespace: mastodon-to-airtable
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.tls: "true"
ingress.kubernetes.io/ssl-redirect: "true"
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- mastodon-to-airtable.cluster.fun
secretName: mastodon-to-airtable-ingress
rules:
- host: mastodon-to-airtable.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: mastodon-to-airtable
port:
number: 80

255
manifests/matrix_chart.yaml Normal file

@ -0,0 +1,255 @@
apiVersion: v1
kind: Namespace
metadata:
name: chat
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: matrix
namespace: chat
spec:
chart:
repository: https://dacruz21.github.io/helm-charts
name: matrix
version: 1.1.2
maxHistory: 4
values:
matrix:
serverName: "matrix.cluster.fun"
telemetry: false
hostname: "matrix.cluster.fun"
presence: true
blockNonAdminInvites: false
search: true
adminEmail: "matrix@marcusnoble.co.uk"
uploads:
maxSize: 100M
maxPixels: 32M
federation:
enabled: false
allowPublicRooms: false
blacklist:
- '127.0.0.0/8'
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- '::1/128'
- 'fe80::/64'
- 'fc00::/7'
registration:
enabled: false
allowGuests: false
urlPreviews:
enabled: true
rules:
maxSize: 4M
ip:
blacklist:
- '127.0.0.0/8'
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- '::1/128'
- 'fe80::/64'
- 'fc00::/7'
volumes:
media:
capacity: 4Gi
signingKey:
capacity: 1Gi
postgresql:
enabled: true
persistence:
size: 4Gi
synapse:
image:
repository: "matrixdotorg/synapse"
tag: v1.12.4
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
replicaCount: 1
resources: {}
riot:
enabled: true
integrations:
enabled: true
ui: "https://scalar.vector.im/"
api: "https://scalar.vector.im/api"
widgets:
- "https://scalar.vector.im/_matrix/integrations/v1"
- "https://scalar.vector.im/api"
- "https://scalar-staging.vector.im/_matrix/integrations/v1"
- "https://scalar-staging.vector.im/api"
- "https://scalar-staging.riot.im/scalar/api"
# Experimental features in riot-web, see https://github.com/vector-im/riot-web/blob/develop/docs/labs.md
labs:
- feature_pinning
- feature_custom_status
- feature_state_counters
- feature_many_integration_managers
- feature_mjolnir
- feature_dm_verification
- feature_bridge_state
- feature_presence_in_room_list
- feature_custom_themes
# Servers to show in the Explore menu (the current server is always shown)
roomDirectoryServers: []
# Prefix before permalinks generated when users share links to rooms, users, or messages. If running an unfederated Synapse, set the below to the URL of your Riot instance.
permalinkPrefix: "https://chat.cluster.fun"
image:
repository: "vectorim/riot-web"
tag: v1.6.0
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
replicaCount: 1
resources: {}
# Settings for Coturn TURN relay, used for routing voice calls
coturn:
enabled: false
mail:
enabled: false
relay:
enabled: false
bridges:
irc:
enabled: false
whatsapp:
enabled: false
discord:
enabled: false
networkPolicies:
enabled: false
ingress:
enabled: false
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: matrix
namespace: chat
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- matrix.cluster.fun
secretName: matrix-ingress
rules:
- host: matrix.cluster.fun
http:
paths:
- path: /.well-known/matrix
backend:
serviceName: well-known
servicePort: 80
- path: /
backend:
serviceName: chat-matrix-synapse
servicePort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: riot
namespace: chat
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
spec:
tls:
- hosts:
- chat.cluster.fun
secretName: riot-ingress
rules:
- host: chat.cluster.fun
http:
paths:
- path: /
backend:
serviceName: chat-matrix-riot
servicePort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: well-known
namespace: chat
spec:
replicas: 1
selector:
matchLabels:
app: well-known
template:
metadata:
labels:
app: well-known
spec:
containers:
- name: web
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: web
volumeMounts:
- name: well-known
mountPath: /usr/share/nginx/html/.well-known/matrix
volumes:
- name: well-known
configMap:
name: well-known
---
apiVersion: v1
kind: Service
metadata:
name: well-known
namespace: chat
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
name: web
selector:
app: well-known
---
apiVersion: v1
kind: ConfigMap
metadata:
name: well-known
namespace: chat
data:
server: |-
{
"m.server": "matrix.cluster.fun:443"
}


@ -1,554 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: matrix
namespace: chat
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
ingressClassName: nginx
tls:
- hosts:
- matrix.cluster.fun
secretName: matrix-ingress
rules:
- host: matrix.cluster.fun
http:
paths:
- path: /.well-known/matrix
pathType: ImplementationSpecific
backend:
service:
name: well-known
port:
number: 80
- path: /
pathType: ImplementationSpecific
backend:
service:
name: matrix-synapse
port:
number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: riot
namespace: chat
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
ingressClassName: nginx
tls:
- hosts:
- chat.cluster.fun
secretName: riot-ingress
rules:
- host: chat.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: matrix-riot
port:
number: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: well-known
namespace: chat
annotations:
configmap.reloader.stakater.com/reload: "well-known"
spec:
replicas: 1
selector:
matchLabels:
app: well-known
template:
metadata:
labels:
app: well-known
spec:
containers:
- name: web
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: web
volumeMounts:
- name: well-known
mountPath: /usr/share/nginx/html/.well-known/matrix
resources:
limits:
memory: 15Mi
requests:
memory: 15Mi
volumes:
- name: well-known
configMap:
name: well-known
---
apiVersion: v1
kind: Service
metadata:
name: well-known
namespace: chat
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
name: web
selector:
app: well-known
---
apiVersion: v1
kind: ConfigMap
metadata:
name: well-known
namespace: chat
data:
server: |-
{
"m.server": "matrix.cluster.fun:443"
}
client: |-
{
"m.homeserver": {
"base_url": "https://matrix.cluster.fun"
},
"org.matrix.msc3575.proxy": {
"url": "https://syncv3.matrix.cluster.fun"
}
}
---
# Source: matrix/templates/riot/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: matrix-riot-config
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: element
data:
config.json: |
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://matrix.cluster.fun"
}
},
"brand": "Element",
"branding": {},
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_widgets_urls": [
"https://scalar.vector.im/_matrix/integrations/v1",
"https://scalar.vector.im/api",
"https://scalar-staging.vector.im/_matrix/integrations/v1",
"https://scalar-staging.vector.im/api",
"https://scalar-staging.riot.im/scalar/api"
],
"showLabsSettings": true,
"features": {
"feature_pinning": true,
"feature_custom_status": "labs",
"feature_state_counters": "labs",
"feature_many_integration_managers": "labs",
"feature_mjolnir": "labs",
"feature_dm_verification": "labs",
"feature_bridge_state": "labs",
"feature_presence_in_room_list": true,
"feature_custom_themes": "labs",
"feature_new_spinner": "labs",
"feature_jump_to_date": "labs",
"feature_location_share_pin_drop": "labs",
"feature_location_share_live": "labs",
"feature_thread": true,
"feature_video_rooms": true,
"feature_favourite_messages": "labs"
},
"roomDirectory": {
"servers": []
},
"permalinkPrefix": "https://chat.cluster.fun",
"enable_presence_by_hs_url": {
"https://matrix.org": false,
"https://matrix-client.matrix.org": false
},
"map_style_url": "https://api.maptiler.com/maps/streets/style.json?key=2IerXP2a5g1e7hxxBbzs"
}
nginx.conf: |
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/pid/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
}
default.conf: |
server {
listen 8080;
server_name localhost;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
---
apiVersion: v1
kind: Secret
metadata:
name: matrix-synapse-config
namespace: chat
annotations:
kube-1password: wbj4oozwyx6m2zz5m42pgcmymy
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: homeserver.yaml
labels:
app.kubernetes.io/name: "matrix"
component: synapse
type: Opaque
---
apiVersion: v1
kind: ConfigMap
metadata:
name: matrix-synapse-config
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: element
data:
matrix.cluster.fun.log.config: |
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARNING
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARNING
root:
level: WARNING
handlers: [console]
---
# Source: matrix/templates/riot/service.yaml
apiVersion: v1
kind: Service
metadata:
name: matrix-riot
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: element
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: matrix-riot
---
# Source: matrix/templates/synapse/service.yaml
apiVersion: v1
kind: Service
metadata:
name: matrix-synapse
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: synapse
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/_synapse/metrics"
prometheus.io/port: "9000"
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
- port: 9000
targetPort: metrics
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: matrix-synapse
---
# Source: matrix/templates/riot/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix-riot
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: element
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: matrix-riot
template:
metadata:
labels:
app.kubernetes.io/name: matrix-riot
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
containers:
- name: "riot"
image: "vectorim/element-web:v1.11.86"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
protocol: TCP
volumeMounts:
- mountPath: /app/config.json
name: riot-config
subPath: config.json
readOnly: true
- mountPath: /etc/nginx/nginx.conf
name: riot-config
subPath: nginx.conf
readOnly: true
- mountPath: /etc/nginx/conf.d/default.conf
name: riot-config
subPath: default.conf
readOnly: true
- mountPath: /var/cache/nginx
name: ephemeral
subPath: cache
- mountPath: /var/run/pid
name: ephemeral
subPath: pid
readinessProbe:
httpGet:
path: /
port: http
startupProbe:
httpGet:
path: /
port: http
livenessProbe:
httpGet:
path: /
port: http
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
volumes:
- name: riot-config
configMap:
name: matrix-riot-config
- name: ephemeral
emptyDir: {}
---
# Source: matrix/templates/synapse/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix-synapse
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: synapse
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: matrix-synapse
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: matrix-synapse
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
initContainers:
- name: generate-signing-key
image: "ghcr.io/element-hq/synapse:v1.119.0"
imagePullPolicy: IfNotPresent
env:
- name: SYNAPSE_SERVER_NAME
value: matrix.cluster.fun
- name: SYNAPSE_REPORT_STATS
value: "no"
command: ["python"]
args:
- "-m"
- "synapse.app.homeserver"
- "--config-path"
- "/data/homeserver.yaml"
- "--keys-directory"
- "/data/keys"
- "--generate-keys"
volumeMounts:
- name: synapse-config-homeserver
mountPath: /data/homeserver.yaml
subPath: homeserver.yaml
- name: synapse-config-logging
mountPath: /data/matrix.cluster.fun.log.config
subPath: matrix.cluster.fun.log.config
- name: signing-key
mountPath: /data/keys
containers:
- name: "synapse"
image: "ghcr.io/element-hq/synapse:v1.119.0"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8008
protocol: TCP
- name: metrics
containerPort: 9000
protocol: TCP
volumeMounts:
- name: synapse-config-homeserver
mountPath: /data/homeserver.yaml
subPath: homeserver.yaml
- name: mautrix-whatsapp-registration
mountPath: /data/mautrix-whatsapp-registration.yaml
subPath: registration.yaml
# - name: mautrix-signal-registration
# mountPath: /data/mautrix-signal-registration.yaml
# subPath: registration.yaml
# - name: mautrix-telegram-registration
# mountPath: /data/mautrix-telegram-registration.yaml
# subPath: registration.yaml
- name: synapse-config-logging
mountPath: /data/matrix.cluster.fun.log.config
subPath: matrix.cluster.fun.log.config
- name: signing-key
mountPath: /data/keys
- name: user-media
mountPath: /data/media_store
- name: uploads
mountPath: /data/uploads
- name: tmp
mountPath: /tmp
readinessProbe:
httpGet:
path: /_matrix/static/
port: http
periodSeconds: 10
timeoutSeconds: 5
startupProbe:
httpGet:
path: /_matrix/static/
port: http
failureThreshold: 6
periodSeconds: 5
timeoutSeconds: 5
livenessProbe:
httpGet:
path: /_matrix/static/
port: http
periodSeconds: 10
timeoutSeconds: 5
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
volumes:
- name: synapse-config-logging
configMap:
name: matrix-synapse-config
- name: synapse-config-homeserver
secret:
secretName: matrix-synapse-config
- name: mautrix-whatsapp-registration
secret:
secretName: mautrix-whatsapp-registration
# - name: mautrix-signal-registration
# secret:
# secretName: mautrix-signal-registration
# - name: mautrix-telegram-registration
# secret:
# secretName: mautrix-telegram-registration
- name: signing-key
persistentVolumeClaim:
claimName: chat-matrix-signing-key
- name: user-media
persistentVolumeClaim:
claimName: chat-matrix-user-media
- name: uploads
emptyDir: {}
- name: tmp
emptyDir: {}
---


@ -1,32 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: chat-matrix-user-media
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: synapse
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 12Gi
storageClassName: sbs-default-retain
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: chat-matrix-signing-key
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: synapse
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: sbs-default-retain
---


@ -1,153 +0,0 @@
# apiVersion: v1
# kind: Secret
# metadata:
# name: mautrix-signal-registration
# namespace: chat
# annotations:
# kube-1password: z6tylu2br724gttcpfyi5egaui
# kube-1password/vault: Kubernetes
# kube-1password/secret-text-key: registration.yaml
# labels:
# app.kubernetes.io/name: "mautrix-signal"
# component: registration
# type: Opaque
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: mautrix-signal-config
# namespace: chat
# annotations:
# kube-1password: 5vfaorcudozlq4clkzgmzzszqe
# kube-1password/vault: Kubernetes
# kube-1password/secret-text-key: config.yaml
# labels:
# app.kubernetes.io/name: "mautrix-signal"
# component: config
# type: Opaque
# ---
# apiVersion: v1
# kind: Service
# metadata:
# name: mautrix-signal
# namespace: chat
# labels:
# app.kubernetes.io/name: mautrix-signal
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: "9000"
# spec:
# type: ClusterIP
# ports:
# - port: 29328
# targetPort: http
# protocol: TCP
# name: http
# selector:
# app.kubernetes.io/name: mautrix-signal
# ---
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# name: mautrix-signal
# labels:
# app.kubernetes.io/name: mautrix-signal
# spec:
# revisionHistoryLimit: 3
# replicas: 1
# strategy:
# type: Recreate
# selector:
# matchLabels:
# app.kubernetes.io/name: mautrix-signal
# template:
# metadata:
# labels:
# app.kubernetes.io/name: mautrix-signal
# spec:
# serviceAccountName: default
# automountServiceAccountToken: true
# dnsPolicy: ClusterFirst
# enableServiceLinks: true
# initContainers:
# - name: config-copy
# image: bash:latest
# imagePullPolicy: IfNotPresent
# args:
# - -c
# - |
# cp /secrets/* /data/
# volumeMounts:
# - name: mautrix-signal-config
# mountPath: /secrets/config.yaml
# subPath: config.yaml
# - name: mautrix-signal-registration
# mountPath: /secrets/registration.yaml
# subPath: registration.yaml
# - name: data
# mountPath: /data
# containers:
# - name: signald
# image: docker.io/signald/signald:stable
# imagePullPolicy: Always
# volumeMounts:
# - name: signald
# mountPath: /signald
# - name: mautrix-signal
# image: "dock.mau.dev/mautrix/signal:v0.4.3"
# imagePullPolicy: IfNotPresent
# env:
# - name: "TZ"
# value: "UTC"
# ports:
# - name: http
# containerPort: 29328
# protocol: TCP
# - name: metrics
# containerPort: 9000
# protocol: TCP
# volumeMounts:
# - name: signald
# mountPath: /signald
# - name: data
# mountPath: /data
# livenessProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 3
# timeoutSeconds: 1
# periodSeconds: 10
# readinessProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 3
# timeoutSeconds: 1
# periodSeconds: 10
# startupProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 30
# timeoutSeconds: 1
# periodSeconds: 5
# volumes:
# - name: data
# emptyDir: {}
# - name: signald
# emptyDir: {}
# - name: mautrix-signal-config
# secret:
# secretName: mautrix-signal-config
# - name: mautrix-signal-registration
# secret:
# secretName: mautrix-signal-registration
# ---


@ -1,119 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: matrix-sliding-sync
namespace: chat
annotations:
kube-1password: 7kvyfcszfaavj2d7uvl4troagm
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
labels:
app.kubernetes.io/name: "matrix"
component: sliding-sync
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: sliding-sync
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: sliding-sync
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: sliding-sync
template:
metadata:
labels:
app.kubernetes.io/name: sliding-sync
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
containers:
- name: "sliding-sync"
image: "ghcr.io/matrix-org/sliding-sync:v0.99.19"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8008
protocol: TCP
- name: metrics
containerPort: 9090
protocol: TCP
env:
- name: SYNCV3_SERVER
value: https://matrix.cluster.fun
- name: SYNCV3_BINDADDR
value: ":8008"
- name: SYNCV3_PROM
value: ":9090"
- name: SYNCV3_SECRET
valueFrom:
secretKeyRef:
name: matrix-sliding-sync
key: SYNCV3_SECRET
- name: SYNCV3_DB
valueFrom:
secretKeyRef:
name: matrix-sliding-sync
key: SYNCV3_DB
---
apiVersion: v1
kind: Service
metadata:
name: sliding-sync
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: sliding-sync
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
name: web
- port: 9090
targetPort: metrics
protocol: TCP
name: metrics
selector:
app.kubernetes.io/name: sliding-sync
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sliding-sync
namespace: chat
labels:
app.kubernetes.io/name: "matrix"
component: sliding-sync
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
ingressClassName: nginx
tls:
- hosts:
- syncv3.matrix.cluster.fun
secretName: sliding-sync-ingress
rules:
- host: syncv3.matrix.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: sliding-sync
port:
number: 80
---


@ -1,143 +0,0 @@
# apiVersion: v1
# kind: Secret
# metadata:
# name: mautrix-telegram-registration
# namespace: chat
# annotations:
# kube-1password: dancy7ogc4gjlxhfntqejgudwi
# kube-1password/vault: Kubernetes
# kube-1password/secret-text-key: registration.yaml
# labels:
# app.kubernetes.io/name: "mautrix-telegram"
# component: registration
# type: Opaque
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: mautrix-telegram-config
# namespace: chat
# annotations:
# kube-1password: nilzdpfum35hhwijnwvasbzmcq
# kube-1password/vault: Kubernetes
# kube-1password/secret-text-key: config.yaml
# labels:
# app.kubernetes.io/name: "mautrix-telegram"
# component: config
# type: Opaque
# ---
# apiVersion: v1
# kind: Service
# metadata:
# name: mautrix-telegram
# namespace: chat
# labels:
# app.kubernetes.io/name: mautrix-telegram
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: "9000"
# spec:
# type: ClusterIP
# ports:
# - port: 29318
# targetPort: http
# protocol: TCP
# name: http
# selector:
# app.kubernetes.io/name: mautrix-telegram
# ---
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# name: mautrix-telegram
# labels:
# app.kubernetes.io/name: mautrix-telegram
# spec:
# revisionHistoryLimit: 3
# replicas: 1
# strategy:
# type: Recreate
# selector:
# matchLabels:
# app.kubernetes.io/name: mautrix-telegram
# template:
# metadata:
# labels:
# app.kubernetes.io/name: mautrix-telegram
# spec:
# serviceAccountName: default
# automountServiceAccountToken: true
# dnsPolicy: ClusterFirst
# enableServiceLinks: true
# initContainers:
# - name: config-copy
# image: bash:latest
# imagePullPolicy: IfNotPresent
# args:
# - -c
# - |
# cp /secrets/* /data/
# volumeMounts:
# - name: mautrix-telegram-config
# mountPath: /secrets/config.yaml
# subPath: config.yaml
# - name: mautrix-telegram-registration
# mountPath: /secrets/registration.yaml
# subPath: registration.yaml
# - name: data
# mountPath: /data
# containers:
# - name: mautrix-telegram
# image: "dock.mau.dev/mautrix/telegram:v0.12.1"
# imagePullPolicy: IfNotPresent
# env:
# - name: "TZ"
# value: "UTC"
# ports:
# - name: http
# containerPort: 29318
# protocol: TCP
# - name: metrics
# containerPort: 9000
# protocol: TCP
# volumeMounts:
# - name: data
# mountPath: /data
# livenessProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 3
# timeoutSeconds: 1
# periodSeconds: 10
# readinessProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 3
# timeoutSeconds: 1
# periodSeconds: 10
# startupProbe:
# tcpSocket:
# port: 29318
# initialDelaySeconds: 0
# failureThreshold: 30
# timeoutSeconds: 1
# periodSeconds: 5
# volumes:
# - name: data
# emptyDir: {}
# - name: mautrix-telegram-config
# secret:
# secretName: mautrix-telegram-config
# - name: mautrix-telegram-registration
# secret:
# secretName: mautrix-telegram-registration
# ---


@ -1,143 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: mautrix-whatsapp-registration
namespace: chat
annotations:
kube-1password: x6lzkpyov4dem5jtk2kimyrnvy
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: registration.yaml
labels:
app.kubernetes.io/name: "mautrix-whatsapp"
component: registration
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: mautrix-whatsapp-config
namespace: chat
annotations:
kube-1password: ji3e2el66bu56bml3kq3ghyojq
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: config.yaml
labels:
app.kubernetes.io/name: "mautrix-whatsapp"
component: config
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
name: mautrix-whatsapp
namespace: chat
labels:
app.kubernetes.io/name: mautrix-whatsapp
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: "9000"
spec:
type: ClusterIP
ports:
- port: 29318
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: mautrix-whatsapp
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mautrix-whatsapp
labels:
app.kubernetes.io/name: mautrix-whatsapp
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: mautrix-whatsapp
template:
metadata:
labels:
app.kubernetes.io/name: mautrix-whatsapp
spec:
serviceAccountName: default
automountServiceAccountToken: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
initContainers:
- name: config-copy
image: bash:latest
imagePullPolicy: IfNotPresent
args:
- -c
- |
cp /secrets/* /data/
volumeMounts:
- name: mautrix-whatsapp-config
mountPath: /secrets/config.yaml
subPath: config.yaml
- name: mautrix-whatsapp-registration
mountPath: /secrets/registration.yaml
subPath: registration.yaml
- name: data
mountPath: /data
containers:
- name: mautrix-whatsapp
image: "dock.mau.dev/mautrix/whatsapp:v0.11.0"
imagePullPolicy: IfNotPresent
env:
- name: "TZ"
value: "UTC"
ports:
- name: http
containerPort: 29318
protocol: TCP
# - name: metrics
# containerPort: 9000
# protocol: TCP
volumeMounts:
- name: data
mountPath: /data
livenessProbe:
tcpSocket:
port: 29318
initialDelaySeconds: 0
failureThreshold: 3
timeoutSeconds: 1
periodSeconds: 10
readinessProbe:
tcpSocket:
port: 29318
initialDelaySeconds: 0
failureThreshold: 3
timeoutSeconds: 1
periodSeconds: 10
startupProbe:
tcpSocket:
port: 29318
initialDelaySeconds: 0
failureThreshold: 30
timeoutSeconds: 1
periodSeconds: 5
volumes:
- name: data
emptyDir: {}
- name: mautrix-whatsapp-config
secret:
secretName: mautrix-whatsapp-config
- name: mautrix-whatsapp-registration
secret:
secretName: mautrix-whatsapp-registration
---


@ -1,120 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: mealie
namespace: mealie
annotations:
kube-1password: 7ibib7oafxbxkvofnd4oxcr3qy
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mealie
namespace: mealie
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: mealie
template:
metadata:
labels:
app: mealie
spec:
containers:
- name: frontend
image: ghcr.io/mealie-recipes/mealie:v2.2.0
imagePullPolicy: Always
envFrom:
- secretRef:
name: mealie
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TOKEN_TIME
value: "168"
- name: DB_ENGINE
value: postgres
- name: POSTGRES_DB
value: mealie
- name: RECIPE_PUBLIC
value: "false"
- name: RECIPE_SHOW_NUTRITION
value: "true"
- name: RECIPE_SHOW_ASSETS
value: "true"
- name: RECIPE_LANDSCAPE_VIEW
value: "true"
- name: RECIPE_DISABLE_COMMENTS
value: "false"
- name: RECIPE_DISABLE_AMOUNT
value: "false"
- name: ALLOW_SIGNUP
value: "false"
- name: BASE_URL
value: "https://mealie.cluster.fun"
ports:
- containerPort: 9000
name: web
volumeMounts:
- mountPath: /app/data
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: mealie
---
apiVersion: v1
kind: Service
metadata:
name: mealie
namespace: mealie
spec:
type: ClusterIP
ports:
- port: 80
targetPort: web
name: web
selector:
app: mealie
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mealie
namespace: mealie
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
ingressClassName: nginx
tls:
- hosts:
- mealie.cluster.fun
secretName: mealie-ingress
rules:
- host: mealie.cluster.fun
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: mealie
port:
name: web


@ -1,13 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mealie
namespace: mealie
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: sbs-default-retain
---


@ -1,255 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
name: kube-state-metrics
rules:
- apiGroups: ["certificates.k8s.io"]
resources:
- certificatesigningrequests
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["list", "watch"]
- apiGroups: ["batch"]
resources:
- cronjobs
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- daemonsets
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- deployments
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- endpoints
verbs: ["list", "watch"]
- apiGroups: ["autoscaling"]
resources:
- horizontalpodautoscalers
verbs: ["list", "watch"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources:
- ingresses
verbs: ["list", "watch"]
- apiGroups: ["batch"]
resources:
- jobs
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- limitranges
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
- mutatingwebhookconfigurations
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- namespaces
verbs: ["list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- nodes
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- persistentvolumeclaims
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- persistentvolumes
verbs: ["list", "watch"]
- apiGroups: ["policy"]
resources:
- poddisruptionbudgets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- pods
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- replicasets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- replicationcontrollers
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- resourcequotas
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- secrets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- services
verbs: ["list", "watch"]
- apiGroups: ["apps"]
resources:
- statefulsets
verbs: ["list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources:
- storageclasses
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
- validatingwebhookconfigurations
verbs: ["list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources:
- volumeattachments
verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: monitoring
---
apiVersion: v1
kind: Service
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
annotations:
prometheus.io/scrape: 'true'
spec:
type: "ClusterIP"
ports:
- name: "http"
protocol: TCP
port: 8080
targetPort: 8080
selector:
app.kubernetes.io/name: kube-state-metrics
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
spec:
selector:
matchLabels:
app.kubernetes.io/name: kube-state-metrics
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
spec:
serviceAccountName: kube-state-metrics
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
containers:
- name: kube-state-metrics
args:
#- --resources=certificatesigningrequests
- --resources=configmaps
- --resources=cronjobs
- --resources=daemonsets
- --resources=deployments
#- --resources=endpoints
#- --resources=horizontalpodautoscalers
- --resources=ingresses
- --resources=jobs
#- --resources=limitranges
- --resources=mutatingwebhookconfigurations
- --resources=namespaces
#- --resources=networkpolicies
- --resources=nodes
- --resources=persistentvolumeclaims
- --resources=persistentvolumes
- --resources=poddisruptionbudgets
- --resources=pods
- --resources=replicasets
#- --resources=replicationcontrollers
#- --resources=resourcequotas
- --resources=secrets
- --resources=services
- --resources=statefulsets
- --resources=storageclasses
- --resources=validatingwebhookconfigurations
#- --resources=volumeattachments
imagePullPolicy: IfNotPresent
image: "registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.14.0"
ports:
- containerPort: 8080
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 5
---

View File

@ -1,64 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-server
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
name: prometheus-server
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- nodes/metrics
- services
- endpoints
- pods
- ingresses
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
- ingresses
verbs:
- get
- list
- watch
- nonResourceURLs:
- "/metrics"
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
name: prometheus-server
subjects:
- kind: ServiceAccount
name: prometheus-server
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-server
---

View File

@ -1,292 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
---
apiVersion: v1
kind: ConfigMap
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
data:
promtail.yaml: |
client:
backoff_config:
max_period: 5m
max_retries: 10
min_period: 500ms
batchsize: 1048576
batchwait: 1s
external_labels: {}
timeout: 10s
positions:
filename: /run/promtail/positions.yaml
server:
http_listen_port: 3101
clients:
- url: http://loki-distributed.proxy-civo.svc:80/loki/api/v1/push
external_labels:
kubernetes_cluster: civo
target_config:
sync_period: 10s
scrape_configs:
- job_name: kubernetes-pods
pipeline_stages:
- docker: {}
- cri: {}
- match:
selector: '{app="weave-net"}'
action: drop
- match:
selector: '{filename=~".*konnectivity.*"}'
action: drop
- match:
selector: '{name=~".*"} |~ ".*/healthz.*"'
action: drop
- match:
selector: '{name=~".*"} |~ ".*/api/health.*"'
action: drop
- match:
selector: '{name=~".*"} |~ ".*kube-probe/.*"'
action: drop
- match:
selector: '{app="internal-proxy"}'
action: drop
- match:
selector: '{app="non-auth-proxy"}'
action: drop
- match:
selector: '{app="vpa"}'
action: drop
- match:
selector: '{app="promtail"}'
action: drop
- match:
selector: '{app="csi-node"}'
action: drop
- match:
selector: '{app="victoria-metrics"}'
action: drop
- match:
selector: '{app="git-sync"}'
action: drop
- match:
selector: '{app="ingress-nginx"}'
stages:
- json:
expressions:
request_host: host
request_path: path
request_method: method
response_status: status
- drop:
source: "request_path"
value: "/healthz"
- drop:
source: "request_path"
value: "/health"
- labels:
request_host:
request_method:
response_status:
- match:
selector: '{app="traefik"}'
stages:
- json:
expressions:
request_host: RequestHost
request_path: RequestPath
request_method: RequestMethod
response_status: OriginStatus
- drop:
source: "request_path"
value: "/healthz"
- drop:
source: "request_path"
value: "/health"
- drop:
source: "request_path"
value: "/ping"
- labels:
request_host:
request_method:
response_status:
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_controller_name
regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})?
action: replace
target_label: __tmp_controller_name
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_name
- __meta_kubernetes_pod_label_app
- __tmp_controller_name
- __meta_kubernetes_pod_name
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: app
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_component
- __meta_kubernetes_pod_label_component
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: component
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: node_name
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
replacement: $1
separator: /
source_labels:
- namespace
- app
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- action: replace
replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: replace
replacement: /var/log/pods/*$1/*.log
regex: true/(.*)
separator: /
source_labels:
- __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
- __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: promtail-clusterrole
labels:
app.kubernetes.io/name: promtail
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "watch", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: promtail-clusterrolebinding
labels:
app.kubernetes.io/name: promtail
subjects:
- kind: ServiceAccount
name: promtail
namespace: monitoring
roleRef:
kind: ClusterRole
name: promtail-clusterrole
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
annotations:
configmap.reloader.stakater.com/reload: "promtail"
spec:
selector:
matchLabels:
app.kubernetes.io/name: promtail
template:
metadata:
labels:
app.kubernetes.io/name: promtail
annotations:
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: promtail
containers:
- name: promtail
image: "grafana/promtail:2.9.10"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- name: run
mountPath: /run/promtail
- mountPath: /var/lib/docker/containers
name: docker
readOnly: true
- mountPath: /var/log/pods
name: pods
readOnly: true
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 3101
name: http-metrics
securityContext:
readOnlyRootFilesystem: true
runAsGroup: 0
runAsUser: 0
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: config
configMap:
name: promtail
- name: run
hostPath:
path: /run/promtail
- hostPath:
path: /var/lib/docker/containers
name: docker
- hostPath:
path: /var/log/pods
name: pods
---

View File

@ -1,163 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: vmagent
namespace: monitoring
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
data:
prometheus.yml: |
global:
scrape_interval: 1m
external_labels:
source: civo
agent: vmagent
scrape_configs:
- job_name: 'vmagent'
static_configs:
- targets: ['localhost:8429']
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
job_name: kubernetes-nodes
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- replacement: kubernetes.default.svc:443
target_label: __address__
- regex: (.+)
replacement: /api/v1/nodes/$1/proxy/metrics
source_labels:
- __meta_kubernetes_node_name
target_label: __metrics_path__
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scrape
- action: replace
regex: (https?)
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scheme
target_label: __scheme__
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- action: replace
source_labels:
- __meta_kubernetes_endpoint_port_name
target_label: kubernetes_endpoint_port_name
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: kubernetes_node
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
- action: replace
source_labels:
- __meta_kubernetes_pod_container_port_name
target_label: kubernetes_port_name
- action: drop
regex: Pending|Succeeded|Failed
source_labels:
- __meta_kubernetes_pod_phase
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vmagent
namespace: monitoring
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
annotations:
configmap.reloader.stakater.com/reload: "vmagent"
spec:
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
spec:
serviceAccountName: prometheus-server
containers:
- name: vmagent
image: "victoriametrics/vmagent:v1.106.1"
imagePullPolicy: "IfNotPresent"
args:
- -remoteWrite.url=http://vmcluster.proxy-civo.svc/insert/0/prometheus/
- -remoteWrite.showURL
- -promscrape.config=/config/prometheus.yml
volumeMounts:
- name: config-volume
mountPath: /config
volumes:
- name: config-volume
configMap:
name: vmagent
---

View File

@ -1,255 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
name: kube-state-metrics
rules:
- apiGroups: ["certificates.k8s.io"]
resources:
- certificatesigningrequests
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["list", "watch"]
- apiGroups: ["batch"]
resources:
- cronjobs
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- daemonsets
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- deployments
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- endpoints
verbs: ["list", "watch"]
- apiGroups: ["autoscaling"]
resources:
- horizontalpodautoscalers
verbs: ["list", "watch"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources:
- ingresses
verbs: ["list", "watch"]
- apiGroups: ["batch"]
resources:
- jobs
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- limitranges
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
- mutatingwebhookconfigurations
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- namespaces
verbs: ["list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- nodes
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- persistentvolumeclaims
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- persistentvolumes
verbs: ["list", "watch"]
- apiGroups: ["policy"]
resources:
- poddisruptionbudgets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- pods
verbs: ["list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- replicasets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- replicationcontrollers
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- resourcequotas
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- secrets
verbs: ["list", "watch"]
- apiGroups: [""]
resources:
- services
verbs: ["list", "watch"]
- apiGroups: ["apps"]
resources:
- statefulsets
verbs: ["list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources:
- storageclasses
verbs: ["list", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
- validatingwebhookconfigurations
verbs: ["list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources:
- volumeattachments
verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: monitoring
---
apiVersion: v1
kind: Service
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
annotations:
prometheus.io/scrape: 'true'
spec:
type: "ClusterIP"
ports:
- name: "http"
protocol: TCP
port: 8080
targetPort: 8080
selector:
app.kubernetes.io/name: kube-state-metrics
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-state-metrics
namespace: monitoring
labels:
app.kubernetes.io/name: kube-state-metrics
spec:
selector:
matchLabels:
app.kubernetes.io/name: kube-state-metrics
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
spec:
serviceAccountName: kube-state-metrics
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
containers:
- name: kube-state-metrics
args:
#- --resources=certificatesigningrequests
- --resources=configmaps
- --resources=cronjobs
- --resources=daemonsets
- --resources=deployments
#- --resources=endpoints
#- --resources=horizontalpodautoscalers
- --resources=ingresses
- --resources=jobs
#- --resources=limitranges
- --resources=mutatingwebhookconfigurations
- --resources=namespaces
#- --resources=networkpolicies
- --resources=nodes
- --resources=persistentvolumeclaims
- --resources=persistentvolumes
- --resources=poddisruptionbudgets
- --resources=pods
- --resources=replicasets
#- --resources=replicationcontrollers
#- --resources=resourcequotas
- --resources=secrets
- --resources=services
- --resources=statefulsets
- --resources=storageclasses
- --resources=validatingwebhookconfigurations
#- --resources=volumeattachments
imagePullPolicy: IfNotPresent
image: "registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.14.0"
ports:
- containerPort: 8080
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 5
---

View File

@ -1,97 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-node-exporter
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
name: prometheus-node-exporter
namespace: monitoring
spec:
clusterIP: None
ports:
- name: metrics
port: 9100
protocol: TCP
targetPort: 9100
selector:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
type: "ClusterIP"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
name: prometheus-node-exporter
namespace: monitoring
spec:
selector:
matchLabels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: node-exporter
spec:
serviceAccountName: prometheus-node-exporter
containers:
- name: prometheus-node-exporter
image: "prom/node-exporter:v1.8.2"
imagePullPolicy: "IfNotPresent"
args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --no-collector.wifi
- --no-collector.hwmon
- --no-collector.netclass
- --no-collector.arp
- --no-collector.bcache
- --no-collector.bonding
- --no-collector.btrfs
- --no-collector.dmi
- --no-collector.edac
- --no-collector.entropy
- --no-collector.fibrechannel
- --no-collector.infiniband
- --no-collector.tapestats
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
- --web.listen-address=:9100
ports:
- name: metrics
containerPort: 9100
hostPort: 9100
volumeMounts:
- name: proc
mountPath: /host/proc
readOnly: true
- name: sys
mountPath: /host/sys
readOnly: true
hostNetwork: true
hostPID: true
volumes:
- name: proc
hostPath:
path: /proc
- name: sys
hostPath:
path: /sys
---

View File

@ -1,64 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-server
namespace: monitoring
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
name: prometheus-server
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- nodes/metrics
- services
- endpoints
- pods
- ingresses
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
- ingresses
verbs:
- get
- list
- watch
- nonResourceURLs:
- "/metrics"
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: prometheus
app.kubernetes.io/component: server
name: prometheus-server
subjects:
- kind: ServiceAccount
name: prometheus-server
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-server
---

View File

@ -1,271 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
---
apiVersion: v1
kind: ConfigMap
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
data:
promtail.yaml: |
client:
backoff_config:
max_period: 5m
max_retries: 10
min_period: 500ms
batchsize: 1048576
batchwait: 1s
external_labels: {}
timeout: 10s
positions:
filename: /run/promtail/positions.yaml
server:
http_listen_port: 3101
clients:
- url: http://loki-distributed.auth-proxy.svc:80/loki/api/v1/push
external_labels:
kubernetes_cluster: scaleway
target_config:
sync_period: 10s
scrape_configs:
- job_name: kubernetes-pods
pipeline_stages:
- docker: {}
- cri: {}
- match:
selector: '{app="weave-net"}'
action: drop
- match:
selector: '{filename=~".*konnectivity.*"}'
action: drop
- match:
selector: '{name=~".*"} |~ ".*/healthz.*"'
action: drop
- match:
selector: '{name=~".*"} |~ ".*/api/health.*"'
action: drop
- match:
selector: '{name=~".*"} |~ ".*kube-probe/.*"'
action: drop
- match:
selector: '{app="internal-proxy"}'
action: drop
- match:
selector: '{app="non-auth-proxy"}'
action: drop
- match:
selector: '{app="vpa"}'
action: drop
- match:
selector: '{app="promtail"}'
action: drop
- match:
selector: '{app="csi-node"}'
action: drop
- match:
selector: '{app="victoria-metrics"}'
action: drop
- match:
selector: '{app="git-sync"}'
action: drop
- match:
selector: '{app="ingress-nginx"}'
stages:
- json:
expressions:
request_host: host
request_path: path
request_method: method
response_status: status
- drop:
source: "request_path"
value: "/healthz"
- drop:
source: "request_path"
value: "/health"
- labels:
request_host:
request_method:
response_status:
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_controller_name
regex: ([0-9a-z-.]+?)(-[0-9a-f]{8,10})?
action: replace
target_label: __tmp_controller_name
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_name
- __meta_kubernetes_pod_label_app
- __tmp_controller_name
- __meta_kubernetes_pod_name
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: app
- source_labels:
- __meta_kubernetes_pod_label_app_kubernetes_io_component
- __meta_kubernetes_pod_label_component
regex: ^;*([^;]+)(;.*)?$
action: replace
target_label: component
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: node_name
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
replacement: $1
separator: /
source_labels:
- namespace
- app
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- action: replace
replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: replace
replacement: /var/log/pods/*$1/*.log
regex: true/(.*)
separator: /
source_labels:
- __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
- __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
- __meta_kubernetes_pod_container_name
target_label: __path__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: promtail-clusterrole
labels:
app.kubernetes.io/name: promtail
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "watch", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: promtail-clusterrolebinding
labels:
app.kubernetes.io/name: promtail
subjects:
- kind: ServiceAccount
name: promtail
namespace: monitoring
roleRef:
kind: ClusterRole
name: promtail-clusterrole
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: promtail
namespace: monitoring
labels:
app.kubernetes.io/name: promtail
annotations:
configmap.reloader.stakater.com/reload: "promtail"
spec:
selector:
matchLabels:
app.kubernetes.io/name: promtail
template:
metadata:
labels:
app.kubernetes.io/name: promtail
annotations:
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: promtail
containers:
- name: promtail
image: "grafana/promtail:2.9.10"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- name: run
mountPath: /run/promtail
- mountPath: /var/lib/docker/containers
name: docker
readOnly: true
- mountPath: /var/log/pods
name: pods
readOnly: true
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 3101
name: http-metrics
securityContext:
readOnlyRootFilesystem: true
runAsGroup: 0
runAsUser: 0
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: config
configMap:
name: promtail
- name: run
hostPath:
path: /run/promtail
- hostPath:
path: /var/lib/docker/containers
name: docker
- hostPath:
path: /var/log/pods
name: pods
---

View File

@ -1,170 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: vmagent
namespace: monitoring
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
data:
prometheus.yml: |
global:
scrape_interval: 1m
external_labels:
source: scaleway
agent: vmagent
scrape_configs:
- job_name: 'vmagent'
static_configs:
- targets: ['localhost:8429']
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
job_name: kubernetes-nodes
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- replacement: kubernetes.default.svc:443
target_label: __address__
- regex: (.+)
replacement: /api/v1/nodes/$1/proxy/metrics
source_labels:
- __meta_kubernetes_node_name
target_label: __metrics_path__
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- action: drop
source_labels: [__meta_kubernetes_pod_container_init]
regex: true
- action: keep
regex: true
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scrape
- action: replace
regex: (https?)
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scheme
target_label: __scheme__
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: kubernetes_node
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: drop
source_labels: [__meta_kubernetes_pod_container_init]
regex: true
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
- action: drop
regex: Pending|Succeeded|Failed
source_labels:
- __meta_kubernetes_pod_phase
- job_name: 'node-exporter'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_endpoints_name]
regex: 'prometheus-node-exporter'
action: keep
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vmagent
namespace: monitoring
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
annotations:
configmap.reloader.stakater.com/reload: "vmagent"
spec:
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: victoria-metrics
app.kubernetes.io/component: agent
spec:
serviceAccountName: prometheus-server
containers:
- name: vmagent
image: "victoriametrics/vmagent:v1.106.1"
imagePullPolicy: "IfNotPresent"
args:
- -remoteWrite.url=http://vmcluster.auth-proxy.svc/insert/0/prometheus/
- -remoteWrite.showURL
- -promscrape.config=/config/prometheus.yml
- -promscrape.suppressDuplicateScrapeTargetErrors
volumeMounts:
- name: config-volume
mountPath: /config
volumes:
- name: config-volume
configMap:
name: vmagent
---

View File

@ -0,0 +1,61 @@
apiVersion: v1
kind: Namespace
metadata:
name: nextcloud
---
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-values
namespace: nextcloud
annotations:
kube-1password: v32a4zpuvhmxxrwmtmmv6526ry
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: values.yaml
type: Opaque
---
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: nextcloud
namespace: nextcloud
spec:
chart:
repository: https://kubernetes-charts.storage.googleapis.com
name: nextcloud
version: 1.10.0
maxHistory: 5
valuesFrom:
- secretKeyRef:
name: nextcloud-values
namespace: nextcloud
key: values.yaml
optional: false
values:
image:
tag: 18-apache
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/frontend-entry-points: http,https
traefik.ingress.kubernetes.io/redirect-entry-point: https
traefik.ingress.kubernetes.io/redirect-permanent: "true"
tls:
- hosts:
- nextcloud.cluster.fun
secretName: nextcloud-ingress
nextcloud:
host: nextcloud.cluster.fun
persistence:
enabled: true
storageClass: scw-bssd-retain
size: 5Gi
cronjob:
enabled: true
resources:
requests:
memory: 500Mi

View File

@ -1,416 +0,0 @@
---
# Source: nextcloud/charts/redis/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-nextcloud-redis
namespace: nextcloud
labels:
app: redis
release: "nextcloud-nextcloud"
annotations:
kube-1password: u54jxidod7tlnpwva37f5hcu5y
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
# Source: nextcloud/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-nextcloud
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
annotations:
kube-1password: iaz4xmtr2czpsjl6xirhryzfia
kube-1password/vault: Kubernetes
kube-1password/secret-text-parse: "true"
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: nextcloud-s3
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
annotations:
kube-1password: 7zanxzbyzfctc5d2yqfq6e5zcy
kube-1password/vault: Kubernetes
kube-1password/secret-text-key: s3.config.php
type: Opaque
---
# Source: nextcloud/templates/config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: nextcloud-nextcloud-config
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
data:
general.config.php: |-
<?php
$CONFIG = array (
'overwriteprotocol' => 'https'
);
.htaccess: |-
      # line below is for Apache 2.4
<ifModule mod_authz_core.c>
Require all denied
</ifModule>
      # line below is for Apache 2.2
<ifModule !mod_authz_core.c>
deny from all
</ifModule>
# section for Apache 2.2 and 2.4
<ifModule mod_autoindex.c>
IndexIgnore *
</ifModule>
redis.config.php: |-
<?php
if (getenv('REDIS_HOST')) {
$CONFIG = array (
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.locking' => '\\OC\\Memcache\\Redis',
'redis' => array(
'host' => getenv('REDIS_HOST'),
'port' => getenv('REDIS_HOST_PORT') ?: 6379,
'password' => getenv('REDIS_HOST_PASSWORD'),
'dbindex' => getenv('REDIS_DB_INDEX') ?: 0,
),
);
}
apache-pretty-urls.config.php: |-
<?php
$CONFIG = array (
'htaccess.RewriteBase' => '/',
);
apcu.config.php: |-
<?php
$CONFIG = array (
'memcache.local' => '\\OC\\Memcache\\APCu',
);
apps.config.php: |-
<?php
$CONFIG = array (
"apps_paths" => array (
0 => array (
"path" => OC::$SERVERROOT."/apps",
"url" => "/apps",
"writable" => false,
),
1 => array (
"path" => OC::$SERVERROOT."/custom_apps",
"url" => "/custom_apps",
"writable" => true,
),
),
);
autoconfig.php: |-
<?php
$autoconfig_enabled = false;
if (getenv('SQLITE_DATABASE')) {
$AUTOCONFIG["dbtype"] = "sqlite";
$AUTOCONFIG["dbname"] = getenv('SQLITE_DATABASE');
$autoconfig_enabled = true;
} elseif (getenv('MYSQL_DATABASE') && getenv('MYSQL_USER') && getenv('MYSQL_PASSWORD') && getenv('MYSQL_HOST')) {
$AUTOCONFIG["dbtype"] = "mysql";
$AUTOCONFIG["dbname"] = getenv('MYSQL_DATABASE');
$AUTOCONFIG["dbuser"] = getenv('MYSQL_USER');
$AUTOCONFIG["dbpass"] = getenv('MYSQL_PASSWORD');
$AUTOCONFIG["dbhost"] = getenv('MYSQL_HOST');
$autoconfig_enabled = true;
} elseif (getenv('POSTGRES_DB') && getenv('POSTGRES_USER') && getenv('POSTGRES_PASSWORD') && getenv('POSTGRES_HOST')) {
$AUTOCONFIG["dbtype"] = "pgsql";
$AUTOCONFIG["dbname"] = getenv('POSTGRES_DB');
$AUTOCONFIG["dbuser"] = getenv('POSTGRES_USER');
$AUTOCONFIG["dbpass"] = getenv('POSTGRES_PASSWORD');
$AUTOCONFIG["dbhost"] = getenv('POSTGRES_HOST');
$autoconfig_enabled = true;
}
if ($autoconfig_enabled) {
$AUTOCONFIG["directory"] = getenv('NEXTCLOUD_DATA_DIR') ?: "/var/www/html/data";
}
smtp.config.php: |-
<?php
if (getenv('SMTP_HOST') && getenv('MAIL_FROM_ADDRESS') && getenv('MAIL_DOMAIN')) {
$CONFIG = array (
'mail_smtpmode' => 'smtp',
'mail_smtphost' => getenv('SMTP_HOST'),
'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),
'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',
'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),
'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',
'mail_smtpname' => getenv('SMTP_NAME') ?: '',
'mail_smtppassword' => getenv('SMTP_PASSWORD') ?: '',
'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),
'mail_domain' => getenv('MAIL_DOMAIN'),
);
}
---
# Source: nextcloud/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: nextcloud-nextcloud
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/component: app
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/component: app
---
# Source: nextcloud/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nextcloud-nextcloud
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/component: app
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/component: app
template:
metadata:
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/component: app
nextcloud-nextcloud-redis-client: "true"
spec:
containers:
- name: nextcloud
image: "nextcloud:30.0.2-apache"
imagePullPolicy: IfNotPresent
env:
- name: SQLITE_DATABASE
value: "nextcloud"
- name: NEXTCLOUD_ADMIN_USER
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud
key: nextcloud-username
- name: NEXTCLOUD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud
key: nextcloud-password
- name: NEXTCLOUD_TRUSTED_DOMAINS
value: nextcloud.cluster.fun
- name: NEXTCLOUD_DATA_DIR
value: "/var/www/html/data"
- name: REDIS_HOST
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud-redis
key: redis-host
- name: REDIS_PORT
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud-redis
key: redis-port
- name: REDIS_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud-redis
key: redis-password
- name: REDIS_DB_INDEX
valueFrom:
secretKeyRef:
name: nextcloud-nextcloud-redis
key: redis-db-index
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /status.php
port: http
httpHeaders:
- name: Host
value: "nextcloud.cluster.fun"
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /status.php
port: http
httpHeaders:
- name: Host
value: "nextcloud.cluster.fun"
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
# Cover case where upgrade is being performed
startupProbe:
httpGet:
path: /status.php
port: http
httpHeaders:
- name: Host
value: "nextcloud.cluster.fun"
failureThreshold: 30
periodSeconds: 10
resources:
requests:
memory: 450Mi
volumeMounts:
- name: nextcloud-data
mountPath: /var/www/
subPath: root
- name: nextcloud-data
mountPath: /var/www/html
subPath: html
- name: nextcloud-data
mountPath: /var/www/html/data
subPath: data
- name: nextcloud-data
mountPath: /var/www/html/config
subPath: config
- name: nextcloud-data
mountPath: /var/www/html/custom_apps
subPath: custom_apps
- name: nextcloud-data
mountPath: /var/www/tmp
subPath: tmp
- name: nextcloud-data
mountPath: /var/www/html/themes
subPath: themes
- name: nextcloud-config
mountPath: /var/www/html/config/general.config.php
subPath: general.config.php
- name: nextcloud-s3
mountPath: /var/www/html/config/s3.config.php
subPath: s3.config.php
- name: nextcloud-config
mountPath: /var/www/html/config/.htaccess
subPath: .htaccess
- name: nextcloud-config
mountPath: /var/www/html/config/apache-pretty-urls.config.php
subPath: apache-pretty-urls.config.php
- name: nextcloud-config
mountPath: /var/www/html/config/apcu.config.php
subPath: apcu.config.php
- name: nextcloud-config
mountPath: /var/www/html/config/apps.config.php
subPath: apps.config.php
- name: nextcloud-config
mountPath: /var/www/html/config/autoconfig.php
subPath: autoconfig.php
- name: nextcloud-config
mountPath: /var/www/html/config/redis.config.php
subPath: redis.config.php
- name: nextcloud-config
mountPath: /var/www/html/config/smtp.config.php
subPath: smtp.config.php
volumes:
- name: nextcloud-data
persistentVolumeClaim:
claimName: nextcloud-nextcloud-nextcloud
- name: nextcloud-config
configMap:
name: nextcloud-nextcloud-config
- name: nextcloud-s3
secret:
secretName: nextcloud-s3
# Will mount configuration files as www-data (id: 33) for nextcloud
securityContext:
fsGroup: 33
---
# Source: nextcloud/templates/cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: nextcloud-nextcloud-cron
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
annotations:
{}
spec:
schedule: "*/5 * * * *"
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 5
successfulJobsHistoryLimit: 2
jobTemplate:
metadata:
labels:
app.kubernetes.io/name: nextcloud
spec:
template:
metadata:
labels:
app.kubernetes.io/name: nextcloud
spec:
restartPolicy: Never
containers:
- name: nextcloud
image: "nextcloud:30.0.2-apache"
imagePullPolicy: IfNotPresent
command: [ "curl" ]
args:
- "--fail"
- "-L"
- "https://nextcloud.cluster.fun/cron.php"
resources:
requests:
memory: 200Mi
---
# Source: nextcloud/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: nextcloud-nextcloud
labels:
app.kubernetes.io/name: nextcloud
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/component: app
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
rules:
- host: nextcloud.cluster.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nextcloud-nextcloud
port:
number: 8080
tls:
- hosts:
- nextcloud.cluster.fun
secretName: nextcloud-ingress

View File

@ -1,18 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nextcloud-nextcloud-nextcloud
labels:
app.kubernetes.io/name: nextcloud
helm.sh/chart: nextcloud-2.6.3
app.kubernetes.io/instance: nextcloud-nextcloud
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: app
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: sbs-default-retain
---

View File

@ -1,717 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resourceNames:
- ingress-nginx-leader
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data:
allow-snippet-annotations: "true"
use-proxy-protocol: "true"
log-format-upstream: '{"time": "$time_iso8601", "request_id": "$req_id", "remote_user": "$remote_user", "remote_addr_masked": "$remote_addr_masked", "bytes_sent": $bytes_sent, "request_time": $request_time, "status": $status, "host": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", "request_length": $request_length, "duration": $request_time,"method": "$request_method", "http_referrer": "$http_referer", "http_user_agent": "$http_user_agent", "redirect_location": "$redirect_location" }'
plugins: "redirect_location"
location-snippet: |
set $redirect_location '';
server-snippet: |
set_by_lua_block $remote_addr_masked {
local bit = require("bit")
local hval = 2166136261
local rem_addr = ngx.var.remote_addr
for w in rem_addr:gmatch(".") do
hval = bit.bxor(hval,string.byte(w))
hval = hval + bit.lshift(hval,1) + bit.lshift(hval,4) + bit.lshift(hval,7) + bit.lshift(hval,8) + bit.lshift(hval,24)
end
if hval < 0 then
hval = bit.bnot(hval)
end
local octet1 = bit.band(bit.rshift(hval,24), 255)
local octet2 = bit.band(bit.rshift(hval,16), 255)
local octet3 = bit.band(bit.rshift(hval,8), 255)
local octet4 = bit.band(hval, 255)
local op = octet1 .. "." .. octet2 .. "." .. octet3 .. "." .. octet4
return op
}
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
k8s.scw.cloud/ingress: nginx
k8s.scw.cloud/object: ConfigMap
k8s.scw.cloud/system: ingress
name: ingress-nginx-plugin-redirect-location
namespace: ingress-nginx
data:
main.lua: |
local ngx = ngx
local _M = {}
function _M.header_filter()
ngx.var.redirect_location = ngx.resp.get_headers()["Location"]
end
return _M
---
apiVersion: v1
kind: Service
metadata:
annotations:
service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
externalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
replicas: 2
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
spec:
containers:
- args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --annotations-prefix=nginx.ingress.kubernetes.io
- --watch-ingress-without-class
- --enable-metrics
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.11.3@sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
runAsUser: 101
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
- name: plugins
mountPath: /etc/nginx/lua/plugins/redirect_location
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
- name: plugins
configMap:
name: ingress-nginx-plugin-redirect-location
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission-create
spec:
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.5.1
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None

Some files were not shown because too many files have changed in this diff.