# cluster.fun/manifests/auth-proxy/internal-proxy.yaml
# (232 lines, 5.0 KiB, YAML — scrape metadata preserved as comments)
# ConfigMap consumed by the proxy container (mounted at /config/).
# Maps the external/Service hostnames clients use to the in-cluster
# (Tailscale-reachable) hostnames the proxy should forward to.
apiVersion: v1
kind: ConfigMap
metadata:
  name: host-mappings
  namespace: auth-proxy
  labels:
    app: proxy
data:
  mapping.json: |
    {
      "tekton-el.auth-proxy.svc": "tekton-el.cluster.local",
      "home.auth-proxy.svc": "home.cluster.local",
      "home.cluster.fun": "home.cluster.local",
      "vmcluster.auth-proxy.svc": "vmcluster.cluster.local",
      "loki.auth-proxy.svc": "loki-write.cluster.local",
      "loki.auth-proxy.svc:80": "loki-write.cluster.local",
      "loki-distributed.auth-proxy.svc": "loki-loki.cluster.local",
      "loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local"
    }
---
# Empty Secret used as the tailscaled state store (--state=kube:...);
# the tailscale container populates it at runtime via the Role below.
apiVersion: v1
kind: Secret
metadata:
  name: tailscale-internal-proxy
  namespace: auth-proxy
type: Opaque
---
# ServiceAccount used by the internal-proxy Deployment so the tailscale
# sidecar can persist its state into the Secret above.
# namespace added explicitly: the Deployment that references this SA lives
# in auth-proxy, and every other namespaced resource in this file sets it.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tailscale-internal-proxy
  namespace: auth-proxy
  labels:
    app.kubernetes.io/name: tailscale
---
# Grants the tailscale ServiceAccount the Secret permissions defined in the
# Role of the same name.
# namespace added on metadata and on the subject: RoleBindings are
# namespaced, and ServiceAccount subjects require an explicit namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tailscale-internal-proxy
  namespace: auth-proxy
  labels:
    app.kubernetes.io/name: tailscale
subjects:
  - kind: ServiceAccount
    name: tailscale-internal-proxy
    namespace: auth-proxy
roleRef:
  kind: Role
  name: tailscale-internal-proxy
  apiGroup: rbac.authorization.k8s.io
---
# Minimal RBAC for tailscaled's kube state store: it may create Secrets,
# but may only get/update the one named tailscale-internal-proxy.
# namespace added explicitly to match the resources it governs.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tailscale-internal-proxy
  namespace: auth-proxy
  labels:
    app.kubernetes.io/name: tailscale
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create"]
  - apiGroups: [""]
    resourceNames: ["tailscale-internal-proxy"]
    resources: ["secrets"]
    verbs: ["get", "update"]
---
# Two-container pod: `proxy` terminates cluster traffic on :8080 and rewrites
# hosts per the mounted mapping.json; the `tailscale` sidecar joins the
# tailnet so PROXY_DESTINATION is reachable.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: internal-proxy
  namespace: auth-proxy
  labels:
    app: internal-proxy
  annotations:
    # stakater/reloader: restart pods when the host-mappings ConfigMap changes.
    configmap.reloader.stakater.com/reload: "host-mappings"
spec:
  replicas: 1
  strategy:
    # Recreate avoids two tailscale nodes briefly sharing one state Secret.
    type: Recreate
  selector:
    matchLabels:
      app: internal-proxy
  template:
    metadata:
      labels:
        app: internal-proxy
    spec:
      serviceAccountName: tailscale-internal-proxy
      dnsPolicy: ClusterFirst
      dnsConfig:
        # Tailscale MagicDNS resolver, so tailnet hostnames resolve in-pod.
        nameservers:
          - 100.100.100.100
      containers:
        - name: proxy
          image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest
          imagePullPolicy: Always
          env:
            - name: PROXY_DESTINATION
              value: talos.averagemarcus.github.beta.tailscale.net
            - name: PORT
              value: "8080"
          ports:
            - containerPort: 8080
              protocol: TCP
          volumeMounts:
            - name: host-mappings
              mountPath: /config/
        - name: tailscale
          image: ghcr.io/tailscale/tailscale:v1.29
          imagePullPolicy: Always
          tty: true
          env:
            - name: TS_AUTH_KEY
              valueFrom:
                secretKeyRef:
                  # NOTE(review): `tailscale-auth` is not defined in this file —
                  # presumably provisioned separately; verify it exists.
                  name: tailscale-auth
                  key: password
            - name: TS_KUBE_SECRET
              value: tailscale-internal-proxy
            - name: TS_ACCEPT_DNS
              value: "true"
            - name: TS_EXTRA_ARGS
              value: "--hostname=auth-proxy-internal-proxy"
          securityContext:
            capabilities:
              add:
                # Required for the tun device / routing setup below.
                - NET_ADMIN
          command:
            - sh
            - -c
            - |
              export PATH=$PATH:/tailscale/bin
              # POSIX [ ] instead of bash-only [[ ]] — the image entrypoint is sh.
              if [ ! -d /dev/net ]; then mkdir -p /dev/net; fi
              if [ ! -c /dev/net/tun ]; then mknod /dev/net/tun c 10 200; fi
              echo "Starting tailscaled"
              tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
              PID=$!
              echo "Running tailscale up"
              tailscale --socket=/tmp/tailscaled.sock up \
                --accept-dns=${TS_ACCEPT_DNS} \
                --authkey=${TS_AUTH_KEY} \
                ${TS_EXTRA_ARGS}
              echo "Re-enabling incoming traffic from the cluster"
              wait ${PID}
      volumes:
        - name: host-mappings
          configMap:
            name: host-mappings
---
# Named alias Service: requests to tekton-el.auth-proxy.svc hit the proxy
# pod, which forwards per mapping.json.
apiVersion: v1
kind: Service
metadata:
  name: tekton-el
  namespace: auth-proxy
  labels:
    app: internal-proxy
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: internal-proxy
  type: ClusterIP
---
# Named alias Service: requests to loki.auth-proxy.svc hit the proxy pod,
# which forwards per mapping.json.
apiVersion: v1
kind: Service
metadata:
  name: loki
  namespace: auth-proxy
  labels:
    app: internal-proxy
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: internal-proxy
  type: ClusterIP
---
# Named alias Service: requests to loki-distributed.auth-proxy.svc hit the
# proxy pod, which forwards per mapping.json.
apiVersion: v1
kind: Service
metadata:
  name: loki-distributed
  namespace: auth-proxy
  labels:
    app: internal-proxy
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: internal-proxy
  type: ClusterIP
---
# Named alias Service: requests to prometheus.auth-proxy.svc hit the proxy
# pod. NOTE(review): no prometheus entry exists in mapping.json — confirm
# the proxy's default destination handles it.
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: auth-proxy
  labels:
    app: internal-proxy
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: internal-proxy
  type: ClusterIP
---
# Named alias Service: requests to vmcluster.auth-proxy.svc hit the proxy
# pod, which forwards per mapping.json.
apiVersion: v1
kind: Service
metadata:
  name: vmcluster
  namespace: auth-proxy
  labels:
    app: internal-proxy
spec:
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
  selector:
    app: internal-proxy
  type: ClusterIP
---