Switch scaleway cluster to using new Tailscale proxy

Signed-off-by: Marcus Noble <github@marcusnoble.co.uk>
This commit is contained in:
Marcus Noble 2024-03-30 16:10:55 +00:00
parent c6ffad092b
commit e825fe3283
Signed by: AverageMarcus
GPG Key ID: B8F2DB8A7AEBAF78

View File

@@ -16,49 +16,6 @@ data:
"loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local"
}
---
apiVersion: v1
kind: Secret
metadata:
name: tailscale-internal-proxy
namespace: auth-proxy
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale-internal-proxy"
roleRef:
kind: Role
name: tailscale-internal-proxy
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["tailscale-internal-proxy"]
resources: ["secrets"]
verbs: ["get", "update"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -81,7 +38,7 @@ spec:
labels:
app: internal-proxy
spec:
- serviceAccountName: tailscale-internal-proxy
+ serviceAccountName: default
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
@@ -95,49 +52,19 @@ spec:
value: talos.averagemarcus.github.beta.tailscale.net
- name: PORT
value: "8080"
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_HOSTNAME
value: auth-proxy-internal-proxy
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- name: host-mappings
mountPath: /config/
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.62
imagePullPolicy: Always
tty: true
env:
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_KUBE_SECRET
value: tailscale-internal-proxy
- name: TS_ACCEPT_DNS
value: "true"
- name: TS_EXTRA_ARGS
value: "--hostname=auth-proxy-internal-proxy"
securityContext:
capabilities:
add:
- NET_ADMIN
command:
- sh
- -c
- |
export PATH=$PATH:/tailscale/bin
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=${TS_ACCEPT_DNS} \
--authkey=${TS_AUTH_KEY} \
${TS_EXTRA_ARGS}
echo "Re-enabling incoming traffic from the cluster"
wait ${PID}
volumes:
- name: host-mappings
configMap: