Updated tailscale proxies

Signed-off-by: Marcus Noble <github@marcusnoble.co.uk>
Marcus Noble 2022-08-03 13:12:49 +01:00
parent 0dd5cb143f
commit 52c88621d4
Signed by: AverageMarcus
GPG Key ID: B8F2DB8A7AEBAF78
4 changed files with 427 additions and 199 deletions

View File

@@ -23,6 +23,49 @@ metadata:
kube-1password/vault: Kubernetes
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: tailscale-auth-proxy
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale-auth-proxy
labels:
app.kubernetes.io/name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale-auth-proxy
labels:
app.kubernetes.io/name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale-auth-proxy"
roleRef:
kind: Role
name: tailscale-auth-proxy
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale-auth-proxy
labels:
app.kubernetes.io/name: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["tailscale-auth-proxy"]
resources: ["secrets"]
verbs: ["get", "update"]
---
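A note on the Role/RoleBinding pair above: Kubernetes RBAC cannot restrict the create verb by resourceNames (the object name is not known at authorization time), which is why create is granted on secrets broadly while get and update are pinned to the tailscale-auth-proxy Secret. A quick way to sanity-check the scoping, assuming these resources land in the auth-proxy namespace like the rest of the proxies:

# should print "yes": the ServiceAccount may read its own state Secret
kubectl -n auth-proxy auth can-i get secret/tailscale-auth-proxy \
  --as=system:serviceaccount:auth-proxy:tailscale-auth-proxy
# should print "no": other Secrets, e.g. the shared auth key, stay off-limits
kubectl -n auth-proxy auth can-i get secret/tailscale-auth \
  --as=system:serviceaccount:auth-proxy:tailscale-auth-proxy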
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -32,6 +75,8 @@ metadata:
app: auth-proxy
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: auth-proxy
@@ -40,10 +85,27 @@ spec:
labels:
app: auth-proxy
spec:
dnsPolicy: None
serviceAccountName: tailscale-auth-proxy
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
- 100.100.100.100
initContainers:
- name: sysctler
image: busybox
securityContext:
privileged: true
command: ["/bin/sh"]
args:
- -c
- |
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.forwarding=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
resources:
requests:
cpu: 1m
memory: 1Mi
containers:
- name: oauth-proxy
image: quay.io/oauth2-proxy/oauth2-proxy:v7.2.1
@@ -84,14 +146,20 @@ spec:
requests:
memory: 50Mi
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.22
image: ghcr.io/tailscale/tailscale:v1.29
imagePullPolicy: Always
env:
- name: AUTH_KEY
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_KUBE_SECRET
value: tailscale-auth-proxy
- name: TS_ACCEPT_DNS
value: "true"
- name: TS_EXTRA_ARGS
value: "--hostname=auth-proxy-oauth2"
securityContext:
capabilities:
add:
@@ -104,13 +172,13 @@ spec:
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --socket=/tmp/tailscaled.sock &
tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=true \
--authkey=${AUTH_KEY} \
--hostname=auth-proxy-oauth2
--accept-dns=${TS_ACCEPT_DNS} \
--authkey=${TS_AUTH_KEY} \
${TS_EXTRA_ARGS}
echo "Re-enabling incoming traffic from the cluster"
wait ${PID}
---
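The substance of this change is tailscaled's state handling: --state=kube:${TS_KUBE_SECRET} persists the node keys in the named Kubernetes Secret (hence the new Secret, ServiceAccount and RBAC), so a restarted pod rejoins the tailnet instead of burning a fresh auth key, and the previously hard-coded up flags move into TS_* env vars. A couple of spot-checks after rollout, assuming the auth-proxy namespace and using the app label from the Deployment above:

# the state Secret should now hold tailscaled's data
kubectl -n auth-proxy get secret tailscale-auth-proxy -o jsonpath='{.data}'

# ask the sidecar for its view of the tailnet
POD=$(kubectl -n auth-proxy get pod -l app=auth-proxy -o name | head -n1)
kubectl -n auth-proxy exec "$POD" -c tailscale -- \
  tailscale --socket=/tmp/tailscaled.sock status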

View File

@@ -0,0 +1,231 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: host-mappings
namespace: auth-proxy
labels:
app: proxy
data:
mapping.json: |
{
"tekton-el.auth-proxy.svc": "tekton-el.cluster.local",
"home.auth-proxy.svc": "home.cluster.local",
"home.cluster.fun": "home.cluster.local",
"vmcluster.auth-proxy.svc": "vmcluster.cluster.local",
"loki.auth-proxy.svc": "loki-write.cluster.local",
"loki.auth-proxy.svc:80": "loki-write.cluster.local",
"loki-distributed.auth-proxy.svc": "loki-loki.cluster.local",
"loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local"
}
---
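mapping.json is the routing table for the proxy container below: a request whose Host header matches a key on the left is forwarded upstream with the hostname on the right, and the ":80" variants cover clients that send an explicit port in the Host header. An illustrative request against the loki Service defined further down (/ready is Loki's readiness endpoint):

# resolves to the internal-proxy pod, which rewrites the Host to
# loki-write.cluster.local and tunnels the request over tailscale
curl -s http://loki.auth-proxy.svc/ready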
apiVersion: v1
kind: Secret
metadata:
name: tailscale-internal-proxy
namespace: auth-proxy
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale-internal-proxy"
roleRef:
kind: Role
name: tailscale-internal-proxy
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["tailscale-internal-proxy"]
resources: ["secrets"]
verbs: ["get", "update"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: internal-proxy
namespace: auth-proxy
labels:
app: internal-proxy
annotations:
configmap.reloader.stakater.com/reload: "host-mappings"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: internal-proxy
template:
metadata:
labels:
app: internal-proxy
spec:
serviceAccountName: tailscale-internal-proxy
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
- 100.100.100.100
containers:
- name: proxy
image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest
imagePullPolicy: Always
env:
- name: PROXY_DESTINATION
value: talos.averagemarcus.github.beta.tailscale.net
- name: PORT
value: "8080"
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- name: host-mappings
mountPath: /config/
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.29
imagePullPolicy: Always
tty: true
env:
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_KUBE_SECRET
value: tailscale-internal-proxy
- name: TS_ACCEPT_DNS
value: "true"
- name: TS_EXTRA_ARGS
value: "--hostname=auth-proxy-internal-proxy"
securityContext:
capabilities:
add:
- NET_ADMIN
command:
- sh
- -c
- |
export PATH=$PATH:/tailscale/bin
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=${TS_ACCEPT_DNS} \
--authkey=${TS_AUTH_KEY} \
${TS_EXTRA_ARGS}
echo "Re-enabling incoming traffic from the cluster"
wait ${PID}
volumes:
- name: host-mappings
configMap:
name: host-mappings
---
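The proxy and tailscale containers share the pod's network namespace, so PROXY_DESTINATION, a tailnet MagicDNS name, resolves through the 100.100.100.100 nameserver added under dnsConfig and egresses via the sidecar's tunnel. A rough in-pod check, assuming the proxy image ships nslookup and with the pod name left as a placeholder:

kubectl -n auth-proxy exec <internal-proxy-pod> -c proxy -- \
  nslookup talos.averagemarcus.github.beta.tailscale.net 100.100.100.100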
apiVersion: v1
kind: Service
metadata:
name: tekton-el
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki-distributed
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: vmcluster
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
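All of the Services above select the same internal-proxy pod; what varies is the name clients connect through, which becomes the Host header that mapping.json keys on. Cluster DNS gets the request to the proxy, and the Host header tells the proxy where to send it. A sketch of the path for one of them:

# vmcluster.auth-proxy.svc -> internal-proxy:8080 -> Host rewritten to
# vmcluster.cluster.local -> forwarded over the tailnet
curl -s http://vmcluster.auth-proxy.svc/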

View File

@@ -1,182 +1,47 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: host-mappings
namespace: auth-proxy
labels:
app: proxy
data:
mapping.json: |
{
"tekton-el.auth-proxy.svc": "tekton-el.cluster.local",
"home.auth-proxy.svc": "home.cluster.local",
"home.cluster.fun": "home.cluster.local",
"vmcluster.auth-proxy.svc": "vmcluster.cluster.local",
"loki.auth-proxy.svc": "loki-write.cluster.local",
"loki.auth-proxy.svc:80": "loki-write.cluster.local",
"loki-distributed.auth-proxy.svc": "loki-loki.cluster.local",
"loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local"
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: internal-proxy
namespace: auth-proxy
labels:
app: internal-proxy
annotations:
configmap.reloader.stakater.com/reload: "host-mappings"
spec:
replicas: 1
selector:
matchLabels:
app: internal-proxy
template:
metadata:
labels:
app: internal-proxy
spec:
dnsPolicy: None
dnsConfig:
nameservers:
- 100.100.100.100
containers:
- name: proxy
image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest
imagePullPolicy: Always
env:
- name: PROXY_DESTINATION
value: talos.averagemarcus.github.beta.tailscale.net
- name: PORT
value: "8080"
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- name: host-mappings
mountPath: /config/
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.22
imagePullPolicy: Always
env:
- name: AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
securityContext:
capabilities:
add:
- NET_ADMIN
command:
- sh
- -c
- |
export PATH=$PATH:/tailscale/bin
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=true \
--authkey=${AUTH_KEY} \
--hostname=auth-proxy-internal-proxy
wait ${PID}
volumes:
- name: host-mappings
configMap:
name: host-mappings
---
apiVersion: v1
kind: Service
metadata:
name: tekton-el
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: loki-distributed
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: vmcluster
namespace: auth-proxy
labels:
app: internal-proxy
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: 8080
selector:
app: internal-proxy
type: ClusterIP
---
apiVersion: v1
kind: Secret
metadata:
name: tailscale-non-auth-proxy
namespace: auth-proxy
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale-non-auth-proxy
labels:
app.kubernetes.io/name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale-non-auth-proxy
labels:
app.kubernetes.io/name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale-non-auth-proxy"
roleRef:
kind: Role
name: tailscale-non-auth-proxy
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale-non-auth-proxy
labels:
app.kubernetes.io/name: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["tailscale-non-auth-proxy"]
resources: ["secrets"]
verbs: ["get", "update"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -186,6 +51,8 @@ metadata:
app: non-auth-proxy
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: non-auth-proxy
@@ -194,7 +61,8 @@ spec:
labels:
app: non-auth-proxy
spec:
dnsPolicy: None
serviceAccountName: tailscale-non-auth-proxy
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
- 100.100.100.100
@@ -239,14 +107,21 @@ spec:
requests:
memory: 50Mi
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.22
image: ghcr.io/tailscale/tailscale:v1.29
imagePullPolicy: Always
tty: true
env:
- name: AUTH_KEY
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_KUBE_SECRET
value: tailscale-non-auth-proxy
- name: TS_ACCEPT_DNS
value: "true"
- name: TS_EXTRA_ARGS
value: "--hostname=non-auth-proxy"
securityContext:
capabilities:
add:
@@ -259,13 +134,13 @@ spec:
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --socket=/tmp/tailscaled.sock &
tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=true \
--authkey=${AUTH_KEY} \
--hostname=non-auth-proxy
--accept-dns=${TS_ACCEPT_DNS} \
--authkey=${TS_AUTH_KEY} \
${TS_EXTRA_ARGS}
echo "Re-enabling incoming traffic from the cluster"
wait ${PID}
---
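With the flags now driven by env vars, all four proxies share the same script body and only the TS_* values differ. For this pod the command expands to exactly what the old hard-coded version ran:

tailscale --socket=/tmp/tailscaled.sock up \
  --accept-dns=true \
  --authkey=${TS_AUTH_KEY} \
  --hostname=non-auth-proxy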

View File

@@ -25,6 +25,49 @@ data:
"loki-distributed.proxy-civo.svc:80": "loki-loki.cluster.local"
}
---
apiVersion: v1
kind: Secret
metadata:
name: tailscale-internal-proxy
namespace: proxy-civo
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale-internal-proxy"
roleRef:
kind: Role
name: tailscale-internal-proxy
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale-internal-proxy
labels:
app.kubernetes.io/name: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
- apiGroups: [""]
resourceNames: ["tailscale-internal-proxy"]
resources: ["secrets"]
verbs: ["get", "update"]
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -36,6 +79,8 @@ metadata:
configmap.reloader.stakater.com/reload: "host-mappings"
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: internal-proxy
@@ -44,7 +89,8 @@ spec:
labels:
app: internal-proxy
spec:
dnsPolicy: None
serviceAccountName: tailscale-internal-proxy
dnsPolicy: ClusterFirst
dnsConfig:
nameservers:
- 100.100.100.100
@@ -64,14 +110,21 @@ spec:
- name: host-mappings
mountPath: /config/
- name: tailscale
image: ghcr.io/tailscale/tailscale:v1.22
image: ghcr.io/tailscale/tailscale:v1.29
imagePullPolicy: Always
tty: true
env:
- name: AUTH_KEY
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: password
- name: TS_KUBE_SECRET
value: tailscale-internal-proxy
- name: TS_ACCEPT_DNS
value: "true"
- name: TS_EXTRA_ARGS
value: "--hostname=proxy-civo-internal-proxy"
securityContext:
capabilities:
add:
@@ -84,13 +137,14 @@ spec:
if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi
if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi
echo "Starting tailscaled"
tailscaled --socket=/tmp/tailscaled.sock &
tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock &
PID=$!
echo "Running tailscale up"
tailscale --socket=/tmp/tailscaled.sock up \
--accept-dns=true \
--authkey=${AUTH_KEY} \
--hostname=proxy-civo-internal-proxy
--accept-dns=${TS_ACCEPT_DNS} \
--authkey=${TS_AUTH_KEY} \
${TS_EXTRA_ARGS}
echo "Re-enabling incoming traffic from the cluster"
wait ${PID}
volumes:
- name: host-mappings
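
Each deployment gets its own TS_KUBE_SECRET and --hostname, so the four sidecars show up as distinct tailnet nodes (auth-proxy-oauth2, auth-proxy-internal-proxy, non-auth-proxy, proxy-civo-internal-proxy) with independently persisted state. A final check from the civo side, pod name again a placeholder:

kubectl -n proxy-civo exec <internal-proxy-pod> -c tailscale -- \
  tailscale --socket=/tmp/tailscaled.sock status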