From 52c88621d46319d3bd8839f07d2cbfbd02c4d39d Mon Sep 17 00:00:00 2001 From: Marcus Noble Date: Wed, 3 Aug 2022 13:12:49 +0100 Subject: [PATCH] Updated tailscale proxies Signed-off-by: Marcus Noble --- manifests/auth-proxy/auth-proxy.yaml | 82 +++++++- manifests/auth-proxy/internal-proxy.yaml | 231 +++++++++++++++++++++ manifests/auth-proxy/non-auth-proxy.yaml | 245 ++++++----------------- manifests/proxy-civo/non-auth-proxy.yaml | 68 ++++++- 4 files changed, 427 insertions(+), 199 deletions(-) create mode 100644 manifests/auth-proxy/internal-proxy.yaml diff --git a/manifests/auth-proxy/auth-proxy.yaml b/manifests/auth-proxy/auth-proxy.yaml index 762b5f5..b041e12 100644 --- a/manifests/auth-proxy/auth-proxy.yaml +++ b/manifests/auth-proxy/auth-proxy.yaml @@ -23,6 +23,49 @@ metadata: kube-1password/vault: Kubernetes type: Opaque --- + +apiVersion: v1 +kind: Secret +metadata: + name: tailscale-auth-proxy +type: Opaque +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tailscale-auth-proxy + labels: + app.kubernetes.io/name: tailscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tailscale-auth-proxy + labels: + app.kubernetes.io/name: tailscale +subjects: +- kind: ServiceAccount + name: "tailscale-auth-proxy" +roleRef: + kind: Role + name: tailscale-auth-proxy + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tailscale-auth-proxy + labels: + app.kubernetes.io/name: tailscale +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +- apiGroups: [""] + resourceNames: ["tailscale-auth-proxy"] + resources: ["secrets"] + verbs: ["get", "update"] +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -32,6 +75,8 @@ metadata: app: auth-proxy spec: replicas: 1 + strategy: + type: Recreate selector: matchLabels: app: auth-proxy @@ -40,10 +85,27 @@ spec: labels: app: auth-proxy spec: - dnsPolicy: None + serviceAccountName: tailscale-auth-proxy + dnsPolicy: ClusterFirst dnsConfig: nameservers: - 100.100.100.100 + initContainers: + - name: sysctler + image: busybox + securityContext: + privileged: true + command: ["/bin/sh"] + args: + - -c + - | + sysctl -w net.ipv4.ip_forward=1 + sysctl -w net.ipv6.conf.all.forwarding=1 + sysctl -w net.ipv6.conf.all.disable_ipv6=0 + resources: + requests: + cpu: 1m + memory: 1Mi containers: - name: oauth-proxy image: quay.io/oauth2-proxy/oauth2-proxy:v7.2.1 @@ -84,14 +146,20 @@ spec: requests: memory: 50Mi - name: tailscale - image: ghcr.io/tailscale/tailscale:v1.22 + image: ghcr.io/tailscale/tailscale:v1.29 imagePullPolicy: Always env: - - name: AUTH_KEY + - name: TS_AUTH_KEY valueFrom: secretKeyRef: name: tailscale-auth key: password + - name: TS_KUBE_SECRET + value: tailscale-auth-proxy + - name: TS_ACCEPT_DNS + value: "true" + - name: TS_EXTRA_ARGS + value: "--hostname=auth-proxy-oauth2" securityContext: capabilities: add: @@ -104,13 +172,13 @@ spec: if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi echo "Starting tailscaled" - tailscaled --socket=/tmp/tailscaled.sock & + tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock & PID=$! 
echo "Running tailscale up" tailscale --socket=/tmp/tailscaled.sock up \ - --accept-dns=true \ - --authkey=${AUTH_KEY} \ - --hostname=auth-proxy-oauth2 + --accept-dns=${TS_ACCEPT_DNS} \ + --authkey=${TS_AUTH_KEY} \ + ${TS_EXTRA_ARGS} echo "Re-enabling incoming traffic from the cluster" wait ${PID} --- diff --git a/manifests/auth-proxy/internal-proxy.yaml b/manifests/auth-proxy/internal-proxy.yaml new file mode 100644 index 0000000..6702c3e --- /dev/null +++ b/manifests/auth-proxy/internal-proxy.yaml @@ -0,0 +1,231 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: host-mappings + namespace: auth-proxy + labels: + app: proxy +data: + mapping.json: | + { + "tekton-el.auth-proxy.svc": "tekton-el.cluster.local", + "home.auth-proxy.svc": "home.cluster.local", + "home.cluster.fun": "home.cluster.local", + "vmcluster.auth-proxy.svc": "vmcluster.cluster.local", + "loki.auth-proxy.svc": "loki-write.cluster.local", + "loki.auth-proxy.svc:80": "loki-write.cluster.local", + "loki-distributed.auth-proxy.svc": "loki-loki.cluster.local", + "loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local" + } +--- +apiVersion: v1 +kind: Secret +metadata: + name: tailscale-internal-proxy + namespace: auth-proxy +type: Opaque +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +subjects: +- kind: ServiceAccount + name: "tailscale-internal-proxy" +roleRef: + kind: Role + name: tailscale-internal-proxy + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +- apiGroups: [""] + resourceNames: ["tailscale-internal-proxy"] + resources: ["secrets"] + verbs: ["get", "update"] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: internal-proxy + namespace: auth-proxy + labels: + app: internal-proxy + annotations: + configmap.reloader.stakater.com/reload: "host-mappings" +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: internal-proxy + template: + metadata: + labels: + app: internal-proxy + spec: + serviceAccountName: tailscale-internal-proxy + dnsPolicy: ClusterFirst + dnsConfig: + nameservers: + - 100.100.100.100 + containers: + - name: proxy + image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest + imagePullPolicy: Always + env: + - name: PROXY_DESTINATION + value: talos.averagemarcus.github.beta.tailscale.net + - name: PORT + value: "8080" + ports: + - containerPort: 8080 + protocol: TCP + volumeMounts: + - name: host-mappings + mountPath: /config/ + - name: tailscale + image: ghcr.io/tailscale/tailscale:v1.29 + imagePullPolicy: Always + tty: true + env: + - name: TS_AUTH_KEY + valueFrom: + secretKeyRef: + name: tailscale-auth + key: password + - name: TS_KUBE_SECRET + value: tailscale-internal-proxy + - name: TS_ACCEPT_DNS + value: "true" + - name: TS_EXTRA_ARGS + value: "--hostname=auth-proxy-internal-proxy" + securityContext: + capabilities: + add: + - NET_ADMIN + command: + - sh + - -c + - | + export PATH=$PATH:/tailscale/bin + if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi + if [[ ! 
-c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi + echo "Starting tailscaled" + tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock & + PID=$! + echo "Running tailscale up" + tailscale --socket=/tmp/tailscaled.sock up \ + --accept-dns=${TS_ACCEPT_DNS} \ + --authkey=${TS_AUTH_KEY} \ + ${TS_EXTRA_ARGS} + echo "Re-enabling incoming traffic from the cluster" + wait ${PID} + volumes: + - name: host-mappings + configMap: + name: host-mappings +--- +apiVersion: v1 +kind: Service +metadata: + name: tekton-el + namespace: auth-proxy + labels: + app: internal-proxy +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: internal-proxy + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: loki + namespace: auth-proxy + labels: + app: internal-proxy +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: internal-proxy + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: loki-distributed + namespace: auth-proxy + labels: + app: internal-proxy +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: internal-proxy + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus + namespace: auth-proxy + labels: + app: internal-proxy +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: internal-proxy + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: vmcluster + namespace: auth-proxy + labels: + app: internal-proxy +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: internal-proxy + type: ClusterIP +--- diff --git a/manifests/auth-proxy/non-auth-proxy.yaml b/manifests/auth-proxy/non-auth-proxy.yaml index 6030aa7..183025d 100644 --- a/manifests/auth-proxy/non-auth-proxy.yaml +++ b/manifests/auth-proxy/non-auth-proxy.yaml @@ -1,182 +1,47 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: host-mappings - namespace: auth-proxy - labels: - app: proxy -data: - mapping.json: | - { - "tekton-el.auth-proxy.svc": "tekton-el.cluster.local", - "home.auth-proxy.svc": "home.cluster.local", - "home.cluster.fun": "home.cluster.local", - "vmcluster.auth-proxy.svc": "vmcluster.cluster.local", - "loki.auth-proxy.svc": "loki-write.cluster.local", - "loki.auth-proxy.svc:80": "loki-write.cluster.local", - "loki-distributed.auth-proxy.svc": "loki-loki.cluster.local", - "loki-distributed.auth-proxy.svc:80": "loki-loki.cluster.local" - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: internal-proxy - namespace: auth-proxy - labels: - app: internal-proxy - annotations: - configmap.reloader.stakater.com/reload: "host-mappings" -spec: - replicas: 1 - selector: - matchLabels: - app: internal-proxy - template: - metadata: - labels: - app: internal-proxy - spec: - dnsPolicy: None - dnsConfig: - nameservers: - - 100.100.100.100 - containers: - - name: proxy - image: rg.fr-par.scw.cloud/averagemarcus/proxy:latest - imagePullPolicy: Always - env: - - name: PROXY_DESTINATION - value: talos.averagemarcus.github.beta.tailscale.net - - name: PORT - value: "8080" - ports: - - containerPort: 8080 - protocol: TCP - volumeMounts: - - name: host-mappings - mountPath: /config/ - - name: tailscale - image: ghcr.io/tailscale/tailscale:v1.22 - imagePullPolicy: Always - env: - - name: AUTH_KEY - valueFrom: - secretKeyRef: - name: tailscale-auth - key: password - securityContext: - capabilities: - add: - - NET_ADMIN - 
command: - - sh - - -c - - | - export PATH=$PATH:/tailscale/bin - if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi - if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi - echo "Starting tailscaled" - tailscaled --socket=/tmp/tailscaled.sock & - PID=$! - echo "Running tailscale up" - tailscale --socket=/tmp/tailscaled.sock up \ - --accept-dns=true \ - --authkey=${AUTH_KEY} \ - --hostname=auth-proxy-internal-proxy - wait ${PID} - volumes: - - name: host-mappings - configMap: - name: host-mappings ---- -apiVersion: v1 -kind: Service -metadata: - name: tekton-el - namespace: auth-proxy - labels: - app: internal-proxy -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: internal-proxy - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: loki - namespace: auth-proxy - labels: - app: internal-proxy -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: internal-proxy - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: loki-distributed - namespace: auth-proxy - labels: - app: internal-proxy -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: internal-proxy - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: prometheus - namespace: auth-proxy - labels: - app: internal-proxy -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: internal-proxy - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - name: vmcluster - namespace: auth-proxy - labels: - app: internal-proxy -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: internal-proxy - type: ClusterIP ---- - +apiVersion: v1 +kind: Secret +metadata: + name: tailscale-non-auth-proxy + namespace: auth-proxy +type: Opaque +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tailscale-non-auth-proxy + labels: + app.kubernetes.io/name: tailscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tailscale-non-auth-proxy + labels: + app.kubernetes.io/name: tailscale +subjects: +- kind: ServiceAccount + name: "tailscale-non-auth-proxy" +roleRef: + kind: Role + name: tailscale-non-auth-proxy + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tailscale-non-auth-proxy + labels: + app.kubernetes.io/name: tailscale +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +- apiGroups: [""] + resourceNames: ["tailscale-non-auth-proxy"] + resources: ["secrets"] + verbs: ["get", "update"] +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -186,6 +51,8 @@ metadata: app: non-auth-proxy spec: replicas: 1 + strategy: + type: Recreate selector: matchLabels: app: non-auth-proxy @@ -194,7 +61,8 @@ spec: labels: app: non-auth-proxy spec: - dnsPolicy: None + serviceAccountName: tailscale-non-auth-proxy + dnsPolicy: ClusterFirst dnsConfig: nameservers: - 100.100.100.100 @@ -239,14 +107,21 @@ spec: requests: memory: 50Mi - name: tailscale - image: ghcr.io/tailscale/tailscale:v1.22 + image: ghcr.io/tailscale/tailscale:v1.29 imagePullPolicy: Always + tty: true env: - - name: AUTH_KEY + - name: TS_AUTH_KEY valueFrom: secretKeyRef: name: tailscale-auth key: password + - name: TS_KUBE_SECRET + value: tailscale-non-auth-proxy + - name: TS_ACCEPT_DNS + value: "true" + - name: TS_EXTRA_ARGS + value: "--hostname=non-auth-proxy" securityContext: capabilities: add: 
@@ -259,13 +134,13 @@ spec: if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi echo "Starting tailscaled" - tailscaled --socket=/tmp/tailscaled.sock & + tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock & PID=$! echo "Running tailscale up" tailscale --socket=/tmp/tailscaled.sock up \ - --accept-dns=true \ - --authkey=${AUTH_KEY} \ - --hostname=non-auth-proxy + --accept-dns=${TS_ACCEPT_DNS} \ + --authkey=${TS_AUTH_KEY} \ + ${TS_EXTRA_ARGS} echo "Re-enabling incoming traffic from the cluster" wait ${PID} --- diff --git a/manifests/proxy-civo/non-auth-proxy.yaml b/manifests/proxy-civo/non-auth-proxy.yaml index 6b79fdb..a9ac576 100644 --- a/manifests/proxy-civo/non-auth-proxy.yaml +++ b/manifests/proxy-civo/non-auth-proxy.yaml @@ -25,6 +25,49 @@ data: "loki-distributed.proxy-civo.svc:80": "loki-loki.cluster.local" } --- +apiVersion: v1 +kind: Secret +metadata: + name: tailscale-internal-proxy + namespace: proxy-civo +type: Opaque +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +subjects: +- kind: ServiceAccount + name: "tailscale-internal-proxy" +roleRef: + kind: Role + name: tailscale-internal-proxy + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tailscale-internal-proxy + labels: + app.kubernetes.io/name: tailscale +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +- apiGroups: [""] + resourceNames: ["tailscale-internal-proxy"] + resources: ["secrets"] + verbs: ["get", "update"] +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -36,6 +79,8 @@ metadata: configmap.reloader.stakater.com/reload: "host-mappings" spec: replicas: 1 + strategy: + type: Recreate selector: matchLabels: app: internal-proxy @@ -44,7 +89,8 @@ spec: labels: app: internal-proxy spec: - dnsPolicy: None + serviceAccountName: tailscale-internal-proxy + dnsPolicy: ClusterFirst dnsConfig: nameservers: - 100.100.100.100 @@ -64,14 +110,21 @@ spec: - name: host-mappings mountPath: /config/ - name: tailscale - image: ghcr.io/tailscale/tailscale:v1.22 + image: ghcr.io/tailscale/tailscale:v1.29 imagePullPolicy: Always + tty: true env: - - name: AUTH_KEY + - name: TS_AUTH_KEY valueFrom: secretKeyRef: name: tailscale-auth key: password + - name: TS_KUBE_SECRET + value: tailscale-internal-proxy + - name: TS_ACCEPT_DNS + value: "true" + - name: TS_EXTRA_ARGS + value: "--hostname=proxy-civo-internal-proxy" securityContext: capabilities: add: @@ -84,13 +137,14 @@ spec: if [[ ! -d /dev/net ]]; then mkdir -p /dev/net; fi if [[ ! -c /dev/net/tun ]]; then mknod /dev/net/tun c 10 200; fi echo "Starting tailscaled" - tailscaled --socket=/tmp/tailscaled.sock & + tailscaled --state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock & PID=$! echo "Running tailscale up" tailscale --socket=/tmp/tailscaled.sock up \ - --accept-dns=true \ - --authkey=${AUTH_KEY} \ - --hostname=proxy-civo-internal-proxy + --accept-dns=${TS_ACCEPT_DNS} \ + --authkey=${TS_AUTH_KEY} \ + ${TS_EXTRA_ARGS} + echo "Re-enabling incoming traffic from the cluster" wait ${PID} volumes: - name: host-mappings
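
Note, not part of the patch itself: the substantive change across all four manifests is "tailscaled --state=kube:${TS_KUBE_SECRET}", which stores the node state in a Kubernetes Secret instead of the ephemeral container filesystem, so each proxy keeps its tailnet identity across pod restarts. That is what the new ServiceAccount/Role/RoleBinding trios are for: each sidecar needs "create" on secrets for its first boot and "get"/"update" on its own named state secret thereafter. The switch to "strategy: Recreate" fits the same change, since a rolling update would briefly run two pods contending for one state secret and one tailnet hostname.

A minimal verification sketch, assuming the auth-proxy manifests are applied to the auth-proxy namespace and that the auth key lives in the pre-existing "tailscale-auth" secret under the "password" key, as the deployments reference it (the key value below is a placeholder, not a real key):

    # Prerequisite assumed by all the sidecars: a reusable Tailscale auth key.
    kubectl -n auth-proxy create secret generic tailscale-auth \
      --from-literal=password=tskey-PLACEHOLDER

    # After rollout, tailscaled should have written its state into the
    # Secret named by TS_KUBE_SECRET (created as an empty stub by the manifest):
    kubectl -n auth-proxy get secret tailscale-auth-proxy -o jsonpath='{.data}'

    # The sidecar should report an established tailnet connection:
    kubectl -n auth-proxy exec deploy/auth-proxy -c tailscale -- \
      tailscale --socket=/tmp/tailscaled.sock status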