Compare commits
3 Commits
92b9c9d927
...
c6a9a90dde
Author | SHA1 | Date | |
---|---|---|---|
c6a9a90dde
|
|||
1d34f433ef
|
|||
8be57f1430
|
103
home/.bin/gs-aws
103
home/.bin/gs-aws
@@ -1,103 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-aws - set up AWS credentials.
# Gets an MFA-backed STS session token, optionally assumes a cross-account
# role, and writes the resulting credentials to ~/.aws/credentials under
# the 'giantswarm' profile.
#
# Required env: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_ACCOUNTID

source .utils

ACCOUNT_ID=${AWS_ACCOUNTID}
ROLE=GiantSwarmAdmin
MFA=
MFA_ARN=arn:aws:iam::${AWS_ACCOUNTID}:mfa/marcus@giantswarm.io

print_usage() {
  orange "gs-aws - set up AWS credentials"
  echo " "
  underline "Usage:"
  echo "gs-aws"
  echo " "
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo "-a, --account the AWS account number (default: \$AWS_ACCOUNTID)"
  echo "-r, --role the role to assume (default: GiantSwarmAdmin)"
  echo "-t, --mfa-token the MFA token to use when generating a session [Required]"
  echo "-m, --mfa-arn the ARN of the MFA device (Default ${MFA_ARN})"
}

while test $# -gt 0; do
  case "$1" in
    -a|--account)
      shift
      ACCOUNT_ID=$1
      shift
      ;;
    -r|--role)
      shift
      ROLE=$1
      shift
      ;;
    -t|--mfa-token)
      shift
      MFA=$1
      shift
      ;;
    -m|--mfa-arn)
      shift
      MFA_ARN=$1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      break
      ;;
  esac
done

# Quote the tests: unquoted `[ -z $VAR ]` collapses to `[ -z ]` (always true)
# when the value is empty and errors when it contains spaces.
if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$ACCOUNT_ID" ]; then
  echo "Initial AWS credentials required"
  exit 1
fi

if [ -z "$MFA" ] || [ -z "$MFA_ARN" ]; then
  echo "MFA token and ARN required"
  exit 1
fi

# A stale AWS_PROFILE would override the explicit keys used below.
unset AWS_PROFILE

printf "✨ Getting session credentials..."
# Abort if STS rejects the MFA token - otherwise we would go on to write
# empty credentials into ~/.aws/credentials.
SESSION_JSON=$(aws sts get-session-token --serial-number "${MFA_ARN}" --token-code "${MFA}") || exit 1
printf "\n\e[1A\e[K✅ Got session credentials\n"

export AWS_SECRET_ACCESS_KEY=$(echo "$SESSION_JSON" | jq -r '.Credentials.SecretAccessKey')
export AWS_ACCESS_KEY_ID=$(echo "$SESSION_JSON" | jq -r '.Credentials.AccessKeyId')
export AWS_SESSION_TOKEN=$(echo "$SESSION_JSON" | jq -r '.Credentials.SessionToken')
export EXPIRATION=$(echo "$SESSION_JSON" | jq -r '.Credentials.Expiration')

# When targeting a different account, swap the session credentials for
# cross-account role credentials.
if [ "${ACCOUNT_ID}" != "${AWS_ACCOUNTID}" ]; then
  printf "✨ Assuming cross-account role..."
  ASSUME_SESSION=$(aws sts assume-role --role-session-name "$(whoami)-aws" --role-arn "arn:aws:iam::${ACCOUNT_ID}:role/${ROLE}") || exit 1
  export AWS_SECRET_ACCESS_KEY=$(echo "$ASSUME_SESSION" | jq -r '.Credentials.SecretAccessKey')
  export AWS_ACCESS_KEY_ID=$(echo "$ASSUME_SESSION" | jq -r '.Credentials.AccessKeyId')
  export AWS_SESSION_TOKEN=$(echo "$ASSUME_SESSION" | jq -r '.Credentials.SessionToken')
  export EXPIRATION=$(echo "$ASSUME_SESSION" | jq -r '.Credentials.Expiration')
  printf "\n\e[1A\e[K✅ Assumed role\n"
fi

mkdir -p ~/.aws
cat > ~/.aws/credentials << EOF
[giantswarm]
aws_access_key_id=${AWS_ACCESS_KEY_ID}
aws_secret_access_key=${AWS_SECRET_ACCESS_KEY}
aws_session_token=${AWS_SESSION_TOKEN}
expiration=${EXPIRATION}
EOF

echo "⚡️ AWS credentials setup"
echo ""
echo "ℹ️ You'll need to switch to the 'giantswarm' profile:"
echo ""
echo "unset AWS_ACCESS_KEY_ID"
echo "unset AWS_SECRET_ACCESS_KEY"
echo "export AWS_PROFILE=giantswarm"
@@ -1,108 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-create-cluster - create a Giant Swarm managed CAPI workload cluster
# via `kubectl-gs template cluster`, after checking a cluster of the same
# name does not already exist.

source .utils

DRY_RUN=0
NAMESPACE="org-giantswarm"
RELEASE="20.0.0-alpha1"
PROVIDER="aws"
AZS="eu-west-1a"
DESCRIPTION="$(whoami)'s test cluster"

print_usage() {
  orange "gs-create-cluster - create a Giant Swarm managed CAPI workload cluster"
  echo " "
  underline "Usage:"
  echo "gs-create-cluster [cluster-name]"
  echo " "
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo "-n, --namespace the namespace the cluster is in (default: org-giantswarm)"
  # Fixed copy-paste: this line previously repeated the --namespace help text.
  echo "-r, --release the release version to use (default: 20.0.0-alpha1)"
  echo "-p, --provider the cloud provider to use (default: aws)"
}

while test $# -gt 0; do
  case "$1" in
    -n|--namespace)
      shift
      NAMESPACE=$1
      shift
      ;;
    -r|--release)
      shift
      RELEASE=$1
      shift
      ;;
    -p|--provider)
      shift
      PROVIDER=$1
      shift
      ;;
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      break
      ;;
  esac
done

# Positional args
NAME=${1:-wc001}

# Accept either "myorg" or "org-myorg"; keep both forms available.
PREFIXED_NAMESPACE="org-$NAMESPACE"
case $NAMESPACE in org-*)
  PREFIXED_NAMESPACE="$NAMESPACE"
  NAMESPACE=${NAMESPACE#"org-"}
esac

CAPA_CLUSTER="--provider capa"
CAPZ_CLUSTER="--provider azure --release ${RELEASE}"
CAPG_CLUSTER="--provider gcp --gcp-project giantswarm-352614 --region europe-west3 --gcp-failure-domains europe-west3-a --gcp-machine-deployment-failure-domain europe-west3-a"
# NOTE(review): cluster names are truncated to 5 chars here - presumably a
# provider naming limit; confirm before changing.
TEMPLATE_ARGS="--name ${NAME:0:5} --organization ${NAMESPACE}"
case "${PROVIDER}" in
  aws)
    TEMPLATE_ARGS="${TEMPLATE_ARGS} ${CAPA_CLUSTER}"
    ;;
  gcp)
    TEMPLATE_ARGS="${TEMPLATE_ARGS} ${CAPG_CLUSTER}"
    ;;
  azure)
    TEMPLATE_ARGS="${TEMPLATE_ARGS} ${CAPZ_CLUSTER}"
    ;;
  *)
    echo "Unsupported provider type"
    exit 1
    ;;
esac

echo "✨ Pre-flight checks"
# Test the command directly instead of inspecting $? afterwards.
if gs-get-cluster --namespace "${PREFIXED_NAMESPACE}" "${NAME}" &>/dev/null; then
  echo "Cluster named '${NAME}' already exists"
  exit 1
elif [[ "${PROVIDER}" = "aws" ]]; then
  echo "Cleaning up any old awsclusterroleidentities..."
  kubectl delete --namespace "${PREFIXED_NAMESPACE}" awsclusterroleidentities "${NAME}" 2>/dev/null
fi

echo "✨ Creating a new ${PROVIDER} cluster called '${NAMESPACE}/${NAME}' with release '${RELEASE}'"
# TEMPLATE_ARGS is deliberately unquoted: it is a flat list of flags that
# must word-split.
if [[ $DRY_RUN = 1 ]]; then
  echo kubectl-gs template cluster ${TEMPLATE_ARGS} --description "${DESCRIPTION}"
  kubectl-gs template cluster ${TEMPLATE_ARGS} --description "${DESCRIPTION}"
else
  kubectl-gs template cluster ${TEMPLATE_ARGS} --description "${DESCRIPTION}" | kubectl apply -f -
fi

if [[ $DRY_RUN = 0 ]]; then
  sleep 10
  echo "✨ Checking status..."
  gs-get-cluster --namespace "${PREFIXED_NAMESPACE}" "${NAME}"
fi
@@ -1,214 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-create-repo - create a new Giant Swarm repo from a template repo,
# clone it, rename the {APP-NAME} placeholders, scaffold CI config and
# register the repo in giantswarm/github.

source .utils

TEMPLATE="giantswarm/template-app"
VISIBILITY="public"
CODEOWNER_TEAM="team-tinkerers"

print_usage() {
  orange "gs-create-repo - a new Giant Swarm repo"
  echo " "
  underline "Usage:"
  echo "gs-create-repo (flags) [repo-name]"
  echo " "
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo "-t, --template the template repo to base the new repo on (default: ${TEMPLATE})"
  echo " --team the team to be set as codeowner of the repo (default: ${CODEOWNER_TEAM})"
  # Fixed: closing parenthesis was missing from this help line.
  echo " --visibility the visibility of the repo (default: ${VISIBILITY})"
}

POS_ARGS=()
while test $# -gt 0; do
  case "$1" in
    -t|--template)
      shift
      TEMPLATE=$1
      shift
      ;;
    -p|--private)
      shift
      VISIBILITY="private"
      ;;
    --visibility)
      shift
      VISIBILITY=$1
      shift
      ;;
    --team)
      shift
      CODEOWNER_TEAM=$1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      # Fixed: `POS_ARGS+=${1}` concatenated onto element 0 instead of
      # appending a new array element.
      POS_ARGS+=("$1")
      shift
      ;;
  esac
done

# Qualify bare template names with the giantswarm org. (The old */* arm
# ran a spurious `shift` here, silently consuming a positional argument.)
case $TEMPLATE in
  */*)
    ;;
  *)
    TEMPLATE="giantswarm/${TEMPLATE}"
    ;;
esac

REPOSITORY_NAME=${POS_ARGS[0]}

#############################################

echo "✨ Creating new repo $(italic ${VISIBILITY}) $(orange ${REPOSITORY_NAME}) using base template $(blue ${TEMPLATE}) assigned to $(underline ${CODEOWNER_TEAM})"

printf "Continue? (y/n): "
read CONFIRM
if [[ "${CONFIRM}" != "y" ]]; then
  exit 1
fi

gh repo create --${VISIBILITY} --template "${TEMPLATE}" "giantswarm/${REPOSITORY_NAME}"

echo "Waiting briefly for cluster to be created from template..."
sleep 5

DST="${HOME}/Code/giantswarm/${REPOSITORY_NAME}"
echo "Cloning repo to ${DST}"
mkdir -p "${DST}"
git clone "git@github.com:giantswarm/${REPOSITORY_NAME}.git" "${DST}"
cd "${DST}"

# Fixed: the guard previously tested `helm/APP-NAME` (no braces) and so
# never matched the literal `helm/{APP-NAME}` directory being moved.
# NOTE(review): assumes the template ships a literal `helm/{APP-NAME}`
# directory - confirm against the template repo.
if [ -d 'helm/{APP-NAME}' ]; then
  mv 'helm/{APP-NAME}' "helm/${REPOSITORY_NAME}"
fi

devctl replace -i '{APP-NAME}' ${REPOSITORY_NAME} --ignore .git ./.** ./**

# Clean up some stuff
sed -i 's|\[Read me after cloning this template (GS staff only)\](https://intranet.giantswarm.io/docs/dev-and-releng/app-developer-processes/adding_app_to_appcatalog/)||g' README.md
sed -i 's|- {APP HELM REPOSITORY}||g' README.md
sed -i '$!N; /^\(.*\)\n\1$/!P; D' README.md
sed -i 's/- .*//' CHANGELOG.md
sed -i '$!N; /^\(.*\)\n\1$/!P; D' CHANGELOG.md

echo ""
blue "Run Kubebuilder init? (y/n): "
read CONFIRM
if [ "${CONFIRM}" = "y" ]; then
  # Hide the helm dir while kubebuilder scaffolds, then restore it.
  mv helm .helm
  kubebuilder init --domain giantswarm.io --repo "github.com/giantswarm/${REPOSITORY_NAME}" --plugins=go/v4-alpha
  mv .helm helm
  mv Makefile Makefile.kubebuilder.mk
  go mod tidy
fi

devctl gen workflows --flavour app --flavour generic --check-secrets
devctl gen makefile --flavour app --flavour generic --language go
touch Makefile.custom.mk

echo ""
blue "Update Circle-CI job? (y/n): "
read CONFIRM
if [ "${CONFIRM}" = "y" ]; then

  cat << EOF > .circleci/config.yml
version: 2.1
orbs:
  architect: giantswarm/architect@4.24.0

workflows:
  test-and-push:
    jobs:
      - architect/go-build:
          context: architect
          name: go-build
          binary: ${REPOSITORY_NAME}
          resource_class: xlarge
          filters:
            tags:
              only: /^v.*/
      - architect/push-to-docker:
          context: architect
          name: push-${REPOSITORY_NAME}-to-quay
          image: "quay.io/giantswarm/${REPOSITORY_NAME}"
          username_envar: "QUAY_USERNAME"
          password_envar: "QUAY_PASSWORD"
          requires:
            - go-build
          filters:
            # Trigger the job also on git tag.
            tags:
              only: /^v.*/
      - architect/push-to-docker:
          context: "architect"
          name: push-${REPOSITORY_NAME}-to-docker
          image: "docker.io/giantswarm/${REPOSITORY_NAME}"
          username_envar: "DOCKER_USERNAME"
          password_envar: "DOCKER_PASSWORD"
          requires:
            - go-build
          # Needed to trigger job also on git tag.
          filters:
            tags:
              only: /^v.*/
      # Ensure that for every commit
      # there is an app version in the test catalog.
      - architect/push-to-app-catalog:
          context: architect
          name: push-to-app-catalog
          app_catalog: "control-plane-catalog"
          app_catalog_test: "control-plane-test-catalog"
          chart: "${REPOSITORY_NAME}"
          requires:
            - push-${REPOSITORY_NAME}-to-quay
            - push-${REPOSITORY_NAME}-to-docker
          filters:
            # Trigger the job also on git tag.
            tags:
              only: /^v.*/
      - architect/push-to-app-collection:
          context: architect
          name: push-to-gcp-app-collection
          app_name: "${REPOSITORY_NAME}"
          app_collection_repo: "gcp-app-collection"
          requires:
            - push-to-app-catalog
          filters:
            branches:
              ignore: /.*/
            tags:
              only: /^v.*/
EOF

fi

git add -A
git commit -m "Initial repo scaffold and setup"
git push

devctl repo setup "giantswarm/${REPOSITORY_NAME}" \
  --allow-automerge=true --allow-mergecommit=false --allow-rebasemerge=false \
  --allow-squashmerge=true --allow-updatebranch=true --delete-branch-on-merge=true \
  --enable-issues=true --enable-projects=false --enable-wiki=false

echo ""
echo "🎉 New repo $(orange ${REPOSITORY_NAME}) created! - https://github.com/giantswarm/${REPOSITORY_NAME}"
echo ""

echo "⚡️ Adding reference to $(orange ${REPOSITORY_NAME}) in giantswarm/github"
cd "${HOME}/Code/giantswarm/github"
git checkout main
git pull
yq -i '. += {"name": "'${REPOSITORY_NAME}'", "gen": {"flavour": "app,generic", "language": "go"}, "replace": {"architect-orb": true, "renovate": true}} | sort_by(.name)' "repositories/${CODEOWNER_TEAM}.yaml"
git add "repositories/${CODEOWNER_TEAM}.yaml"
git commit -m "Added ${REPOSITORY_NAME} to ${CODEOWNER_TEAM} repos"
git push

cd -
@@ -1,38 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-get-cluster - get a Giant Swarm managed workload cluster.
# Tries `kubectl-gs get cluster` first and falls back to the raw
# `kubectl get cl` alias when kubectl-gs cannot handle the cluster.

source .utils

set -e

NAMESPACE="org-giantswarm"

print_usage() {
  orange "gs-get-cluster - get a Giant Swarm managed workload cluster"
  echo " "
  underline "Usage:"
  echo "gs-get-cluster [cluster-name]"
  echo " "
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo "-n, --namespace the namespace the cluster is in (default: org-giantswarm)"
}

while test $# -gt 0; do
  case "$1" in
    -n|--namespace)
      shift
      NAMESPACE=$1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      break
      ;;
  esac
done

# Quote "$@" so cluster names survive word-splitting intact.
kubectl-gs get cluster --namespace "$NAMESPACE" "$@" 2>/dev/null || kubectl get cl --namespace "$NAMESPACE" "$@"
@@ -1,83 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-login - login to Giant Swarm managed clusters.
# Prefers Teleport when the cluster is known to it, otherwise falls back
# to `opsctl login`.

source .utils

DEBUG=""

print_usage() {
  orange "gs-login - login to Giant Swarm managed clusters"
  echo " "
  underline "Usage:"
  echo "gs-login [INSTALLATION] [WORKLOAD CLUSTER] [ORGANISATION]"
  echo " "
  underline "Examples:"
  echo "> gs-login gauss"
  echo "> gs-login gauss mywc1"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
}

POS_ARGS=()

while test $# -gt 0; do
  case "$1" in
    -t|--ttl)
      shift
      echo "-t / --ttl no longer handled"
      shift
      ;;
    -g|--certificate-group)
      shift
      echo "-g / --certificate-group no longer handled"
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    --debug)
      DEBUG="--level=debug"
      shift
      ;;
    /)
      # We want to ignore slash separators between MC and WC
      shift
      ;;
    *)
      # Deliberately unquoted: "mc/wc" must split into two positional args.
      POS_ARGS+=($(echo "$1" | tr '/' ' '))
      shift
      ;;
  esac
done

# No installation given - pick one interactively.
if [ ${#POS_ARGS[@]} -eq 0 ]; then
  POS_ARGS+=($(opsctl list installations --short | tr ' ' '\n' | fzf))
fi

case ${#POS_ARGS[@]} in
  0)
    print_usage
    exit 1
    ;;
  *)
    # Drop any stale context for this installation first.
    kubectl config delete-context "gs-${POS_ARGS[0]}" &>/dev/null

    TELEPORT_CLUSTER_NAME="$(echo "${POS_ARGS[@]}" | tr ' ' '-')"
    TELEPORT_SUPPORTED=$(tsh kube ls -f json --query "name == \"${TELEPORT_CLUSTER_NAME}\"" 2>/dev/null | jq '. | length')
    if [[ "${TELEPORT_SUPPORTED}" == "0" ]]; then
      # Teleport not supported, old style login
      echo "Cluster isn't known to Teleport, using old login method"
      opsctl login ${DEBUG} ${POS_ARGS[@]}
    else
      echo "Logging in with Teleport. Cluster: '${TELEPORT_CLUSTER_NAME}'"
      # Make sure that caching is disabled to avoid issues with cross-cluster cache pollution
      TELEPORT_CACHE_DIR="${HOME}/.kube/cache/discovery/teleport.giantswarm.io_443"
      if [[ "$(readlink -f "${TELEPORT_CACHE_DIR}")" != "/dev/null" ]]; then
        rm -rf "${TELEPORT_CACHE_DIR}"
        ln -s /dev/null "${TELEPORT_CACHE_DIR}"
      fi
      tsh kube login "${TELEPORT_CLUSTER_NAME}"
    fi
    ;;
esac
@@ -1,85 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-open - open apps on Giant Swarm clusters via opsctl (or Teleport for
# grafana when available).

source .utils

DEBUG=""

SUPPORTED_APPS="alertmanager cloudprovider grafana happa kibana kyverno prometheus"

print_usage() {
  orange "gs-open - open apps on Giant Swarm clusters"
  echo " "
  underline "Usage:"
  echo "gs-open [APP] [INSTALLATION] [WORKLOAD CLUSTER] "
  echo " "
  underline "Supported apps:"
  italic "${SUPPORTED_APPS}"
  echo " "
  underline "Examples:"
  echo "> gs-open prometheus gauss"
  echo "> gs-open alertmanager gauss mywc1"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo " --debug show debug log output"
}

POS_ARGS=()

while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    --debug)
      DEBUG="--level=debug"
      shift
      ;;
    /)
      # We want to ignore slash separators between MC and WC
      shift
      ;;
    *)
      # Deliberately unquoted: "mc/wc" must split into two positional args.
      POS_ARGS+=($(echo "$1" | tr '/' ' '))
      shift
      ;;
  esac
done

# Interactively fill in missing app and installation.
if [ ${#POS_ARGS[@]} -eq 0 ]; then
  POS_ARGS+=($(echo ${SUPPORTED_APPS} | tr ' ' '\n' | fzf))
fi
if [ ${#POS_ARGS[@]} -eq 1 ]; then
  POS_ARGS+=($(opsctl list installations --short | tr ' ' '\n' | fzf))
fi

# Expand common app-name shorthands.
APP=${POS_ARGS[0]}
if [[ "${APP}" == "cloud" ]]; then
  APP=cloudprovider
fi
if [[ "${APP}" == "prom" ]]; then
  APP=prometheus
fi

case ${#POS_ARGS[@]} in
  2)
    echo "✨ Opening ${APP} on ${POS_ARGS[1]}"

    TELEPORT_SUPPORTED=$(tsh kube ls -f json --query "name == \"${POS_ARGS[1]}\"" 2>/dev/null | jq '. | length')
    if [[ "${APP}" == "grafana" ]] && [[ "${TELEPORT_SUPPORTED}" == "1" ]]; then
      tsh apps login "grafana-${POS_ARGS[1]}"
      open "https://grafana-${POS_ARGS[1]}.teleport.giantswarm.io"
    else
      opsctl open ${DEBUG} --app "${APP}" --installation "${POS_ARGS[1]}"
    fi
    ;;
  3)
    echo "✨ Opening ${APP} on ${POS_ARGS[1]} / ${POS_ARGS[2]}"
    opsctl open ${DEBUG} --app "${APP}" --installation "${POS_ARGS[1]}" --workload-cluster "${POS_ARGS[2]}"
    ;;
  *)
    # Previously only 0 was handled here; 1 or 4+ args fell through and
    # the script exited silently with no feedback.
    print_usage
    exit 1
    ;;
esac
@@ -1,118 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-release - create a new release of a Giant Swarm repo by pushing a
# "#release#" branch that CI turns into a release PR.

source .utils

set -e

print_usage() {
  orange "gs-release - create a new release of a Giant Swarm repo"
  echo " "
  underline "Usage:"
  echo "gs-release [SEMVER LEVEL]"
  echo " "
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
}

while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      break
      ;;
  esac
done

SEMVER=$1

CURRENT_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
MAIN_BRANCH=$(git remote show origin 2>/dev/null|grep HEAD|sed 's/.* //')
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)

if [ "$MAIN_BRANCH" != "$CURRENT_BRANCH" ]; then
  echo "Not currently on main branch, please switch to ${MAIN_BRANCH} to perform a release"
  exit 1
fi

# Split "v1.2.3" into (v1, 2, 3).
VERSION_PARTS=($(echo $CURRENT_TAG | tr "." "\n"))
VERSION_MAJOR=${VERSION_PARTS[0]}
VERSION_MINOR=${VERSION_PARTS[1]}
VERSION_PATCH=${VERSION_PARTS[2]}

echo "The latest released version is ${CURRENT_TAG}"
echo ""

echo "The release will include:"
# NOTE(review): `tail -r` is BSD/macOS-only; on GNU systems use `tac`.
sed -n "/^## \[Unreleased\]$/,/^## /p" CHANGELOG.md | tail -n +3 | tail -r | tail -n +2 | tail -r | sed "s/^/ /g"
echo ""

if [[ "$SEMVER" == "" ]]; then
  printf "What semver release level? (patch, minor or major): "
  read SEMVER
fi

case ${SEMVER} in
  patch)
    VERSION_PATCH=$((VERSION_PATCH+1))
    ;;

  minor)
    VERSION_MINOR=$((VERSION_MINOR+1))
    VERSION_PATCH=0
    ;;

  major)
    if [[ ${VERSION_MAJOR:0:1} == "v" ]]; then
      # Strip the leading "v" before the arithmetic. Previously the raw
      # value (e.g. "v1") was used inside $(( )), where bash treats "v1"
      # as an (unset) variable name, so a major bump of v1.x.y produced
      # "v1" again and the version never advanced.
      VERSION_MAJOR="v$((${VERSION_MAJOR#v}+1))"
    else
      VERSION_MAJOR=$((VERSION_MAJOR+1))
    fi
    VERSION_MINOR=0
    VERSION_PATCH=0
    ;;

  *)
    echo "Unknown Semver level provided"
    exit 1
    ;;
esac

NEW_VERSION="${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"
NEW_BRANCH="${MAIN_BRANCH}#release#${NEW_VERSION}"

echo ""
echo "✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ "
echo "Current version ${CURRENT_TAG}"
echo " New version ${NEW_VERSION}"
echo " Release branch ${NEW_BRANCH}"
echo "✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ ✨ "
echo ""

printf "Confirm? (y/n): "
read CONFIRM

if [ "${CONFIRM}" = "y" ]; then
  echo ""
  echo "Publishing new release branch..."
  echo ""
  git checkout -b "${NEW_BRANCH}"
  git push -u origin "${NEW_BRANCH}"

  ORG_NAME=$(git remote get-url origin | sed 's/.*github.com[:|\/]//' | sed 's/\.git$//' | tr '/' ' ' | awk '{print $1}')
  REPO_NAME=$(git remote get-url origin | sed 's/.*github.com[:|\/]//' | sed 's/\.git$//' | tr '/' ' ' | awk '{print $2}')

  echo ""
  echo "🚀 Keep an eye on $(underline "https://github.com/${ORG_NAME}/${REPO_NAME}/pulls") for the new release PR"
  echo ""

  echo "Switching back to the main git branch"
  git checkout $(git remote show origin 2>/dev/null|grep HEAD|sed 's/.* //') &> /dev/null
  git pull &> /dev/null
else
  echo "Aborting..."
  exit 1
fi
@@ -1,75 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-ssh - ssh to a Giant Swarm managed cluster node, via Teleport when
# the cluster is known to it, otherwise via `opsctl ssh`.

source .utils

DEBUG=""

print_usage() {
  orange "gs-ssh - ssh to Giant Swarm managed cluster node"
  echo " "
  underline "Usage:"
  echo "gs-ssh [CLUSTER NAME] [NODE NAME]"
  echo " "
  underline "Examples:"
  echo "> gs-ssh gauss"
  echo "> gs-ssh gauss ip-1-2-3-4.ey-west-1.compute.internal"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
}

POS_ARGS=()

while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    --debug)
      DEBUG="--level=debug"
      shift
      ;;
    /)
      # We want to ignore slash separators between MC and WC
      shift
      ;;
    *)
      # Deliberately unquoted: "mc/wc" must split into two positional args.
      POS_ARGS+=($(echo "$1" | tr '/' ' '))
      shift
      ;;
  esac
done

# No cluster given - pick one interactively.
if [ ${#POS_ARGS[@]} -eq 0 ]; then
  POS_ARGS+=($(opsctl list installations --short | tr ' ' '\n' | fzf))
fi

TELEPORT_CLUSTER_NAME="$(echo "${POS_ARGS[0]}" | tr ' ' '-')"
TELEPORT_SUPPORTED=$(tsh kube ls -f json --query "name == \"${TELEPORT_CLUSTER_NAME}\"" 2>/dev/null | jq '. | length')

# No node given - require one for opsctl, offer a picker for Teleport.
if [ ${#POS_ARGS[@]} -eq 1 ]; then
  if [[ "${TELEPORT_SUPPORTED}" == "0" ]]; then
    orange "Node name must be specified if cluster doesn't support Teleport"
    exit 1
  else
    POS_ARGS+=($(tsh ls -f names cluster=${POS_ARGS[0]} | fzf))
  fi
fi

# Drop any stale context for this installation first.
kubectl config delete-context "gs-${POS_ARGS[0]}" &>/dev/null

if [[ "${TELEPORT_SUPPORTED}" == "0" ]]; then
  # Teleport not supported, old style login
  echo "Cluster isn't known to Teleport, using old ssh method"
  opsctl ssh ${DEBUG} ${POS_ARGS[@]}
else
  echo "SSHing with Teleport. Cluster: '${TELEPORT_CLUSTER_NAME}' Node: ${POS_ARGS[1]}"
  # Make sure that caching is disabled to avoid issues with cross-cluster cache pollution
  TELEPORT_CACHE_DIR="${HOME}/.kube/cache/discovery/teleport.giantswarm.io_443"
  if [[ "$(readlink -f "${TELEPORT_CACHE_DIR}")" != "/dev/null" ]]; then
    rm -rf "${TELEPORT_CACHE_DIR}"
    ln -s /dev/null "${TELEPORT_CACHE_DIR}"
  fi
  tsh ssh "root@cluster=${POS_ARGS[0]},node=${POS_ARGS[1]}"
fi
@@ -1,97 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-standup - Standup a new test workload cluster, using the
# cluster-standup-teardown modules.

source .utils

CODE_DIR="${HOME}/Code/GiantSwarm/cluster-standup-teardown"
CUR_DIR=$(pwd)
OUTPUT_DIR=${CUR_DIR}

print_usage() {
  orange "gs-standup - Standup a new test workload cluster, using the cluster-test-suites modules."
  echo " "
  underline "Usage:"
  echo "gs-standup [provider]"
  echo " "
  underline "Examples:"
  echo "> gs-standup capa"
  echo "> gs-standup eks"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo " --dir override the directory of cluster-test-suite code"
  echo "-o --output override the directory the files output to"
}

POS_ARGS=()

while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    --dir)
      shift
      CODE_DIR=$1
      shift
      ;;
    -o|--output)
      shift
      OUTPUT_DIR=$1
      shift
      ;;
    *)
      POS_ARGS+=($(echo "$1" | tr '/' ' '))
      shift
      ;;
  esac
done

case ${#POS_ARGS[@]} in
  0)
    print_usage
    exit 1
    ;;
  1)
    PROVIDER=""
    CONTEXT=""

    # Map the user-facing provider alias to the provider/context pair
    # expected by the standup tool.
    case ${POS_ARGS[0]} in
      "aws"|"capa")
        PROVIDER="aws"
        CONTEXT="capa"
        ;;
      "azure"|"capz")
        PROVIDER="azure"
        CONTEXT="capz"
        ;;
      "vsphere"|"capv")
        PROVIDER="vsphere"
        CONTEXT="capv"
        ;;
      "cloud-director"|"capvcd")
        PROVIDER="cloud-director"
        CONTEXT="capvcd"
        ;;
      "eks")
        PROVIDER="eks"
        CONTEXT="eks"
        ;;
      "aws-private"|"capa-private")
        PROVIDER="aws"
        CONTEXT="capa-private-proxy"
        ;;
      *)
        # Previously an unknown provider fell through with PROVIDER and
        # CONTEXT empty and still invoked the standup tool with broken
        # flags; fail fast instead.
        echo "Unsupported provider type"
        exit 1
        ;;
    esac

    cd "${CODE_DIR}"
    go run "${CODE_DIR}/cmd/standup/main.go" \
      --provider "${PROVIDER}" \
      --context "${CONTEXT}" \
      --output "${OUTPUT_DIR}"
    cd "${CUR_DIR}"
    ;;
  *)
    print_usage
    exit 1
    ;;
esac
@@ -1,89 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# gs-teardown - Teardown a previously created test cluster, using the
# cluster-standup-teardown modules.

source .utils

CODE_DIR="${HOME}/Code/GiantSwarm/cluster-standup-teardown"
CURR_DIR=$(pwd)
INPUT_DIR=$(pwd)

print_usage() {
  orange "gs-teardown - Teardown a previously created test cluster"
  echo " "
  underline "Usage:"
  echo "gs-teardown [provider]"
  echo " "
  underline "Examples:"
  echo "> gs-teardown capa"
  echo "> gs-teardown eks"
  echo " "
  underline "Options:"
  echo "-h, --help show this help text"
  echo " --dir override the directory of cluster-test-suite code"
  echo " --in override the directory where the previously output files exist"
}

POS_ARGS=()

while test $# -gt 0; do
  case "$1" in
    -h|--help)
      print_usage
      exit 0
      ;;
    --dir)
      shift
      CODE_DIR=$1
      shift
      ;;
    --input)
      shift
      INPUT_DIR=$1
      shift
      ;;
    *)
      POS_ARGS+=($(echo "$1" | tr '/' ' '))
      shift
      ;;
  esac
done

case ${#POS_ARGS[@]} in
  0)
    print_usage
    exit 1
    ;;
  1)
    CONTEXT=""

    # Map the user-facing provider alias to the context expected by the
    # teardown tool.
    case ${POS_ARGS[0]} in
      "aws"|"capa")
        CONTEXT="capa"
        ;;
      "azure"|"capz")
        CONTEXT="capz"
        ;;
      "vsphere"|"capv")
        CONTEXT="capv"
        ;;
      "cloud-director"|"capvcd")
        CONTEXT="capvcd"
        ;;
      "eks")
        CONTEXT="eks"
        ;;
      "aws-private"|"capa-private")
        CONTEXT="capa-private-proxy"
        ;;
      *)
        # Fail fast on unknown providers instead of running the teardown
        # tool with an empty --context.
        echo "Unsupported provider type"
        exit 1
        ;;
    esac

    cd "${CODE_DIR}"
    go run "${CODE_DIR}/cmd/teardown/main.go" \
      --context "${CONTEXT}" \
      --standup-directory "${INPUT_DIR}"
    # Fixed: this previously read `cd ${CUR_DIR}` - an undefined variable
    # (the script defines CURR_DIR), so the final cd silently went to $HOME.
    cd "${CURR_DIR}"
    ;;
  *)
    print_usage
    exit 1
    ;;
esac
@@ -1,24 +0,0 @@
|
||||
#!/usr/bin/env bash
#
# Update the Giant Swarm CLI tools (opsctl, devctl, kubectl-gs) in place,
# skipping any that are not installed.

set -e

echo "⬆️ Updating tools ⬆️"
echo

# `command -v` is the portable, builtin replacement for `which`.
if command -v opsctl &>/dev/null ; then
  printf "⏳ opsctl..."
  opsctl version update --no-cache 1>/dev/null
  printf "\n\e[1A\e[K✅ opsctl - %s\n" "$(opsctl version | grep "^Version:" | awk -F' ' '{print $NF}')"
fi

if command -v devctl &>/dev/null ; then
  printf "⏳ devctl... "
  devctl version update --no-cache 1>/dev/null
  printf "\n\e[1A\e[K✅ devctl - %s\n" "$(devctl version | grep "^Version:" | awk -F' ' '{print $NF}')"
fi

if command -v kubectl-gs &>/dev/null ; then
  printf "⏳ kubectl-gs... "
  kubectl-gs selfupdate 1>/dev/null
  printf "\n\e[1A\e[K✅ kubectl-gs - %s\n" "$(kubectl-gs --version | awk -F' ' '{print $NF}')"
fi
@@ -36,33 +36,27 @@ done
|
||||
|
||||
NAME=${2}
|
||||
|
||||
addLabelsAndAnnotations() {
|
||||
yq e '.metadata.labels."app.kubernetes.io/name" = "'${NAME}'" |
|
||||
.metadata.labels."giantswarm.io/user" = "'$(whoami)'" |
|
||||
.metadata.annotations."giantswarm.io/description" = ""' -
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
deployment|dp)
|
||||
kubectl create ${NAMESPACE} deployment ${NAME} --image=nginx:1.21 --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} deployment ${NAME} --image=nginx:1.21 --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
ingress|in)
|
||||
kubectl create ${NAMESPACE} ingress ${NAME} --dry-run=client -o yaml --rule=example.com/=my-service:web ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} ingress ${NAME} --dry-run=client -o yaml --rule=example.com/=my-service:web ${@:3}
|
||||
;;
|
||||
service|svc)
|
||||
kubectl create ${NAMESPACE} service clusterip ${NAME} --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} service clusterip ${NAME} --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
configmap|cm)
|
||||
kubectl create ${NAMESPACE} configmap ${NAME} --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} configmap ${NAME} --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
secret|sec)
|
||||
kubectl create ${NAMESPACE} secret generic ${NAME} --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} secret generic ${NAME} --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
cronjob|cj)
|
||||
kubectl create ${NAMESPACE} cronjob ${NAME} --image=alpine:latest --schedule="1 * * * *" --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} cronjob ${NAME} --image=alpine:latest --schedule="1 * * * *" --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
job|jo)
|
||||
kubectl create ${NAMESPACE} job ${NAME} --image=alpine:latest --dry-run=client -o yaml ${@:3} | addLabelsAndAnnotations
|
||||
kubectl create ${NAMESPACE} job ${NAME} --image=alpine:latest --dry-run=client -o yaml ${@:3}
|
||||
;;
|
||||
esac
|
||||
|
||||
|
@@ -7,5 +7,3 @@ aliases:
|
||||
ro: roles
|
||||
rb: rolebindings
|
||||
np: networkpolicies
|
||||
cl: cluster.x-k8s.io/v1beta1/clusters
|
||||
app: application.giantswarm.io/v1alpha1/apps
|
||||
|
@@ -1,66 +1,4 @@
|
||||
plugins:
|
||||
sshnode:
|
||||
shortCut: s
|
||||
description: SSH via Teleport
|
||||
dangerous: false
|
||||
scopes:
|
||||
- nodes
|
||||
background: false
|
||||
command: bash
|
||||
args:
|
||||
- -c
|
||||
- tsh ssh root@cluster=$(echo "$CONTEXT" | rev | cut -d'-' -f1 | rev),node=$(echo "$NAME" | cut -d '.' -f 1)
|
||||
|
||||
aws:
|
||||
shortCut: w
|
||||
description: Open AWS Console
|
||||
dangerous: false
|
||||
scopes:
|
||||
- clusters
|
||||
- awsclusters
|
||||
background: false
|
||||
command: bash
|
||||
args:
|
||||
- -c
|
||||
- open "https://signin.aws.amazon.com/switchrole?account=$(kubectl --context ${CONTEXT} get awsclusterroleidentity $(kubectl --context ${CONTEXT} get awsclusters -n ${NAMESPACE} ${NAME} -o json | jq -r '.spec.identityRef.name') -o json | jq -r '.spec.roleARN | split(":")[4]')&roleName=GiantSwarmAdmin&displayName=${CONTEXT}+-+${NAME}"
|
||||
|
||||
clusterapps:
|
||||
shortCut: a
|
||||
description: List Apps
|
||||
dangerous: false
|
||||
scopes:
|
||||
- clusters
|
||||
background: false
|
||||
command: sh
|
||||
args:
|
||||
- -c
|
||||
- "viddy -n 5s 'kubectl get apps -n $NAMESPACE | grep $NAME'"
|
||||
|
||||
tree:
|
||||
shortCut: t
|
||||
description: Show Tree
|
||||
dangerous: false
|
||||
scopes:
|
||||
- clusters
|
||||
background: false
|
||||
command: sh
|
||||
args:
|
||||
- -c
|
||||
- "viddy -n 15s 'kubectl tree -n $NAMESPACE clusters.v1beta1.cluster.x-k8s.io $NAME'"
|
||||
|
||||
values:
|
||||
shortCut: v
|
||||
description: Show Values
|
||||
dangerous: false
|
||||
scopes:
|
||||
- clusters
|
||||
- apps
|
||||
background: false
|
||||
command: sh
|
||||
args:
|
||||
- -c
|
||||
- "kubectl get -o yaml cm ${NAME}-chart-values -n giantswarm | less"
|
||||
|
||||
# kubectl-blame by knight42
|
||||
# Annotate each line in the given resource's YAML with information from the managedFields to show who last modified the field.
|
||||
# Source: https://github.com/knight42/kubectl-blame
|
||||
|
58
install.sh
58
install.sh
@@ -6,17 +6,17 @@ export PATH="/home/linuxbrew/.linuxbrew/bin:/opt/homebrew/bin/:$PATH"
|
||||
[ -d /usr/local/share/zsh/site-functions ] || (sudo mkdir -p /usr/local/share/zsh/site-functions && sudo chmod 777 /usr/local/share/zsh/site-functions)
|
||||
|
||||
# Install homebrew
|
||||
echo ""
|
||||
echo "🔵 Installing homebrew"
|
||||
which brew >/dev/null || /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
brew tap homebrew/core
|
||||
printf " ✅\n"
|
||||
echo "✅"
|
||||
|
||||
# Install oh-my-zsh
|
||||
echo ""
|
||||
echo "🔵 Setting up zsh"
|
||||
printf "Cloning oh-my-zsh..."
|
||||
[ -d ${HOME}/.oh-my-zsh ] || sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
|
||||
printf " ✅\n"
|
||||
|
||||
# Install ZSH plugins
|
||||
printf "Cloning zsh plugins..."
|
||||
[ -d ${ZSH_CUSTOM:-${HOME}/.oh-my-zsh/custom}/plugins/zsh-autosuggestions ] || git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-${HOME}/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
|
||||
@@ -55,6 +55,13 @@ MAS_TOOLS=(
|
||||
1470584107 # Dato
|
||||
)
|
||||
|
||||
# Tools removed to be cleaned up
|
||||
REMOVED_BREW_TOOLS=(
|
||||
exa karabiner-elements kubectx
|
||||
)
|
||||
REMOVED_KREW_TOOLS=( gs )
|
||||
|
||||
echo ""
|
||||
echo "🔵 Installing / updating tools"
|
||||
|
||||
# Install Debian/Ubuntu specific packages if apt exists
|
||||
@@ -64,6 +71,7 @@ if command -v apt &>/dev/null; then
|
||||
fi
|
||||
|
||||
# Homebrew
|
||||
echo ""
|
||||
echo "🔵 Homebrew tools"
|
||||
export HOMEBREW_NO_INSTALL_CLEANUP=true
|
||||
for tool in "${BREW_TOOLS[@]}"
|
||||
@@ -78,6 +86,7 @@ do
|
||||
done
|
||||
|
||||
# Cargo
|
||||
echo ""
|
||||
echo "🔵 Cargo tools"
|
||||
for tool in "${CARGO_TOOLS[@]}"
|
||||
do
|
||||
@@ -91,6 +100,7 @@ do
|
||||
done
|
||||
|
||||
# Krew
|
||||
echo ""
|
||||
echo "🔵 Krew tools"
|
||||
kubectl-krew update &>/dev/null
|
||||
for tool in "${KREW_TOOLS[@]}"
|
||||
@@ -113,6 +123,7 @@ fulllink() {
|
||||
fi
|
||||
}
|
||||
|
||||
echo ""
|
||||
echo "🔵 OS Specific setup"
|
||||
echo "Detected OS type: ${OSTYPE}"
|
||||
|
||||
@@ -122,6 +133,8 @@ case "${OSTYPE}" in
|
||||
;;
|
||||
*darwin*)
|
||||
# Mac specific setup
|
||||
echo ""
|
||||
echo "Instaling Mac-specific Brew tools..."
|
||||
for tool in "${MAC_BREW_TOOLS[@]}"
|
||||
do
|
||||
printf "${tool}..."
|
||||
@@ -134,6 +147,8 @@ case "${OSTYPE}" in
|
||||
done
|
||||
|
||||
# Mac App Store
|
||||
echo ""
|
||||
echo "Instaling Mac-specific App Store tools..."
|
||||
for tool in "${MAS_TOOLS[@]}"
|
||||
do
|
||||
printf "MAS ID: ${tool}..."
|
||||
@@ -145,6 +160,8 @@ case "${OSTYPE}" in
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "Setting up config files"
|
||||
FILES=$(/usr/bin/find ./os-specific/darwin/home -maxdepth 1 -mindepth 1 | tr '\n' ' ')
|
||||
for file in $FILES
|
||||
do
|
||||
@@ -166,6 +183,7 @@ case "${OSTYPE}" in
|
||||
|
||||
|
||||
# Handle other files outside of the user's home directory
|
||||
echo ""
|
||||
echo "🔵 Handiling non-standard files:"
|
||||
# 1. Tabby config
|
||||
mkdir -p "/Users/${USER}/Library/Application Support/tabby"
|
||||
@@ -188,6 +206,7 @@ case "${OSTYPE}" in
|
||||
;;
|
||||
esac
|
||||
|
||||
echo ""
|
||||
echo "🔵 Adding configuration"
|
||||
FILES=$(/usr/bin/find ./home -maxdepth 1 -mindepth 1 | tr '\n' ' ')
|
||||
for file in $FILES
|
||||
@@ -198,3 +217,36 @@ do
|
||||
ln -sfn ${f} ${dst}
|
||||
printf " ✅\n"
|
||||
done
|
||||
|
||||
|
||||
echo ""
|
||||
echo "🔵 Updating installed tools..."
|
||||
brew upgrade
|
||||
mas upgrade
|
||||
|
||||
echo ""
|
||||
echo "🔵 Removing old Homebrew tools"
|
||||
export HOMEBREW_NO_INSTALL_CLEANUP=true
|
||||
for tool in "${REMOVED_BREW_TOOLS[@]}"
|
||||
do
|
||||
printf "${tool}..."
|
||||
brew uninstall ${tool} &>/dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
printf " ✅\n"
|
||||
else
|
||||
printf " ❌\n"
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "🔵 Removing old Krew tools"
|
||||
for tool in "${REMOVED_KREW_TOOLS[@]}"
|
||||
do
|
||||
printf "${tool}..."
|
||||
kubectl-krew uninstall ${tool} &>/dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
printf " ✅\n"
|
||||
else
|
||||
printf " ❌\n"
|
||||
fi
|
||||
done
|
||||
|
Reference in New Issue
Block a user