-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathorchid-lib.sh
228 lines (196 loc) · 9.98 KB
/
orchid-lib.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
#!/usr/bin/env bash
#
# orchid-lib.sh — helper functions to provision AKS clusters and deploy the
# Elassandra operator / datacenters, Traefik and Prometheus using az, kubectl
# and helm (v2).
#
# Require az cli 2.0.76+
# On Mac: brew upgrade azure-cli
# az extension update --name aks-preview
# az feature register --name NodePublicIPPreview --namespace Microsoft.ContainerService
# az account set --subscription 72738c1b-8ae6-4f23-8531-5796fe866f2e

# Trace every command. Handy for ops scripts, but note it also echoes any
# secrets passed on command lines to stderr.
set -x

# Helm charts checkout. Generalized: overridable from the environment
# (was hard-coded to a developer-specific absolute path).
HELM_REPO=${HELM_REPO:-/Users/vroyer/git/strapdata/strapkop/helm/src/main/helm}
# Name of the docker-registry pull secret created by create-acr-secret.
REGISTRY_SECRET_NAME="strapregistry"
# Default operator image tag (see install_elassandra_operator).
ELASSANDRA_OPERATOR_TAG="6.2.3.22-SNAPSHOT"
# Create the Azure resource group in westeurope.
# Globals: RESOURCE_GROUP_NAME (read)
function create_resource_group() {
    # Quoted (SC2086): a group name containing spaces must stay one argument.
    az group create -l westeurope -n "$RESOURCE_GROUP_NAME"
}
# Delete the Azure resource group (az prompts for confirmation).
# Globals: RESOURCE_GROUP_NAME (read)
function destroy_resource_group() {
    # Quoted (SC2086) so the name is passed as a single argument.
    az group delete -n "$RESOURCE_GROUP_NAME"
}
# Create a vnet (10.0.0.0/17) with an initial subnet0 (10.0.0.0/24).
# $1 = vnet name (default: vnet0 — the original ignored $1 despite the
#      comment advertising it; generalized backward-compatibly)
# Globals: RESOURCE_GROUP_NAME (read)
function create_vnet() {
    az network vnet create --name "${1:-vnet0}" -g "$RESOURCE_GROUP_NAME" --address-prefix 10.0.0.0/17 --subnet-name subnet0 --subnet-prefix 10.0.0.0/24
}
# Create a service principal with Reader access to the ACR registry and set
# SPN_PW / SPN_CLIENT_ID in the current shell.
# Globals: SUBSCRIPTION_ID, ACR_RESOURCE_GROUP, ACR_NAME,
#          SERVICE_PRINCIPAL_NAME (read); SPN_PW, SPN_CLIENT_ID (exported).
function create-acr-rbac() {
    # eval is intentional: jq renders "export SPN_PW=... && export SPN_CLIENT_ID=..."
    # which must execute in this shell so the variables become visible.
    eval "$(az ad sp create-for-rbac \
        --scopes "/subscriptions/$SUBSCRIPTION_ID/resourcegroups/$ACR_RESOURCE_GROUP/providers/Microsoft.ContainerRegistry/registries/$ACR_NAME" \
        --role Reader \
        --name "$SERVICE_PRINCIPAL_NAME" | jq -r '"export SPN_PW=\(.password) && export SPN_CLIENT_ID=\(.appId)"')"
}
# Create the docker-registry pull secret for the strapdata ACR in the
# current kubectl context/namespace.
# Globals: REGISTRY_SECRET_NAME, SPN_CLIENT_ID, SPN_PW (read) — the latter
# two are produced by create-acr-rbac.
# NOTE(review): the email literal below was scrubbed by the hosting site;
# restore a real address before use.
function create-acr-secret() {
    kubectl create secret docker-registry "$REGISTRY_SECRET_NAME" --docker-server=https://strapdata.azurecr.io --docker-username="$SPN_CLIENT_ID" --docker-password="$SPN_PW" --docker-email="[email protected]"
}
# AKS zone availability (requires VM Scale Set) does not allow adding public
# IPs on nodes because of the standard LB, so keep the AvailabilitySet
# deployment with no LB unless you deploy one.
# $1 = k8s cluster IDX (1-based). The node subnet is 10.0.$1.0/24; the
#      service CIDR third octet starts at 64 for IDX 1 and steps by 16.
# Globals: RESOURCE_GROUP_NAME, K8S_CLUSTER_NAME, SSH_PUBKEY (read).
# Calls use_k8s_cluster at the end to switch kubectl to the new cluster.
function create_aks_cluster() {
    az network vnet subnet create -g "$RESOURCE_GROUP_NAME" --vnet-name vnet0 -n "subnet$1" --address-prefixes "10.0.$1.0/24"
    local B3=$((64 + ($1 - 1) * 16))
    # Resolve the subnet resource id up front; declaration split from the
    # command substitution so a lookup failure is not masked (SC2155).
    local subnet_id
    subnet_id=$(az network vnet subnet show -g "$RESOURCE_GROUP_NAME" --vnet-name vnet0 -n "subnet$1" | jq -r '.id') || return
    az aks create --name "${K8S_CLUSTER_NAME}${1}" \
        --resource-group "$RESOURCE_GROUP_NAME" \
        --ssh-key-value "$SSH_PUBKEY" \
        --network-plugin azure \
        --docker-bridge-address "192.168.0.1/24" \
        --service-cidr "10.0.$B3.0/22" \
        --dns-service-ip "10.0.$B3.10" \
        --vnet-subnet-id "$subnet_id" \
        --node-count 1 \
        --node-vm-size Standard_D4_v3 \
        --vm-set-type AvailabilitySet \
        --output table
    # --attach-acr "$ACR_ID"
    # --load-balancer-sku basic
    # --load-balancer-managed-outbound-ip-count 0 \
    # Grant the bundled dashboard cluster-admin (dev convenience only).
    kubectl create clusterrolebinding kubernetes-dashboard -n kube-system --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard
    use_k8s_cluster "$1"
}
# Attach a Standard-SKU public IP to one AKS node NIC.
# $1 = k8s cluster IDX
# $2 = node index (position in 'az vm list' output)
# Globals: RESOURCE_GROUP_NAME, K8S_CLUSTER_NAME (read);
#          AKS_RG_NAME, AKS_NODE (written, left global as before).
# NOTE(review): the "-nic-2" / ipconfig1 naming mirrors what AKS generated
# in this environment — confirm against the actual node resource group.
function add_public_ip() {
    AKS_RG_NAME=$(az resource show --namespace Microsoft.ContainerService --resource-type managedClusters -g "$RESOURCE_GROUP_NAME" -n "${K8S_CLUSTER_NAME}${1}" | jq -r .properties.nodeResourceGroup)
    # Fix: list VMs from the managed cluster's node resource group — the
    # original hard-coded "MC_koptest2_kube2_westeurope", which only worked
    # for one specific deployment.
    AKS_NODE=$(az vm list --resource-group "$AKS_RG_NAME" | jq -r ".[$2] .name")
    az network nic ip-config list --nic-name "${AKS_NODE::-2}-nic-2" -g "$AKS_RG_NAME"
    az network public-ip create -g "$RESOURCE_GROUP_NAME" --name "${K8S_CLUSTER_NAME}${1}-ip$2" --dns-name "${K8S_CLUSTER_NAME}${1}-pub${2}" --sku Standard
    az network nic ip-config update -g "$AKS_RG_NAME" --nic-name "${AKS_NODE::-2}-nic-2" --name ipconfig1 --public-ip-address "${K8S_CLUSTER_NAME}${1}-ip$2"
}
# Add a second node pool (requires AKS with VM ScaleSet).
# $1 = k8s cluster IDX
# Globals: K8S_CLUSTER_NAME, RESOURCE_GROUP_NAME (read).
# NOTE(review): Azure VM sizes are usually spelled "Standard_B2s" — confirm
# "Standard_B2S" is accepted by the target CLI version.
function add_nodepool2() {
    az aks nodepool add --cluster-name "${K8S_CLUSTER_NAME}${1}" \
        --resource-group "$RESOURCE_GROUP_NAME" \
        --name nodepool2 \
        --node-vm-size Standard_B2S \
        --node-count 1
}
# List cluster nodes (wide output) together with their availability-zone
# label (failure-domain.beta.kubernetes.io/zone).
function nodes_zone() {
    kubectl get nodes -L failure-domain.beta.kubernetes.io/zone -o wide
}
# Delete an AKS cluster (az prompts for confirmation).
# $1 = k8s cluster IDX
# Globals: K8S_CLUSTER_NAME, RESOURCE_GROUP_NAME (read).
function destroy_aks_cluster() {
    az aks delete --name "${K8S_CLUSTER_NAME}${1}" --resource-group "$RESOURCE_GROUP_NAME"
}
# Fetch credentials for cluster $1 and point kubectl at it.
# $1 = kube index
# Globals: K8S_CLUSTER_NAME, RESOURCE_GROUP_NAME, NAMESPACE (read).
function use_k8s_cluster() {
    az aks get-credentials --name "${K8S_CLUSTER_NAME}${1}" --resource-group "$RESOURCE_GROUP_NAME" --output table
    # NOTE(review): switching to context "$NAMESPACE" looks odd (a namespace
    # name used as a kubeconfig context name) — confirm against the local
    # kubeconfig before relying on it.
    kubectl config use-context "$NAMESPACE"
    kubectl config set-context "$RESOURCE_GROUP_NAME" --cluster="${K8S_CLUSTER_NAME}${1}"
}
# Bootstrap Helm v2's server side (Tiller) into kube-system with a
# cluster-admin service account.
# NOTE(review): Helm v2 only — 'helm init' and Tiller were removed in Helm v3.
function init_helm() {
# Initialize the local helm client/Tiller.
helm init
# Check API connectivity by listing pods; if that fails, retry helm init.
kubectl -n kube-system get po || helm init
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
# Point the already-created tiller-deploy deployment at the new service account.
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
# Block until Tiller is up and reachable.
helm init --wait
}
# Deploy the Elassandra operator chart as release "strapkop".
# $1 = target namespace (default: default)
# Globals: REGISTRY_SECRET_NAME, HELM_REPO, ELASSANDRA_OPERATOR_TAG (read);
#          OPERATOR_TAG (optional override of the image tag).
function install_elassandra_operator() {
    # Fix: the original read $OPERATOR_TAG, which is never defined in this
    # file (the global at the top is ELASSANDRA_OPERATOR_TAG). Fall back to
    # it while still honoring an explicit OPERATOR_TAG from the environment.
    helm install --namespace "${1:-default}" --name strapkop \
        --set "image.pullSecrets[0]=$REGISTRY_SECRET_NAME" \
        --set image.tag="${OPERATOR_TAG:-$ELASSANDRA_OPERATOR_TAG}" \
        "$HELM_REPO/elassandra-operator"
}
# Deploy a single-node Elassandra datacenter (no reaper, with kibana) in the
# "default" namespace, as release "<cluster>-<dc>".
# $1 = cluster name (default: cl1)
# $2 = datacenter name (default: dc1)
# Globals: REGISTRY_SECRET_NAME, HELM_REPO, ELASSANDRA_OPERATOR_TAG (read);
#          OPERATOR_TAG (optional image-tag override).
function install_singlenode_elassandra_datacenter() {
    helm install --namespace default --name "${1:-cl1}-${2:-dc1}" \
        --set "image.pullSecrets[0]=$REGISTRY_SECRET_NAME" \
        --set image.tag="${OPERATOR_TAG:-$ELASSANDRA_OPERATOR_TAG}" \
        --set reaper.enabled=false \
        --set kibana.enabled=true \
        --set replicas=1 \
        --wait \
        "$HELM_REPO/elassandra-datacenter"
    # Fix: the original message claimed "namespace $1", but this function
    # always installs into the "default" namespace.
    echo "Datacenter ${2:-dc1} of cluster ${1:-cl1} deployed in namespace default"
}
# Deploy the traefik ingress controller with Let's Encrypt wildcard certs
# (ACME dns-01 challenge through Azure DNS).
# $1 = DNS domain (default: $DNS_DOMAIN)
# Globals: HELM_DEBUG (optional extra helm flags; intentionally unquoted so
#          an empty value expands to nothing), DNS_DOMAIN (read).
# SECURITY(review): the Azure service-principal client secret below is a
# hard-coded credential committed to source — rotate it and inject it via
# the environment instead of leaving it inline.
# shellcheck disable=SC2086  # HELM_DEBUG must word-split
function deploy_traefik_acme() {
    local domain="${1:-$DNS_DOMAIN}"
    echo "Deploying traefik proxy with domain=${domain}"
    helm install $HELM_DEBUG --name traefik --namespace kube-system \
        --set rbac.enabled=true,debug.enabled=true \
        --set ssl.enabled=true,ssl.enforced=true \
        --set acme.enabled=true,acme.email="[email protected]",acme.storage="acme.json" \
        --set acme.logging=true,acme.staging=false \
        --set acme.challengeType="dns-01" \
        --set acme.caServer="https://acme-v02.api.letsencrypt.org/directory" \
        --set acme.dnsProvider.name="azure" \
        --set acme.dnsProvider.azure.AZURE_SUBSCRIPTION_ID="72738c1b-8ae6-4f23-8531-5796fe866f2e" \
        --set acme.dnsProvider.azure.AZURE_RESOURCE_GROUP="strapcloud.com" \
        --set acme.dnsProvider.azure.AZURE_CLIENT_ID="55aa320e-f341-4db8-8d3b-e28d1a41cb67" \
        --set acme.dnsProvider.azure.AZURE_CLIENT_SECRET="5c97ee08-7783-437f-899e-7b4c4e84874c" \
        --set acme.dnsProvider.azure.AZURE_TENANT_ID="566af820-2f8c-45ac-b975-647d2647b277" \
        --set acme.domains.enabled=true \
        --set "acme.domains.domainsList[0].main=*.${domain}" \
        --set dashboard.enabled=true,dashboard.domain="traefik.${domain}" \
        stable/traefik
    echo "done."
}
# Upgrade the "traefik" release from the stable/traefik chart using values
# from ./traefik-values.yaml.
# Globals: HELM_DEBUG (read) — optional extra helm flags such as "--debug";
# intentionally left unquoted so an empty value expands to nothing.
# NOTE(review): traefik-values.yaml is resolved relative to the current
# working directory — confirm callers run this from the expected directory.
function update_traefik() {
helm upgrade $HELM_DEBUG -f traefik-values.yaml traefik stable/traefik
}
# Deploy a single-replica Elassandra datacenter in $NAMESPACE as release
# "<cluster>-<dc>".
# $1 = cluster name (default: cl1)
# $2 = datacenter name (default: dc1)
# Globals: NAMESPACE, REGISTRY_SECRET_NAME, HELM_REPO (read). No image.tag
# is set here, so the chart's default tag applies.
function install_elassandra_datacenter() {
    helm install --namespace "$NAMESPACE" --name "${1:-cl1}-${2:-dc1}" \
        --set "image.pullSecrets[0]=$REGISTRY_SECRET_NAME" \
        --set replicas=1 \
        --wait \
        "$HELM_REPO/elassandra-datacenter"
}
# Create a namespace and make it the current context's default namespace.
# $1 = namespace name
function create_namespace() {
    echo "Creating namespace $1"
    # Quoted (SC2086) so the name is a single argument even if malformed.
    kubectl create namespace "$1"
    kubectl config set-context --current --namespace="$1"
    echo "Created namespace $1"
}
# Create a self-signed root CA (MyRootCA.key / MyRootCA.pem) in the current
# directory. The req step prompts interactively for the subject fields.
function generate_ca_cert() {
    printf '%s\n' "generating root CA"
    openssl genrsa -out MyRootCA.key 2048
    openssl req -x509 -new -nodes -key MyRootCA.key -sha256 -days 1024 -out MyRootCA.pem
}
# Create a client certificate signed by the MyRootCA generated above, then
# bundle key + cert + CA into MyClient.p12. Prompts interactively for the
# CSR subject and the PKCS#12 export password.
function generate_client_cert() {
    printf '%s\n' "generate client certificate"
    openssl genrsa -out MyClient1.key 2048
    openssl req -new -key MyClient1.key -out MyClient1.csr
    openssl x509 -req -in MyClient1.csr -CA MyRootCA.pem -CAkey MyRootCA.key -CAcreateserial -out MyClient1.pem -days 1024 -sha256
    openssl pkcs12 -export -out MyClient.p12 -inkey MyClient1.key -in MyClient1.pem -certfile MyRootCA.pem
}
# Print a certificate in human-readable form.
# $1 = path to an X.509/PEM certificate file
function view_cert() {
    # Quoted (SC2086) so paths containing spaces stay one argument.
    openssl x509 -text -in "$1"
}
# Grant the kube-system kubernetes-dashboard service account cluster-admin
# (development convenience only — overly broad for production).
function k8s_dashboard_role() {
    kubectl create clusterrolebinding kubernetes-dashboard --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard -n kube-system
}
#---------------------------------------------------------------
# Install the prometheus-operator chart as release "my-promop", exposing
# Prometheus / Alertmanager / Grafana through traefik ingresses under
# ${DNS_DOMAIN}.
# Globals: HELM_DEBUG (optional helm flags, intentionally unquoted so an
#          empty value expands to nothing), DNS_DOMAIN (read).
# NOTE(review): also reads ./prometheus-operator-values.yaml from the
# current working directory, and the "strapdata" helm repo must already be
# added — confirm both before running.
function deploy_prometheus_operator() {
helm install $HELM_DEBUG --name my-promop \
--set prometheus.ingress.enabled=true,prometheus.ingress.hosts[0]="prometheus.${DNS_DOMAIN}",prometheus.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
--set alertmanager.ingress.enabled=true,alertmanager.ingress.hosts[0]="alertmanager.${DNS_DOMAIN}",alertmanager.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
--set grafana.ingress.enabled=true,grafana.ingress.hosts[0]="grafana.${DNS_DOMAIN}",grafana.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
-f prometheus-operator-values.yaml \
strapdata/prometheus-operator
}
# Upgrade the "my-promop" prometheus-operator release with the same ingress
# settings as deploy_prometheus_operator, plus a Slack webhook for
# Alertmanager notifications.
# Globals: HELM_DEBUG (optional helm flags, intentionally unquoted),
#          DNS_DOMAIN (read).
# SECURITY(review): the Slack webhook URL below is a hard-coded credential
# committed to source — it should be rotated and supplied via an
# environment variable or secret instead.
function upgrade_prometheus_operator() {
helm upgrade $HELM_DEBUG \
--set prometheus.ingress.enabled=true,prometheus.ingress.hosts[0]="prometheus.${DNS_DOMAIN}",prometheus.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
--set alertmanager.ingress.enabled=true,alertmanager.ingress.hosts[0]="alertmanager.${DNS_DOMAIN}",alertmanager.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
--set grafana.ingress.enabled=true,grafana.ingress.hosts[0]="grafana.${DNS_DOMAIN}",grafana.ingress.annotations."kubernetes\.io/ingress\.class"="traefik" \
--set alertmanager.config.global.slack_api_url="https://hooks.slack.com/services/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" \
-f prometheus-operator-values.yaml \
my-promop strapdata/prometheus-operator
}
# Tear down the prometheus-operator: delete its CRDs (the chart does not
# remove them itself) and purge the helm release.
function undeploy_prometheus_operator() {
    local crd
    for crd in prometheuses prometheusrules servicemonitors alertmanagers; do
        kubectl delete crd "${crd}.monitoring.coreos.com"
    done
    helm delete --purge my-promop
}