Previously, we gave k8s cluster nodes names like "clustername-control-plane-something" on the CP side and let them default to the VM inhost names on the DP side. This commit changes the node names to cluster- and nodepool-UBID-based strings on both the CP and DP sides. The current format is {(cluster|nodepool).ubid}-{r4nd0}. Alternatively, we could have used the VM UBIDs as the k8s node names, but associating the names with the cluster or nodepool seemed more intuitive to me. One technicality I couldn't figure out was setting the node name during the init phase: I initially tried supplying it through `nodeRegistration.name` in kubeadm-config.yaml, but couldn't make it work, so the script passes it with `--node-name` instead.
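For illustration only, a minimal Ruby sketch of how a name in that format could be produced; the node_name_for helper, the ubid accessor on the owning cluster/nodepool, and the 8-character random suffix are assumptions, not the actual implementation:

require "securerandom"

# Hypothetical sketch: "<cluster or nodepool ubid>-<short random suffix>".
# The owner would be the cluster for control plane nodes and the nodepool for workers.
def node_name_for(owner)
  suffix = SecureRandom.alphanumeric(8).downcase
  "#{owner.ubid}-#{suffix}"
end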
91 lines · 2.4 KiB · Ruby · Executable file
#!/bin/env ruby
# frozen_string_literal: true

require "json"
require "yaml"
require_relative "../../common/lib/util"

params = JSON.parse($stdin.read)

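# All of these parameters are required; fail fast with a clear error if any is missing.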
begin
  cluster_name = params.fetch("cluster_name")
  lb_hostname = params.fetch("lb_hostname")
  port = params.fetch("port")
  private_subnet_cidr4 = params.fetch("private_subnet_cidr4")
  private_subnet_cidr6 = params.fetch("private_subnet_cidr6")
  vm_cidr = params.fetch("vm_cidr")
  node_name = params.fetch("node_name")
rescue KeyError => e
  puts "Needed #{e.key} in parameters"
  exit 1
end
service_account_name = "k8s-access"
secret_name = service_account_name

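# kubeadm ClusterConfiguration: dual-stack pod subnets, node CIDR allocation
# disabled in the controller manager, and this VM's CIDR handed to the kubelet
# as its pod-cidr.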
config = {
  "apiVersion" => "kubeadm.k8s.io/v1beta3",
  "kind" => "ClusterConfiguration",
  "clusterName" => cluster_name,
  "kubernetesVersion" => "stable",
  "controlPlaneEndpoint" => "#{lb_hostname}:#{port}",
  "apiServer" => {
    "certSANs" => [
      lb_hostname
    ]
  },
  "networking" => {
    "podSubnet" => "#{private_subnet_cidr4},#{private_subnet_cidr6}",
    "dualStack" => true
  },
  "controllerManager" => {
    "extraArgs" => {
      "allocate-node-cidrs" => "false"
    }
  },
  "nodeRegistration" => {
    "kubeletExtraArgs" => {
      "pod-cidr" => vm_cidr
    }
  },
  "etcd" => {
    "local" => {
      "dataDir" => "/var/lib/etcd"
    }
  }
}

config_path = "/tmp/kubeadm-config.yaml"

safe_write_to_file(config_path, config.to_yaml)

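# Pass the node name with --node-name; setting nodeRegistration.name in the
# generated config did not take effect during init (see the commit message).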
r "sudo kubeadm init --config #{config_path} --node-name #{node_name}"

r("sudo /home/ubi/kubernetes/bin/setup-cni")

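# Wait for the API server to become healthy, polling /healthz up to 5 times
# with a 5-second pause after each failed attempt.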
api_server_up = false
5.times do
  r("kubectl --kubeconfig=/etc/kubernetes/admin.conf get --raw='/healthz'")
  api_server_up = true
  break
rescue CommandFail
  puts "API server is not up yet, retrying in 5 seconds..."
  sleep 5
end
unless api_server_up
  puts "API server is not healthy. Could not create customer credentials."
  exit 1
end

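# Create the customer credentials: a cluster-admin service account in
# kube-system and a long-lived service-account token secret bound to it.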
r "kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system create serviceaccount #{service_account_name}"
r "kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system create clusterrolebinding #{service_account_name}-binding --clusterrole=cluster-admin --serviceaccount=kube-system:#{service_account_name}"
r "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: #{secret_name}
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: #{service_account_name}
type: kubernetes.io/service-account-token
EOF
"