When the destroy semaphore is incremented, we want to hop to the destroy label from whichever label the prog is currently in. If a prog defines a before_run method, it is called just before each label runs. A few of these methods carry custom logic, but most are nearly identical, and to keep 100% line coverage we end up testing the same behavior over and over. I've moved this default into the base prog: if a prog does not override before_run and has both a destroy label and a destroy semaphore, the base prog runs this basic flow for it. The remaining custom logic in progs follows similar patterns, so we can gradually move that into the base prog as well.
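For context, the shared behavior amounts to roughly the sketch below. This is a minimal illustration rather than the exact base-prog code: it assumes the usual when_destroy_set? semaphore helper and the hop_destroy method generated for progs that define a destroy label, and the real guard conditions (for example, labels that must not be interrupted) vary per prog.

```ruby
# Default before_run, conceptually: once the destroy semaphore has been
# incremented, hop to the destroy label from any label, unless the strand
# is already running the destroy flow.
def before_run
  when_destroy_set? do
    hop_destroy unless strand.label == "destroy"
  end
end
```

With this default in the base prog, a subclass like the one below only needs its destroy label; it no longer needs its own boilerplate before_run or the near-duplicate specs that covered it.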
# frozen_string_literal: true

class Prog::Kubernetes::KubernetesClusterNexus < Prog::Base
  subject_is :kubernetes_cluster

  def self.assemble(name:, project_id:, location:, version: "v1.32", private_subnet_id: nil, cp_node_count: 3, target_node_size: "standard-2", target_node_storage_size_gib: nil)
    DB.transaction do
      unless (project = Project[project_id])
        fail "No existing project"
      end

      unless ["v1.32", "v1.31"].include?(version)
        fail "Invalid Kubernetes Version"
      end

      Validation.validate_kubernetes_name(name)
      Validation.validate_kubernetes_cp_node_count(cp_node_count)

      subnet = if private_subnet_id
        project.private_subnets_dataset.first(id: private_subnet_id) || fail("Given subnet is not available in the given project")
      else
        subnet_name = name + "-k8s-subnet"
        ps = project.private_subnets_dataset.first(:location => location, Sequel[:private_subnet][:name] => subnet_name)
        ps || Prog::Vnet::SubnetNexus.assemble(
          project_id,
          name: subnet_name,
          location:,
          ipv4_range: Prog::Vnet::SubnetNexus.random_private_ipv4(location, project, 18).to_s
        ).subject
      end

      # TODO: Validate location
      # TODO: Move resources (vms, subnet, LB, etc.) into our own project
      # TODO: Validate node count

      kc = KubernetesCluster.create_with_id(name:, version:, cp_node_count:, location:, target_node_size:, target_node_storage_size_gib:, project_id: project.id, private_subnet_id: subnet.id)

      Strand.create(prog: "Kubernetes::KubernetesClusterNexus", label: "start") { _1.id = kc.id }
    end
  end

  label def start
    register_deadline("wait", 120 * 60)
    hop_create_load_balancer
  end

  label def create_load_balancer
    load_balancer_st = Prog::Vnet::LoadBalancerNexus.assemble(
      kubernetes_cluster.private_subnet_id,
      name: "#{kubernetes_cluster.name}-apiserver",
      algorithm: "hash_based",
      src_port: 443,
      dst_port: 6443,
      health_check_endpoint: "/healthz",
      health_check_protocol: "tcp",
      stack: LoadBalancer::Stack::IPV4
    )
    kubernetes_cluster.update(api_server_lb_id: load_balancer_st.id)

    hop_bootstrap_control_plane_vms
  end

  label def bootstrap_control_plane_vms
    nap 5 unless kubernetes_cluster.endpoint

    hop_wait if kubernetes_cluster.cp_vms.count >= kubernetes_cluster.cp_node_count

    push Prog::Kubernetes::ProvisionKubernetesNode
  end

  label def wait
    nap 65536
  end

  label def destroy
    kubernetes_cluster.api_server_lb.incr_destroy
    kubernetes_cluster.cp_vms.each(&:incr_destroy)
    kubernetes_cluster.remove_all_cp_vms
    kubernetes_cluster.nodepools.each { _1.incr_destroy }
    nap 5 unless kubernetes_cluster.nodepools.empty?
    kubernetes_cluster.destroy
    pop "kubernetes cluster is deleted"
  end
end