When the destroy semaphore is incremented, we want to hop to the destroy label from whatever label the prog is currently in. If a prog defines a before_run method, it is called just before each label runs. A few of these methods carry custom logic, but most are near-identical, and keeping 100% line coverage means testing the same behavior over and over. I've moved this common flow into the base prog: if a prog doesn't override before_run and has both a destroy label and a destroy semaphore, the base implementation handles the hop for all progs. The remaining custom logic in individual progs follows similar patterns, so we can gradually move it into the base prog as well.
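For reference, a minimal sketch of the shared flow now living in the base prog. The helper names (when_destroy_set? generated by a semaphore :destroy declaration, hop_destroy for hopping to the destroy label) follow the usual prog conventions and are assumptions here, not the exact implementation:

    def before_run
      # If the destroy semaphore was incremented, hop to the destroy label
      # from wherever we are, unless we are already in the destroy flow.
      when_destroy_set? do
        hop_destroy unless strand.label == "destroy"
      end
    end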
# frozen_string_literal: true

require_relative "../../model/spec_helper"

RSpec.describe Prog::Kubernetes::KubernetesNodepoolNexus do
  subject(:nx) { described_class.new(Strand.new(id: "8148ebdf-66b8-8ed0-9c2f-8cfe93f5aa77")) }

  let(:project) { Project.create(name: "default") }
  let(:subnet) { PrivateSubnet.create(net6: "0::0", net4: "127.0.0.1", name: "x", location: "x", project_id: project.id) }

  let(:kc) {
    kc = KubernetesCluster.create(
      name: "k8scluster",
      version: "v1.32",
      cp_node_count: 3,
      private_subnet_id: subnet.id,
      location: "hetzner-fsn1",
      project_id: project.id,
      target_node_size: "standard-2"
    )

    lb = LoadBalancer.create(private_subnet_id: subnet.id, name: "somelb", src_port: 123, dst_port: 456, health_check_endpoint: "/foo", project_id: project.id)
    kc.add_cp_vm(create_vm)
    kc.add_cp_vm(create_vm)
    kc.update(api_server_lb_id: lb.id)
    kc
  }

  let(:kn) { KubernetesNodepool.create(name: "k8stest-np", node_count: 2, kubernetes_cluster_id: kc.id, target_node_size: "standard-2") }

  before do
    allow(nx).to receive(:kubernetes_nodepool).and_return(kn)
  end

  describe ".assemble" do
    it "validates input" do
      expect {
        described_class.assemble(name: "name", node_count: 2, kubernetes_cluster_id: SecureRandom.uuid)
      }.to raise_error RuntimeError, "No existing cluster"
    end

    it "creates a kubernetes nodepool" do
      st = described_class.assemble(name: "k8stest-np", node_count: 2, kubernetes_cluster_id: kc.id, target_node_size: "standard-4", target_node_storage_size_gib: 37)
      kn = st.subject

      expect(kn.name).to eq "k8stest-np"
      expect(kn.ubid).to start_with("kn")
      expect(kn.kubernetes_cluster_id).to eq kc.id
      expect(kn.node_count).to eq 2
      expect(st.label).to eq "start"
      expect(kn.target_node_size).to eq "standard-4"
      expect(kn.target_node_storage_size_gib).to eq 37
    end

    it "can have null as storage size" do
      st = described_class.assemble(name: "k8stest-np", node_count: 2, kubernetes_cluster_id: kc.id, target_node_size: "standard-4", target_node_storage_size_gib: nil)

      expect(st.subject.target_node_storage_size_gib).to be_nil
    end
  end

  describe "#start" do
    it "naps if the kubernetes cluster is not ready" do
      expect(kn.cluster).to receive(:strand).and_return(Strand.new(label: "not-wait"))
      expect { nx.start }.to nap(30)
    end

    it "registers a deadline and hops if the cluster is ready" do
      expect(kn.cluster).to receive(:strand).and_return(Strand.new(label: "wait"))
      expect(nx).to receive(:register_deadline)
      expect { nx.start }.to hop("bootstrap_worker_vms")
    end
  end

  describe "#bootstrap_worker_vms" do
    it "hops wait if the target number of vms is reached" do
      expect(kn).to receive(:vms).and_return [1, 2]
      expect { nx.bootstrap_worker_vms }.to hop("wait")
    end

    it "pushes ProvisionKubernetesNode prog to create VMs" do
      expect(nx).to receive(:push).with(Prog::Kubernetes::ProvisionKubernetesNode, {"nodepool_id" => kn.id, "subject_id" => kn.cluster.id})
      nx.bootstrap_worker_vms
    end
  end

  describe "#wait" do
    it "just naps for 30 seconds for now" do
      expect { nx.wait }.to nap(30)
    end
  end

  describe "#destroy" do
    it "destroys the nodepool and its vms" do
      vms = [create_vm, create_vm]
      expect(kn).to receive(:vms).and_return(vms)

      expect(vms).to all(receive(:incr_destroy))
      expect(kn).to receive(:remove_all_vms)
      expect(kn).to receive(:destroy)
      expect { nx.destroy }.to exit({"msg" => "kubernetes nodepool is deleted"})
    end
  end
end