Files
ubicloud/spec/prog/vnet/load_balancer_health_probes_spec.rb
Jeremy Evans 215f09541a Make access_tag only for project <-> accounts join table
For the includers of HyperTagMethods, this changes the authorization
code and object_tag member validation code to look at the project_id
column for the object, instead of looking a row with the project and
object in the access_tag table.

This removes all calls to associate_with_project, other than
those for Account.  It removes the projects association for
the includers of HyperTagMethods, and adds a project association to
the models that didn't already have one, since there is only a single
project for each object now.

Most HyperTagMethods code is inlined into Account, since it is the
only user of the code now.  Temporarily, other models will still include
HyperTagMethods for the before_destroy hook, but eventually it will
go away completely.

The associations in Projects that previously used access_tag as a join
table are changed from many_to_many to one_to_many, except for
Account (which still uses the join table).

Project#has_resources now needs separate queries for all of the
resource classes to see if there are any associated objects.

This causes a lot of fallout in the specs, but unfortunately that is
unavoidable due to the extensive use of projects.first in the specs to
get the related project for the objects, as well as the extensive
use of associate_with_project.
2025-01-17 08:32:46 -08:00

112 lines
6.8 KiB
Ruby

# frozen_string_literal: true
# Specs for the per-VM load balancer health-probe prog: it shells into the
# VM's host netns, probes the app over HTTP (curl) or TCP (nc) on both IP
# stacks, and bumps the LB update semaphore only when a state change crosses
# the health-check threshold. Every path ends in nap(30).
RSpec.describe Prog::Vnet::LoadBalancerHealthProbes do
  subject(:nx) { described_class.new(st) }

  let(:st) {
    # Strand stack carries both the LB (subject) and the probed VM's id.
    Strand.create_with_id(prog: "Vnet::LoadBalancerHealthProbes", stack: [{"subject_id" => lb.id, "vm_id" => vm.id}], label: "health_probe")
  }

  let(:lb) {
    # Full fixture: project -> subnet -> dns zone -> cert -> load balancer.
    prj = Project.create_with_id(name: "test-prj")
    ps = Prog::Vnet::SubnetNexus.assemble(prj.id, name: "test-ps").subject
    dz = DnsZone.create_with_id(name: "test-dns-zone", project_id: prj.id)
    cert = Prog::Vnet::CertNexus.assemble("test-host-name", dz.id).subject
    lb = Prog::Vnet::LoadBalancerNexus.assemble(ps.id, name: "test-lb", src_port: 80, dst_port: 80).subject
    lb.add_cert(cert)
    lb
  }

  let(:vm) {
    Prog::Vm::Nexus.assemble("pub-key", lb.project_id, name: "test-vm", private_subnet_id: lb.private_subnet.id).subject
  }

  before do
    allow(nx).to receive_messages(load_balancer: lb)
  end

  describe "#health_probe" do
    let(:vmh) { instance_double(VmHost, sshable: instance_double(Sshable)) }

    before do
      # Attach the VM to the LB as "up" and pin its addresses so the probe
      # commands (and their expected strings below) are deterministic.
      allow(vm).to receive_messages(vm_host: vmh, ephemeral_net6: NetAddr::IPv6Net.parse("2a01:4f8:10a:128b:814c::/79"))
      lb.add_vm(vm)
      lb.load_balancers_vms_dataset.update(state: "up")
      expect(Vm).to receive(:[]).with(vm.id).and_return(vm)
      allow(vm.nics.first).to receive(:private_ipv4).and_return(NetAddr::IPv4Net.parse("192.168.1.0"))
    end

    it "naps for 30 seconds and doesn't perform update if health check succeeds" do
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:192.168.1.0 --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "naps for 30 seconds and doesn't perform update if health check fails the first time" do
      # counter below threshold: a single failure must not trigger an update
      lb.load_balancers_vms_dataset.update(state_counter: 1)
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:192.168.1.0 --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("500")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("500")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "naps for 30 seconds and doesn't perform update if health check fails the first time via an exception" do
      # a probe command raising must be treated the same as a failed status
      lb.load_balancers_vms_dataset.update(state_counter: 1)
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:192.168.1.0 --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_raise("error")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_raise("error")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "starts update if health check succeeds and we hit the threshold" do
      # counter exactly at threshold: this success crosses it, so update fires
      lb.load_balancers_vms_dataset.update(state_counter: 2)
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:192.168.1.0 --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(lb).to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "naps for 30 seconds and doesn't perform update if health check succeeds and we're already above threshold" do
      # counter past threshold: state already settled, no repeat update
      lb.load_balancers_vms_dataset.update(state_counter: 3)
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:192.168.1.0 --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_return("200")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "uses nc for tcp health checks" do
      lb.update(health_check_protocol: "tcp")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} nc -z -w 15 192.168.1.0 80 && echo 200 || echo 400").and_return("200")
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} nc -z -w 15 2a01:4f8:10a:128b:814c::2 80 && echo 200 || echo 400").and_return("200")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "uses nc for tcp health checks but only for ipv4 if lb is ipv4 only" do
      lb.update(stack: "ipv4")
      lb.update(health_check_protocol: "tcp")
      # no ipv6 probe expected: only the ipv4 nc command is stubbed
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} nc -z -w 15 192.168.1.0 80 && echo 200 || echo 400").and_return("200")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end

    it "naps for 30 seconds and doesn't perform update if health check fails via an exception, probing only ipv6 if lb is ipv6 only" do
      lb.update(stack: "ipv6")
      # no ipv4 probe expected: only the ipv6 curl command is stubbed
      expect(vmh.sshable).to receive(:cmd).with("sudo ip netns exec #{vm.inhost_name} curl --insecure --resolve #{lb.hostname}:80:[2a01:4f8:10a:128b:814c::2] --max-time 15 --silent --output /dev/null --write-out '%{http_code}' http://#{lb.hostname}:80/up").and_raise("error")
      expect(lb).not_to receive(:incr_update_load_balancer)
      expect { nx.health_probe }.to nap(30)
    end
  end
end