ubicloud/spec/prog/github/github_repository_nexus_spec.rb
Jeremy Evans 215f09541a Make access_tag only for project <-> accounts join table
For the includers of HyperTagMethods, this changes the authorization
code and object_tag member validation code to look at the project_id
column for the object, instead of looking up a row with the project and
object in the access_tag table.
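
A minimal sketch of the lookup change; the access_tag column names
here are guesses, not taken from the actual diff:

  # Before: membership meant a row in the access_tag join table
  authorized = !AccessTag.where(project_id: project.id, hyper_tag_id: object.id).empty?

  # After: membership is a direct column comparison on the object
  authorized = object.project_id == project.id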

This removes all calls to associate_with_project, other than
those for Account.  It removes the projects association for
the includers of HyperTagMethods, and adds a project association to
the models that didn't already have one, since there is only a single
project for each object now.

Most HyperTagMethods code is inlined into Account, since it is the
only user of the code now.  Temporarily, other models will still include
HyperTagMethods for the before_destroy hook, but eventually it will
go away completely.

The associations in Project that previously used access_tag as a join
table are changed from many_to_many to one_to_many, except for
Account (which still uses the join table).
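
A hedged sketch of that change in Sequel terms (the association and
key names are illustrative stand-ins, not the real model code):

  class Project < Sequel::Model
    # Before: resources joined through access_tag
    # many_to_many :github_installations, join_table: :access_tag,
    #   left_key: :project_id, right_key: :hyper_tag_id

    # After: a plain foreign key on the resource's project_id column
    one_to_many :github_installations

    # Account still goes through the join table
    many_to_many :accounts, join_table: :access_tag,
      left_key: :project_id, right_key: :hyper_tag_id
  end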

Project#has_resources now needs separate queries for all of the
resource classes to see if there are any associated objects.
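
Presumably something along these lines (the class list is abridged
and hypothetical):

  def has_resources
    [Vm, PrivateSubnet, GithubInstallation].any? do |klass|
      !klass.where(project_id: id).empty?
    end
  end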

This causes a lot of fallout in the specs, but unfortunately that is
unavoidable due to the extensive use of projects.first in the specs to
get the related project for the objects, as well as the extensive
use of associate_with_project.
2025-01-17 08:32:46 -08:00

# frozen_string_literal: true

require_relative "../../model/spec_helper"
require "octokit"

RSpec.describe Prog::Github::GithubRepositoryNexus do
subject(:nx) {
described_class.new(Strand.new).tap {
_1.instance_variable_set(:@github_repository, github_repository)
}
}

let(:github_repository) {
GithubRepository.new(name: "ubicloud/ubicloud", last_job_at: Time.now).tap {
_1.id = "31b9c46a-602a-8616-ae2f-41775cb592dd"
}
}

describe ".assemble" do
it "creates github repository or updates last_job_at if the repository exists" do
project = Project.create_with_id(name: "default")
installation = GithubInstallation.create_with_id(installation_id: 123, project_id: project.id, name: "test-user", type: "User")
expect {
described_class.assemble(installation, "ubicloud/ubicloud", "master")
}.to change(GithubRepository, :count).from(0).to(1)
now = Time.now.round(6)
expect(Time).to receive(:now).and_return(now).at_least(:once)
st = described_class.assemble(installation, "ubicloud/ubicloud", "main")
expect(GithubRepository.count).to eq(1)
expect(Strand.count).to eq(1)
expect(st.subject.last_job_at).to eq(now)
expect(st.subject.default_branch).to eq("main")
described_class.assemble(installation, "ubicloud/ubicloud", nil)
expect(st.subject.default_branch).to eq("main")
end
end

describe ".check_queued_jobs" do
let(:client) { instance_double(Octokit::Client) }
before do
allow(Github).to receive(:installation_client).and_return(client)
allow(client).to receive(:auto_paginate=)
installation = instance_double(GithubInstallation, installation_id: "123")
expect(github_repository).to receive(:installation).and_return(installation).at_least(:once)
expect(installation).to receive(:project).and_return(instance_double(Project, active?: true)).at_least(:once)
end
it "creates extra runner if needed" do
expect(client).to receive(:repository_workflow_runs).and_return({workflow_runs: [
{id: 1, run_attempt: 2, status: "queued"},
{id: 2, run_attempt: 1, status: "queued"}
]})
expect(client).to receive(:rate_limit).and_return(instance_double(Octokit::RateLimit, remaining: 100, limit: 100)).at_least(:once)
expect(client).to receive(:workflow_run_attempt_jobs).with("ubicloud/ubicloud", 1, 2).and_return({jobs: [
{status: "queued", labels: ["ubuntu-latest"]},
{status: "queued", labels: ["ubicloud"]},
{status: "queued", labels: ["ubicloud"]},
{status: "queued", labels: ["ubicloud-standard-4"]},
{status: "queued", labels: ["ubicloud-standard-8"]},
{status: "failed", labels: ["ubicloud"]}
]})
expect(client).to receive(:workflow_run_attempt_jobs).with("ubicloud/ubicloud", 2, 1).and_return({jobs: [
{status: "queued", labels: ["ubicloud"]}
]})
expect(github_repository).to receive(:runners_dataset).and_return(instance_double(Sequel::Dataset)).at_least(:once)
expect(github_repository.runners_dataset).to receive(:where).with(label: "ubicloud", workflow_job: nil).and_return([instance_double(GithubRunner)])
expect(github_repository.runners_dataset).to receive(:where).with(label: "ubicloud-standard-4", workflow_job: nil).and_return([])
expect(github_repository.runners_dataset).to receive(:where).with(label: "ubicloud-standard-8", workflow_job: nil).and_return([instance_double(GithubRunner)])
expect(Prog::Vm::GithubRunner).to receive(:assemble).with(github_repository.installation, repository_name: "ubicloud/ubicloud", label: "ubicloud").twice
expect(Prog::Vm::GithubRunner).to receive(:assemble).with(github_repository.installation, repository_name: "ubicloud/ubicloud", label: "ubicloud-standard-4")
expect(Prog::Vm::GithubRunner).not_to receive(:assemble).with(github_repository.installation, repository_name: "ubicloud/ubicloud", label: "ubicloud-standard-8")
nx.check_queued_jobs
expect(nx.polling_interval).to eq(5 * 60)
end
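# The remaining examples exercise the poller's backoff behavior: a
# nearly exhausted rate limit sleeps until it resets, less than half
# the quota remaining stretches the interval to 15 minutes, and an
# inactive project backs off for a full day.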
it "naps until the resets_at if remaining quota is low" do
expect(client).to receive(:repository_workflow_runs).and_return({workflow_runs: []})
now = Time.now
expect(client).to receive(:rate_limit).and_return(instance_double(Octokit::RateLimit, remaining: 8, limit: 100, resets_at: now + 8 * 60)).at_least(:once)
expect(Time).to receive(:now).and_return(now)
nx.check_queued_jobs
expect(nx.polling_interval).to eq(8 * 60)
end
it "increases polling interval if remaining quota is lower than 0.5" do
expect(client).to receive(:repository_workflow_runs).and_return({workflow_runs: []})
expect(client).to receive(:rate_limit).and_return(instance_double(Octokit::RateLimit, remaining: 40, limit: 100)).at_least(:once)
nx.check_queued_jobs
expect(nx.polling_interval).to eq(15 * 60)
end
it "does not poll jobs if the project is not active" do
expect(github_repository.installation.project).to receive(:active?).and_return(false)
nx.check_queued_jobs
expect(nx.polling_interval).to eq(24 * 60 * 60)
end
end

describe ".cleanup_cache" do
let(:blob_storage_client) { instance_double(Aws::S3::Client) }
before do
project = Project.create_with_id(name: "test")
installation = GithubInstallation.create_with_id(installation_id: 123, project_id: project.id, name: "test-user", type: "User")
github_repository.installation_id = installation.id
github_repository.save_changes
allow(Aws::S3::Client).to receive(:new).and_return(blob_storage_client)
end
def create_cache_entry(**args)
defaults = {key: "k#{Random.rand}", version: "v1", scope: "main", repository_id: github_repository.id, created_by: "3c9a861c-ab14-8218-a175-875ebb652f7b", committed_at: Time.now}
GithubCacheEntry.create_with_id(**defaults.merge(args))
end
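# The examples below exercise the cleanup policy: entries unused for 7
# days (or never used and older than 7 days) are deleted, and once the
# total committed size exceeds the storage quota (30 GiB by default,
# judging from the sizes used here) the oldest entries go first.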
it "deletes cache entries that not accessed in the last 7 days" do
create_cache_entry(last_accessed_at: Time.now - 6 * 24 * 60 * 60)
ten_days_old = create_cache_entry(last_accessed_at: Time.now - 10 * 24 * 60 * 60)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: ten_days_old.blob_key)
nx.cleanup_cache
end
it "deletes cache entries that older than 7 days not accessed yet" do
six_days_old = create_cache_entry(last_accessed_at: nil, created_at: Time.now - 6 * 24 * 60 * 60)
ten_days_old = create_cache_entry(last_accessed_at: nil, created_at: Time.now - 10 * 24 * 60 * 60)
expect(blob_storage_client).not_to receive(:delete_object).with(bucket: github_repository.bucket_name, key: six_days_old.blob_key)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: ten_days_old.blob_key)
nx.cleanup_cache
end
it "deletes oldest cache entries if the total usage exceeds the default limit" do
twenty_nine_gib_cache = create_cache_entry(created_at: Time.now - 10 * 60, size: 29 * 1024 * 1024 * 1024)
two_gib_cache = create_cache_entry(created_at: Time.now - 11 * 60, size: 2 * 1024 * 1024 * 1024)
three_gib_cache = create_cache_entry(created_at: Time.now - 12 * 60, size: 3 * 1024 * 1024 * 1024)
expect(blob_storage_client).not_to receive(:delete_object).with(bucket: github_repository.bucket_name, key: twenty_nine_gib_cache.blob_key)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: two_gib_cache.blob_key)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: three_gib_cache.blob_key)
nx.cleanup_cache
end
it "excludes uncommitted cache entries" do
thirty_two_gib_cache = create_cache_entry(created_at: Time.now - 10 * 60, size: 32 * 1024 * 1024 * 1024)
create_cache_entry(created_at: Time.now - 13 * 60, size: nil)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: thirty_two_gib_cache.blob_key)
nx.cleanup_cache
end
it "deletes oldest cache entries if the total usage exceeds the custom limit" do
github_repository.installation.project.add_quota(quota_id: ProjectQuota.default_quotas["GithubRunnerCacheStorage"]["id"], value: 20)
nineteen_gib_cache = create_cache_entry(created_at: Time.now - 10 * 60, size: 19 * 1024 * 1024 * 1024)
two_gib_cache = create_cache_entry(created_at: Time.now - 11 * 60, size: 2 * 1024 * 1024 * 1024)
three_gib_cache = create_cache_entry(created_at: Time.now - 12 * 60, size: 3 * 1024 * 1024 * 1024)
expect(blob_storage_client).not_to receive(:delete_object).with(bucket: github_repository.bucket_name, key: nineteen_gib_cache.blob_key)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: two_gib_cache.blob_key)
expect(blob_storage_client).to receive(:delete_object).with(bucket: github_repository.bucket_name, key: three_gib_cache.blob_key)
nx.cleanup_cache
end
end

describe "#before_run" do
it "hops to destroy when needed" do
expect(nx).to receive(:when_destroy_set?).and_yield
expect(nx).to receive(:register_deadline)
expect { nx.before_run }.to hop("destroy")
end
it "does not hop to destroy if already in the destroy state" do
expect(nx).to receive(:when_destroy_set?).and_yield
expect(nx.strand).to receive(:label).and_return("destroy")
expect { nx.before_run }.not_to hop("destroy")
end
end

describe "#wait" do
it "checks queued jobs and cache usage then naps" do
expect(github_repository).to receive(:access_key).and_return("key")
expect(nx).to receive(:check_queued_jobs)
expect(nx).to receive(:cleanup_cache)
expect { nx.wait }.to nap(5 * 60)
end
it "does not check queued jobs but check cache usage if 6 hours passed from the last job" do
expect(github_repository).to receive(:access_key).and_return("key")
expect(github_repository).to receive(:last_job_at).and_return(Time.now - 7 * 60 * 60)
expect(nx).not_to receive(:check_queued_jobs)
expect(nx).to receive(:cleanup_cache)
expect { nx.wait }.to nap(15 * 60)
end
it "does not destroys repository and if not found but has active runners" do
expect(nx).to receive(:check_queued_jobs).and_raise(Octokit::NotFound)
expect(github_repository).to receive(:runners).and_return([instance_double(GithubRunner)])
expect(github_repository).not_to receive(:incr_destroy)
expect { nx.wait }.to nap(5 * 60)
end
it "destroys repository and if not found" do
expect(nx).to receive(:check_queued_jobs).and_raise(Octokit::NotFound)
expect(github_repository).to receive(:incr_destroy)
expect { nx.wait }.to nap(0)
end
it "does not poll if it is disabled" do
expect(Config).to receive(:enable_github_workflow_poller).and_return(false)
expect(nx).not_to receive(:check_queued_jobs)
expect { nx.wait }.to nap(5 * 60)
end
end

describe "#destroy" do
it "does not destroy if has active runner" do
expect(github_repository).to receive(:runners).and_return([instance_double(GithubRunner)])
expect { nx.destroy }.to nap(5 * 60)
end
it "deletes resource and pops" do
expect(nx).to receive(:decr_destroy)
expect(github_repository).to receive(:destroy)
expect { nx.destroy }.to exit({"msg" => "github repository destroyed"})
end
end
end