This commit introduces a hugepages configuration for postgres via a rhizome lib+bin. At the moment, it uses the default huge page size (2M), and calculates the number of huge pages as 25% of the total available memory. The setup adjusts the shared_buffers accordingly to fit the shared memory within the allocated number of hugepages. The configuration happens as an `ExecStartPre` script added to the existing systemd unit managed by pg_ctlcluster. To keep the semantics of the `start` command unchanged, the hugepages script *does not* stop an existing cluster to configure hugepages; it only does so when the cluster is not running. In a subsequent change, we can consider adding the boot parameters to set up 1G hugepages for larger instances (possibly standard-30+), where they are most useful. [1]: https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-HUGE-PAGES
74 lines
2.2 KiB
Ruby
74 lines
2.2 KiB
Ruby
# frozen_string_literal: true
|
|
|
|
require "logger"
require "shellwords"

require_relative "../../common/lib/util"
|
|
|
|
# Configures Linux huge pages for a PostgreSQL cluster managed by
# pg_ctlcluster, sizing shared_buffers so that the server's entire shared
# memory segment fits within the pre-allocated huge page pool.
# See https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-HUGE-PAGES
class HugepagesSetup
  # instance: "<version>-<cluster>" (e.g. "16-main"). The cluster name may
  # itself contain dashes, hence the 2-limit split.
  # logger: a Logger-compatible object used for diagnostics.
  def initialize(instance, logger)
    @version, @cluster = instance.split("-", 2)
    @logger = logger
  end

  # Returns the integer value of a postgres runtime parameter without
  # starting the server ("postgres -C <param>" prints the computed value
  # and exits). Raises ArgumentError if the output is not a plain integer.
  def get_postgres_param(param)
    Integer(r("sudo -u postgres /usr/lib/postgresql/#{@version.shellescape}/bin/postgres -D /dat/#{@version.shellescape}/data -c config_file=/etc/postgresql/#{@version.shellescape}/#{@cluster.shellescape}/postgresql.conf -C #{param.shellescape}"), 10)
  end

  # Reads the kernel's huge page pool from /proc/meminfo.
  # Returns [hugepages_count, hugepage_size_kib]. Raises TypeError if the
  # expected lines are absent (they are always present on Linux).
  def hugepage_info
    meminfo = File.read("/proc/meminfo")
    hugepages_count = Integer(meminfo[/^HugePages_Total:\s*(\d+)/, 1], 10)
    hugepage_size_kib = Integer(meminfo[/^Hugepagesize:\s*(\d+)\s*kB/, 1], 10)
    [hugepages_count, hugepage_size_kib]
  end

  # Stops the cluster via pg_ctlcluster. Exit status 2 means "cluster is
  # not running", which is acceptable here.
  def stop_postgres_cluster
    r "sudo pg_ctlcluster stop #{@version.shellescape} #{@cluster.shellescape}", expect: [0, 2]
  end

  # Writes the huge pages drop-in config in two phases: first with
  # shared_buffers set to the full huge page pool, so postgres can report
  # the resulting shared_memory_size (buffers plus bookkeeping overhead);
  # then with shared_buffers shrunk by that overhead, so the whole shared
  # memory segment fits inside the allocated huge pages.
  def setup_postgres_hugepages
    hugepages_count, hugepage_size_kib = hugepage_info

    if hugepages_count == 0
      @logger.warn("No hugepages configured, skipping setup.")
      return
    end

    hugepages_kib = hugepages_count * hugepage_size_kib
    update_postgres_hugepages_conf(hugepages_kib)

    # shared_memory_size is reported by postgres in MB; convert to KiB.
    shmem_and_overhead_kib = 1024 * get_postgres_param("shared_memory_size")
    overhead = shmem_and_overhead_kib - hugepages_kib

    target_kib = hugepages_kib - overhead

    # Floor to the nearest multiple of the postgres block size: (a / b) * b.
    block_size_bytes = get_postgres_param("block_size")
    block_size_kib = block_size_bytes / 1024
    final_shared_buffers_kib = (target_kib / block_size_kib) * block_size_kib

    update_postgres_hugepages_conf(final_shared_buffers_kib)
  end

  # Writes the drop-in conf file that forces huge pages on, with the
  # default huge page size (huge_page_size = 0) and the given
  # shared_buffers value in KiB.
  def update_postgres_hugepages_conf(shared_buffers_kib)
    safe_write_to_file("/etc/postgresql/#{@version}/#{@cluster}/conf.d/002-hugepages.conf", <<CONF
huge_pages = 'on'
huge_page_size = 0
shared_buffers = #{shared_buffers_kib}kB
CONF
    )
  end

  # pg_ctlcluster status exits 3 when the cluster is down, which `r`
  # accepts (expect: [3]); any other status — notably 0 for "running" —
  # makes `r` raise CommandFail, which we map to true.
  def postgres_running?
    r "sudo pg_ctlcluster status #{@version.shellescape} #{@cluster.shellescape}", expect: [3]
    false
  rescue CommandFail
    true
  end

  # Configures huge pages only when the cluster is not already running, so
  # the semantics of the `start` command stay unchanged. The stop is still
  # issued (tolerating "not running") to keep pg_ctlcluster's state
  # consistent before rewriting the configuration.
  def setup
    unless postgres_running?
      stop_postgres_cluster
      setup_postgres_hugepages
    end
  end
end
|