In a previous commit we extended the AI boot image to also include a CPU version of vLLM. In this change, we start using the CPU version when the GPU count is zero.
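The entry-point script shown below only collects parameters and forwards them, gpu_count included, to ReplicaSetup#prep, so the CPU-versus-GPU decision presumably happens inside ReplicaSetup. As a rough illustration of the idea only, here is a minimal Ruby sketch; the method name and the install paths are assumptions, not the actual implementation.

# Illustrative sketch only: the method name and install paths are assumed,
# not taken from ReplicaSetup.
def vllm_install_dir(gpu_count)
  # With zero GPUs, fall back to the CPU build of vLLM baked into the AI boot image.
  gpu_count.zero? ? "/opt/vllm-cpu" : "/opt/vllm"
end

vllm_install_dir(0) # => "/opt/vllm-cpu"
vllm_install_dir(4) # => "/opt/vllm"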
#!/bin/env ruby
# frozen_string_literal: true

require "json" # JSON.parse is used below; harmless if util already loads it
require_relative "../../common/lib/util"
require_relative "../lib/replica_setup"

replica_setup = ReplicaSetup.new

# Replica parameters arrive as a single JSON object on stdin.
params = JSON.parse($stdin.read)

# Every parameter is required; abort with a clear message if one is missing.
begin
  gpu_count = params.fetch("gpu_count")
  inference_engine = params.fetch("inference_engine")
  inference_engine_params = params.fetch("inference_engine_params")
  model = params.fetch("model")
  replica_ubid = params.fetch("replica_ubid")
  ssl_crt_path = params.fetch("ssl_crt_path")
  ssl_key_path = params.fetch("ssl_key_path")
  gateway_port = params.fetch("gateway_port")
rescue KeyError => e
  puts "Needed #{e.key} in parameters"
  exit 1
end

# Hand everything to ReplicaSetup; gpu_count decides whether the CPU or GPU
# build of vLLM gets used.
replica_setup.prep(
  gpu_count:,
  inference_engine:,
  inference_engine_params:,
  model:,
  replica_ubid:,
  ssl_crt_path:,
  ssl_key_path:,
  gateway_port:
)
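The script expects its parameters as a JSON object on stdin and exits if any key is missing. Purely as an illustration of that input shape: the key names come from the fetch calls above, while the values and the script path are made up.

# Illustrative only: build and print the JSON this script reads from stdin.
require "json"

sample_params = {
  "gpu_count" => 0,                 # zero GPUs selects the CPU build of vLLM
  "inference_engine" => "vllm",
  "inference_engine_params" => "",
  "model" => "example-model",
  "replica_ubid" => "rp0example",
  "ssl_crt_path" => "/path/to/cert.pem",
  "ssl_key_path" => "/path/to/key.pem",
  "gateway_port" => 8443
}

# Feed it to the script, e.g.: ruby sample_params.rb | ./replica-setup
puts JSON.generate(sample_params)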