application_name: &application_name llama-factory

distributed:
  method: helm
  release_name: *application_name
  chart: llama-factory
  sets:
    app: llama-factory
    model:
      huggingfaceName: "Qwen/Qwen2.5-0.5B-Instruct"
    resources:
      gpuLimit: 1
      cpuRequest: 8
      memoryLimit: "16Gi"
      shmSize: "15Gi"
    llama:
      image: "docker.io/library/one-click:v1"
      workerSize: 2
    nodeSelector: {}
    svc:
      svc_type: NodePort
      protocol: http
      hostname: 10.6.14.123
      port: 30080
      url: ~
    pod:
      name: llamafactory

monolithic:
  method: helm
  release_name: *application_name
  chart: llama-factory
  sets:
    app: llama-factory
    model:
      huggingfaceName: "Qwen/Qwen2.5-0.5B-Instruct"
    resources:
      gpuLimit: 1
      cpuRequest: 8
      memoryLimit: "16Gi"
      shmSize: "15Gi"
    llama:
      image: "docker.io/library/one-click:v1"
      workerSize: 1
    nodeSelector: {}
    svc:
      svc_type: NodePort
      protocol: http
      hostname: 10.6.14.123
      port: 30080
      url: ~
    pod:
      name: llama-factory
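
# Sketch (assumption): the keys above (method: helm, chart, release_name, sets)
# suggest this file is consumed by a deployment wrapper that forwards "sets" as
# Helm values. Under that assumption, the "distributed" profile would map to an
# invocation roughly like the one below; the chart path and --set value paths
# are illustrative, not confirmed by this file.
#
#   helm install llama-factory ./llama-factory \
#     --set model.huggingfaceName="Qwen/Qwen2.5-0.5B-Instruct" \
#     --set resources.gpuLimit=1 \
#     --set llama.image="docker.io/library/one-click:v1" \
#     --set llama.workerSize=2 \
#     --set svc.svc_type=NodePort \
#     --set svc.port=30080
#
# The "monolithic" profile differs only in workerSize (1 instead of 2) and the
# pod name, so the same mapping would apply with those values swapped.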