feat: Optimized configuration

This commit is contained in:
vera
2026-02-09 18:22:06 +08:00
parent 76c8bdbcfc
commit cd4584ebae
6 changed files with 83 additions and 22 deletions

View File

@ -1,20 +1,18 @@
name: CI/CD Pipeline
on:
push:
branches: [ main ]
# pull_request:
# branches: [ main ]
name: Build container
env:
VERSION: 0.0.2
VERSION: 0.0.3
REGISTRY: https://harbor.bwgdi.com
REGISTRY_NAME: harbor.bwgdi.com
IMAGE_NAME: voxcpmtts
REGISTRY_PATH: library
DOCKER_NAME: voxcpmtts
on:
push:
branches:
- main
workflow_dispatch:
jobs:
build-docker:
runs-on: ubuntu-latest
runs-on: builder-ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
@ -32,4 +30,4 @@ jobs:
context: .
file: ./Dockerfile
push: true
tags: ${{ env.REGISTRY_NAME }}/library/${{env.IMAGE_NAME}}:${{ env.VERSION }}
tags: ${{ env.REGISTRY_NAME }}/${{ env.REGISTRY_PATH }}/${{ env.DOCKER_NAME }}:${{ env.VERSION }}

42
.gitignore vendored
View File

@ -1,4 +1,42 @@
launch.json
__pycache__
voxcpm.egg-info
.DS_Store
.DS_Store
# Python-generated files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
build/
dist/
wheels/
*.egg-info/
# Unit test / coverage reports
.pytest_cache/
.coverage
htmlcov/
coverage.xml
# Logs
*.log
log/*.log
# Virtual environments
.venv/
venv/
env/
# IDE settings
.vscode/
.idea/
# OS generated files
.DS_Store
# Generated files
*.wav
*.pdf
*.lock

View File

@ -7,6 +7,11 @@ RUN apt-get update && apt-get install -y \
WORKDIR /app
COPY api_concurrent.py requirements.txt ./
RUN pip install -r requirements.txt
ENV VOXCPM_MODEL_ID="/models/VoxCPM1.5/" \
VOXCPM_CPU_WORKERS="2" \
VOXCPM_UVICORN_WORKERS="1" \
MAX_GPU_CONCURRENT="1"
EXPOSE 5000
CMD [ "python", "./api_concurrent.py" ]

View File

@ -6,11 +6,18 @@ https://github.com/BoardWare-Genius/VoxCPM
| Version | Date | Summary |
|---------|------------|---------------------------------|
| 0.0.3 | 2026-02-09 | Optimized configuration & Model support |
| 0.0.2 | 2026-01-21 | Supports streaming |
| 0.0.1 | 2026-01-20 | Initial version |
### 🔄 Version Details
#### 🆕 0.0.3 *2026-02-09*
- **Configuration & Deployment**
- Supports configuring model path via `VOXCPM_MODEL_ID`
- Supports configuring CPU workers via `VOXCPM_CPU_WORKERS`
- Supports configuring Uvicorn workers via `VOXCPM_UVICORN_WORKERS`
#### 🆕 0.0.2 *2026-01-21*
- **Core Features**
@ -27,9 +34,20 @@ https://github.com/BoardWare-Genius/VoxCPM
# Start
```bash
docker pull harbor.bwgdi.com/library/voxcpmtts:0.0.2
docker pull harbor.bwgdi.com/library/voxcpmtts:0.0.3
docker run -d --restart always -p 5001:5000 --gpus '"device=0"' --mount type=bind,source=/Workspace/NAS11/model/Voice/VoxCPM,target=/models harbor.bwgdi.com/library/voxcpmtts:0.0.2
# Run with custom configuration
# -e VOXCPM_MODEL_ID: Path to the model directory inside container
# -e VOXCPM_CPU_WORKERS: Number of threads for CPU-bound tasks
# -e VOXCPM_UVICORN_WORKERS: Number of uvicorn workers
# -e MAX_GPU_CONCURRENT: Max concurrent GPU tasks
docker run -d --restart always -p 5001:5000 --gpus '"device=0"' \
-e VOXCPM_MODEL_ID="/models/VoxCPM1.5/" \
-e VOXCPM_CPU_WORKERS="2" \
-e VOXCPM_UVICORN_WORKERS="1" \
-e MAX_GPU_CONCURRENT="1" \
--mount type=bind,source=/Workspace/NAS11/model/Voice/VoxCPM,target=/models \
harbor.bwgdi.com/library/voxcpmtts:0.0.3
```
# Usage

View File

@ -25,7 +25,7 @@ class VoxCPMDemo:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"🚀 Running on device: {self.device}")
self.voxcpm_model: Optional[voxcpm.VoxCPM] = None
self.default_local_model_dir = "/models/VoxCPM1.5/"
self.default_local_model_dir = os.environ.get("VOXCPM_MODEL_ID", "/models/VoxCPM1.5/")
def _resolve_model_dir(self) -> str:
if os.path.isdir(self.default_local_model_dir):
@ -136,7 +136,8 @@ MAX_GPU_CONCURRENT = int(os.environ.get("MAX_GPU_CONCURRENT", "1"))
gpu_semaphore = asyncio.Semaphore(MAX_GPU_CONCURRENT)
# Use a thread pool for running blocking (CPU/GPU-bound) code.
executor = ThreadPoolExecutor(max_workers=2)
MAX_CPU_WORKERS = int(os.environ.get("VOXCPM_CPU_WORKERS", "2"))
executor = ThreadPoolExecutor(max_workers=MAX_CPU_WORKERS)
@app.on_event("shutdown")
def shutdown_event():
@ -159,6 +160,7 @@ async def generate_tts(
retry_badcase_ratio_threshold: float = Form(6.0),
prompt_wav: Optional[UploadFile] = None,
):
try:
prompt_path = None
if prompt_wav:
@ -266,4 +268,5 @@ async def root():
return {"message": "VoxCPM API running 🚀", "endpoints": ["/generate_tts"]}
if __name__ == "__main__":
uvicorn.run("api_concurrent:app", host="0.0.0.0", port=5000, workers=4)
uvicorn_workers = int(os.environ.get("VOXCPM_UVICORN_WORKERS", "1"))
uvicorn.run("api_concurrent:app", host="0.0.0.0", port=5000, workers=uvicorn_workers)

View File

@ -47,8 +47,7 @@ dependencies = [
"funasr",
"spaces",
"argbind",
"safetensors"
"safetensors",
]
[project.optional-dependencies]