Merge pull request #23 from BoardWare-Genius/ivan

Modify vlm blackbox
This commit is contained in:
IvanWu
2025-01-08 17:35:28 +08:00
committed by GitHub
4 changed files with 265 additions and 12 deletions

View File

@@ -0,0 +1,93 @@
import io
import time
import requests
from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from injector import inject, singleton
from ..log.logging_time import logging_time
from ..configuration import CosyVoiceConf
from .blackbox import Blackbox
import soundfile
import sys
sys.path.append('/home/gpu/Workspace/CosyVoice')
from cosyvoice.cli.cosyvoice import CosyVoice
import os
import logging

logger = logging.getLogger(__name__)


@singleton
class CosyVoiceTTS(Blackbox):
    mode: str
    url: str
    speed: int
    device: str
    language: str
    speaker: str

    @logging_time(logger=logger)
    def model_init(self, cosyvoice_config: CosyVoiceConf) -> None:
        self.speed = cosyvoice_config.speed
        self.device = cosyvoice_config.device
        self.language = cosyvoice_config.language
        self.speaker = cosyvoice_config.speaker
        self.url = ''
        self.mode = cosyvoice_config.mode
        self.cosyvoicetts = None
        self.speaker_ids = None
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cosyvoice_config.device)
        if self.mode == 'local':
            self.cosyvoicetts = CosyVoice('/home/gpu/Workspace/Models/CosyVoice/pretrained_models/CosyVoice-300M')
        else:
            self.url = cosyvoice_config.url
        logger.info('#### Initializing CosyVoiceTTS Service in cuda:%s mode...', cosyvoice_config.device)

    @inject
    def __init__(self, cosyvoice_config: CosyVoiceConf) -> None:
        self.model_init(cosyvoice_config)

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        text = args[0]
        return isinstance(text, str)

    @logging_time(logger=logger)
    def processing(self, *args, **kwargs) -> bytes:
        text = args[0]
        current_time = time.time()
        if self.mode == 'local':
            audio = self.cosyvoicetts.inference_sft(text, self.language)
            f = io.BytesIO()
            soundfile.write(f, audio['tts_speech'].cpu().numpy().squeeze(0), 22050, format='wav')
            f.seek(0)
            logger.info('#### CosyVoiceTTS Service consume - local : %s', time.time() - current_time)
            return f.read()
        else:
            message = {"text": text}
            response = requests.post(self.url, json=message)
            logger.info('#### CosyVoiceTTS Service consume - docker : %s', time.time() - current_time)
            return response.content

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except Exception:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
        text = data.get("text")
        if text is None:
            return JSONResponse(content={"error": "text is required"}, status_code=status.HTTP_400_BAD_REQUEST)
        return Response(content=self.processing(text), media_type="audio/wav", headers={"Content-Disposition": "attachment; filename=audio.wav"})
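
Both TTS handlers in this commit accept a JSON body with a single text field and return the synthesized audio as a WAV attachment. A minimal client sketch (the /tts route is an assumption for illustration; the actual mount point is registered outside this diff):

import requests

# Hypothetical route; the real path is wired up elsewhere in the app.
resp = requests.post("http://localhost:8000/tts", json={"text": "hello"}, timeout=30)
resp.raise_for_status()
with open("audio.wav", "wb") as f:
    f.write(resp.content)  # raw WAV bytes returned by fast_api_handler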

src/blackbox/melotts.py (new file, 108 additions)
View File

@@ -0,0 +1,108 @@
import io
import time
import requests
from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from injector import inject, singleton
from ..log.logging_time import logging_time
from ..configuration import MeloConf
from .blackbox import Blackbox
import soundfile
import pyloudnorm as pyln
from melo.api import TTS
import logging

logger = logging.getLogger(__name__)


@singleton
class MeloTTS(Blackbox):
    mode: str
    url: str
    speed: int
    device: str
    language: str
    speaker: str

    @logging_time(logger=logger)
    def model_init(self, melo_config: MeloConf) -> None:
        self.speed = melo_config.speed
        self.device = melo_config.device
        self.language = melo_config.language
        self.speaker = melo_config.speaker
        self.url = ''
        self.mode = melo_config.mode
        self.melotts = None
        self.speaker_ids = None
        if self.mode == 'local':
            self.melotts = TTS(language=self.language, device=self.device)
            self.speaker_ids = self.melotts.hps.data.spk2id
        else:
            self.url = melo_config.url
        logger.info('#### Initializing MeloTTS Service in ' + self.device + ' mode...')

    @inject
    def __init__(self, melo_config: MeloConf) -> None:
        self.model_init(melo_config)

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        text = args[0]
        return isinstance(text, str)

    @logging_time(logger=logger)
    def processing(self, *args, **kwargs) -> bytes:
        text = args[0]
        current_time = time.time()
        if self.mode == 'local':
            audio = self.melotts.tts_to_file(text, self.speaker_ids[self.speaker], speed=self.speed)
            f = io.BytesIO()
            soundfile.write(f, audio, 44100, format='wav')
            f.seek(0)
            # Read the audio data back from the buffer
            data, rate = soundfile.read(f, dtype='float32')
            # Peak normalization
            peak_normalized_audio = pyln.normalize.peak(data, -1.0)
            # Integrated loudness normalization
            meter = pyln.Meter(rate)
            loudness = meter.integrated_loudness(peak_normalized_audio)
            loudness_normalized_audio = pyln.normalize.loudness(peak_normalized_audio, loudness, -12.0)
            # Write the loudness-normalized audio to an in-memory buffer
            normalized_audio_buffer = io.BytesIO()
            soundfile.write(normalized_audio_buffer, loudness_normalized_audio, rate, format='wav')
            normalized_audio_buffer.seek(0)
            logger.info('#### MeloTTS Service consume - local : %s', time.time() - current_time)
            return normalized_audio_buffer.read()
        else:
            message = {"text": text}
            response = requests.post(self.url, json=message)
            logger.info('#### MeloTTS Service consume - docker : %s', time.time() - current_time)
            return response.content

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except Exception:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
        text = data.get("text")
        if text is None:
            return JSONResponse(content={"error": "text is required"}, status_code=status.HTTP_400_BAD_REQUEST)
        return Response(content=self.processing(text), media_type="audio/wav", headers={"Content-Disposition": "attachment; filename=audio.wav"})
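
The local branch above is the standard pyloudnorm two-step pattern: peak-normalize to -1 dBFS, then measure integrated loudness (ITU-R BS.1770) and normalize to a -12 LUFS target. A self-contained sketch of the same steps on a synthetic signal:

import numpy as np
import pyloudnorm as pyln

rate = 44100
t = np.arange(rate) / rate
data = 0.5 * np.sin(2 * np.pi * 440.0 * t)               # 1 s, 440 Hz test tone

peak_normalized = pyln.normalize.peak(data, -1.0)        # scale peak to -1.0 dBFS
meter = pyln.Meter(rate)                                 # BS.1770 loudness meter
loudness = meter.integrated_loudness(peak_normalized)    # measured loudness in LUFS
normalized = pyln.normalize.loudness(peak_normalized, loudness, -12.0)  # to -12 LUFS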

View File

@@ -12,13 +12,13 @@ import requests
 import base64
 import copy
 import ast
+import random
+from time import time
 import io
 from PIL import Image
 from lmdeploy.serve.openai.api_client import APIClient
-import io
-from PIL import Image
-from lmdeploy.serve.openai.api_client import APIClient

 def is_base64(value) -> bool:
     try:
@@ -50,8 +50,8 @@ class VLMS(Blackbox):
         - ignore_eos (bool): indicator for ignoring eos
         - skip_special_tokens (bool): Whether or not to remove special tokens
           in the decoding. Default to be True."""
-        self.url = vlm_config.url
+        self.model_dict = vlm_config.urls
+        self.model_url = None
         self.temperature: float = 0.7
         self.top_p: float = 1
         self.max_tokens: (int | None) = 512
@@ -81,7 +81,7 @@ class VLMS(Blackbox):
         data = args[0]
         return isinstance(data, list)

-    def processing(self, prompt: str, images: str | bytes, settings: dict, model_name: Optional[str] = None, user_context: List[dict] = None) -> str:
+    def processing(self, prompt: str | None, images: str | bytes | None, settings: dict, model_name: Optional[str] = None, user_context: List[dict] = None) -> str:
         """
         Args:
             prompt: a string query to the model.
@@ -105,6 +105,9 @@ class VLMS(Blackbox):
         else:
             settings = {}
+        if not prompt:
+            prompt = '你是一个辅助机器人,请就此图做一个简短的概括性描述,包括图中的主体物品及状态,不超过50字。' if images else '你好'
         # Transform the images into base64 format where openai format need.
         if images:
             if is_base64(images):  # image as base64 str
@@ -148,7 +151,11 @@ class VLMS(Blackbox):
         #     'content': '图片中主要展示了一只老虎,它正在绿色的草地上休息。草地上有很多可以让人坐下的地方,而且看起来相当茂盛。背景比较模糊,可能是因为老虎的影响,让整个图片的其他部分都变得不太清晰了。'
         #   }
         # ]
-        api_client = APIClient(self.url)
+        user_context = self.keep_last_k_images(user_context, k=1)
+        if self.model_url is None:
+            self.model_url = self._get_model_url(model_name)
+        api_client = APIClient(self.model_url)
         # api_client = APIClient("http://10.6.80.91:23333")
         model_name = api_client.available_models[0]
         # Reformat input into openai format to request.
@@ -198,10 +205,28 @@ class VLMS(Blackbox):
             print(item["choices"][0]["message"]['content'])
             responses += item["choices"][0]["message"]['content']
             # total_token_usage += item['usage']['total_tokens']  # 'usage': {'prompt_tokens': *, 'total_tokens': *, 'completion_tokens': *}
         user_context = messages + [{'role': 'assistant', 'content': responses}]
+        self.custom_print(user_context)
         return responses, user_context
+    def _get_model_url(self, model_name: str | None):
+        # Probe every configured endpoint and keep the ones that respond.
+        available_models = {}
+        for model, url in self.model_dict.items():
+            try:
+                response = requests.get(url, timeout=3)
+                if response.status_code == 200:
+                    available_models[model] = url
+            except Exception:
+                pass
+        if not available_models:
+            raise RuntimeError("There are no available running models; please check your endpoint urls.")
+        if model_name and model_name in available_models:
+            return available_models[model_name]
+        model = random.choice(list(available_models.keys()))
+        print(f"No such model {model_name}, using {model} instead." if model_name else f"Using random model {model}.")
+        return available_models[model]
     def _into_openai_format(self, context: List[list]) -> List[dict]:
         """
         Convert the data into openai format.
@@ -255,7 +280,35 @@ class VLMS(Blackbox):
         return user_context

+    def keep_last_k_images(self, user_context: list, k: int = 2):
+        # Walk the context from newest to oldest and keep at most k images;
+        # older images are dropped so the request payload stays small.
+        count = 0
+        result = []
+        for item in user_context[::-1]:
+            if item['role'] == 'user' and len(item['content']) > 1:
+                kept = []
+                for info in item['content']:
+                    if info['type'] in ('image_url', 'image'):
+                        if count < k:
+                            count += 1
+                            kept.append(info)
+                        # otherwise drop the image entirely
+                    else:
+                        kept.append(info)
+                item['content'] = kept
+            result.append(item)
+        return result[::-1]
+    def custom_print(self, user_context: list):
+        # Print the context with image payloads masked, without mutating
+        # the caller's context (which is also returned as history).
+        result = []
+        for item in user_context:
+            if item['role'] == 'user' and isinstance(item['content'], list):
+                content = [
+                    {'type': 'image', 'image': '##<IMAGE>##'}
+                    if info['type'] in ('image_url', 'image') else info
+                    for info in item['content']
+                ]
+                result.append({**item, 'content': content})
+            else:
+                result.append(item)
+        print(result)
     async def fast_api_handler(self, request: Request) -> Response:
         ## TODO: add support for multiple images and support image in form-data format
         json_request = True
@@ -273,7 +326,6 @@ class VLMS(Blackbox):
         prompt = data.get("prompt")
         settings: dict = data.get('settings')
         context = data.get("context")
-
         if not context:
             user_context = []
         elif isinstance(context[0], list):
@ -292,8 +344,8 @@ class VLMS(Blackbox):
if prompt is None: if prompt is None:
return JSONResponse(content={'error': "Question is required"}, status_code=status.HTTP_400_BAD_REQUEST) return JSONResponse(content={'error': "Question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
if model_name is None or model_name.isspace(): # if model_name is None or model_name.isspace():
model_name = "Qwen-VL-Chat" # model_name = "Qwen-VL-Chat"
response, history = self.processing(prompt, img_data,settings, model_name,user_context=user_context) response, history = self.processing(prompt, img_data,settings, model_name,user_context=user_context)
# jsonresp = str(JSONResponse(content={"response": self.processing(prompt, img_data, model_name)}).body, "utf-8") # jsonresp = str(JSONResponse(content={"response": self.processing(prompt, img_data, model_name)}).body, "utf-8")
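
For reference, the handler above reads prompt, settings, context, and the now-optional model_name from the JSON body. A hypothetical request sketch (the /vlm route is a placeholder, and the image field is omitted because its exact key is not visible in this hunk):

import requests

payload = {
    "prompt": "Describe this image in one sentence.",
    "settings": {"temperature": 0.7},  # forwarded to the generation settings
    "context": [],                     # prior turns; empty starts a fresh chat
    "model_name": "Qwen-VL-Chat",      # optional after this commit; omit to let the server pick
}
# Placeholder route; the real mount point is registered outside this diff.
r = requests.post("http://localhost:8000/vlm", json=payload, timeout=60)
print(r.json())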

View File

@@ -153,4 +153,4 @@ class VLMConf():
     @inject
     def __init__(self, config: Configuration) -> None:
-        self.url = config.get("vlms.url")
+        self.urls = config.get("vlms.urls")
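
Since _get_model_url iterates self.model_dict.items() and probes each value with an HTTP GET, config.get("vlms.urls") is evidently expected to yield a mapping of model name to endpoint URL. A hypothetical entry (the model names and the second address are illustrative; the first address appears in a commented-out line in the diff above):

# Illustrative shape of the value behind config.get("vlms.urls"):
urls = {
    "Qwen-VL-Chat": "http://10.6.80.91:23333",
    "InternVL-Chat": "http://10.6.80.92:23333",  # hypothetical second endpoint
}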