vlms updated

gdw6463 committed 2024-05-08 12:50:46 +08:00
parent f3cd6cb09a
commit 2fbceddae5
3 changed files with 77 additions and 2 deletions


@@ -37,6 +37,10 @@ class Blackbox(ABC):
     async def fast_api_handler(self, request: Request) -> Response:
         pass
 
+    @abstractmethod
+    async def vlms_api_handler(self, request: Request) -> Response:
+        pass
+
     @abstractmethod
     def __call__(self, *args, **kwargs):
         pass
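
Note: declaring vlms_api_handler as an @abstractmethod means every concrete Blackbox subclass registered in the factory below must now implement it, or instantiating that subclass raises TypeError. A minimal stub for a model that does not serve VLM requests might look like the sketch below (hypothetical body, not code from this commit; the other abstract methods are omitted):

# Hypothetical sketch, not part of this commit.
from fastapi import Request, Response, status
from fastapi.responses import JSONResponse

class SomeExistingModel(Blackbox):  # illustrative subclass name
    async def vlms_api_handler(self, request: Request) -> Response:
        # This blackbox does not handle VLM requests.
        return JSONResponse(content={"error": "vlms not supported"},
                            status_code=status.HTTP_501_NOT_IMPLEMENTED)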


@@ -12,6 +12,7 @@ from .text_and_image import TextAndImage
 from .chroma_query import ChromaQuery
 from .chroma_upsert import ChromaUpsert
 from .chroma_chat import ChromaChat
+from .vlms import VLMS
 from injector import inject, singleton
 
 @singleton
@@ -29,10 +30,11 @@ class BlackboxFactory:
                  fastchat: Fastchat,
                  audio_chat: AudioChat,
                  g2e: G2E,
-                 text_and_image:TextAndImage,
+                 text_and_image: TextAndImage,
                  chroma_query: ChromaQuery,
                  chroma_upsert: ChromaUpsert,
-                 chroma_chat: ChromaChat) -> None:
+                 chroma_chat: ChromaChat,
+                 vlms: VLMS) -> None:
         self.models["audio_to_text"] = audio_to_text
         self.models["text_to_audio"] = text_to_audio
         self.models["asr"] = asr
@@ -46,6 +48,7 @@ class BlackboxFactory:
         self.models["chroma_query"] = chroma_query
         self.models["chroma_upsert"] = chroma_upsert
         self.models["chroma_chat"] = chroma_chat
+        self.models["vlms"] = vlms
 
     def __call__(self, *args, **kwargs):
         return self.processing(*args, **kwargs)
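
For context, a rough usage sketch of the new factory entry; the Injector bootstrap and the positional call below are assumptions based on the imports and __call__ shown above, not code from this commit:

# Hypothetical usage sketch (not part of this commit).
from injector import Injector

injector = Injector()
factory = injector.get(BlackboxFactory)   # @singleton/@inject wiring assumed
vlms = factory.models["vlms"]             # entry added in this commit
# VLMS.processing takes (prompt, images, model_name); images may be a file
# path or an already-base64-encoded string.
answer = vlms("Describe this image.", "/tmp/photo.jpg", "Qwen-VL-Chat")
print(answer)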

src/blackbox/vlms.py (new file)

@@ -0,0 +1,68 @@
from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from blackbox import Blackbox
from typing import Optional
import requests
import base64


def is_base64(value: str) -> bool:
    # True only if `value` survives a base64 decode/encode round trip.
    # (The original decoded twice and discarded the comparison, so it
    # returned True for almost any decodable input.)
    try:
        return base64.b64encode(base64.b64decode(value)) == value.encode()
    except Exception:
        return False
class VLMS(Blackbox):
    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        data = args[0]
        return isinstance(data, list)

    def processing(self, prompt, images, model_name: Optional[str] = None) -> str:
        # Map the public model name to its upstream inference route;
        # anything unrecognised falls back to the Qwen-VL endpoint.
        if model_name == "Qwen-VL-Chat":
            model_name = "infer-qwen-vl"
        elif model_name == "llava-llama-3-8b-v1_1-transformers":
            model_name = "infer-lav-lam-v1-1"
        else:
            model_name = "infer-qwen-vl"
        url = "http://120.196.116.194:48894/" + model_name + "/"
        # `images` is either a base64 payload already, or a path to an image file.
        if is_base64(images):
            images_data = images
        else:
            with open(images, "rb") as img_file:
                images_data = base64.b64encode(img_file.read()).decode("utf-8")
        data_input = {"model": model_name, "prompt": prompt, "img_data": images_data}
        resp = requests.post(url, json=data_input)
        return resp.text
    async def vlms_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except Exception:
            return JSONResponse(content={"error": "json parse error"},
                                status_code=status.HTTP_400_BAD_REQUEST)
        model_name = data.get("model_name")
        prompt = data.get("prompt")
        img_data = data.get("img_data")
        if prompt is None:
            return JSONResponse(content={"error": "prompt is required"},
                                status_code=status.HTTP_400_BAD_REQUEST)
        if img_data is None:
            return JSONResponse(content={"error": "img_data is required"},
                                status_code=status.HTTP_400_BAD_REQUEST)
        if model_name is None or model_name.isspace():
            model_name = "Qwen-VL-Chat"
        # processing() takes (prompt, images, model_name); the original call
        # passed (model_name, prompt, img_data), scrambling the arguments.
        return JSONResponse(content={"response": self.processing(prompt, img_data, model_name)},
                            status_code=status.HTTP_200_OK)
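
A hedged client-side sketch of exercising the new handler over HTTP; the route path is an assumption, since the FastAPI route registration for vlms_api_handler is not part of this diff:

# Hypothetical client (route path assumed; not shown in this commit).
import base64
import requests

with open("photo.jpg", "rb") as f:
    img_b64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "http://localhost:8000/vlms",   # assumed route
    json={
        "model_name": "Qwen-VL-Chat",
        "prompt": "What is in this picture?",
        "img_data": img_b64,
    },
    timeout=60,
)
print(resp.json()["response"])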