Merge pull request #10 from BoardWare-Genius/main

merge
This commit is contained in:
ACBBZ
2024-05-27 16:12:10 +08:00
committed by GitHub
5 changed files with 140 additions and 74 deletions

View File

@ -6,19 +6,20 @@ pip install -r sample/requirement_out_of_pytorch.txt
pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118 pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118
``` ```
## More Dependencies ## More Dependencies
| System | package | web | install command | | System | package | web | install command |
| --- | ---- | --- | --- | | --- |-----------------------| --- | --- |
| python | filetype | https://pypi.org/project/filetype/ | pip install filetype | | python | filetype | https://pypi.org/project/filetype/ | pip install filetype |
| python | fastAPI | https://fastapi.tiangolo.com/ | pip install fastapi | | python | fastAPI | https://fastapi.tiangolo.com/ | pip install fastapi |
| python | python-multipart | https://pypi.org/project/python-multipart/ | pip install python-multipart | | python | python-multipart | https://pypi.org/project/python-multipart/ | pip install python-multipart |
| python | uvicorn | https://www.uvicorn.org/ | pip install "uvicorn[standard]" | | python | uvicorn | https://www.uvicorn.org/ | pip install "uvicorn[standard]" |
| python | SpeechRecognition | https://pypi.org/project/SpeechRecognition/ | pip install SpeechRecognition | | python | SpeechRecognition | https://pypi.org/project/SpeechRecognition/ | pip install SpeechRecognition |
| python | gtts | https://pypi.org/project/gTTS/ | pip install gTTS | | python | gtts | https://pypi.org/project/gTTS/ | pip install gTTS |
| python | PyYAML | https://pypi.org/project/PyYAML/ | pip install PyYAML | | python | PyYAML | https://pypi.org/project/PyYAML/ | pip install PyYAML |
| python | injector | https://github.com/python-injector/injector | pip install injector | | python | injector | https://github.com/python-injector/injector | pip install injector |
| python | langchain | https://github.com/langchain-ai/langchain | pip install langchain | | python | langchain | https://github.com/langchain-ai/langchain | pip install langchain |
| python | chromadb | https://docs.trychroma.com/getting-started | pip install chromadb | | python | chromadb | https://docs.trychroma.com/getting-started | pip install chromadb |
| python | lagent | https://github.com/InternLM/lagent/blob/main/README.md | pip install lagent | | python | lagent | https://github.com/InternLM/lagent/blob/main/README.md | pip install lagent |
| python | sentence_transformers | https://github.com/UKPLab/sentence-transformers | pip install sentence_transformers |
## Start ## Start
@ -86,4 +87,6 @@ Model:
cudnn_conv_algo_search: EXHAUSTIVE cudnn_conv_algo_search: EXHAUSTIVE
do_copy_in_default_stream: true do_copy_in_default_stream: true
batch_size: 3 batch_size: 3
blackbox:
lazyloading: true
``` ```

View File

@ -9,4 +9,5 @@ pip install PyYAML
pip install injector pip install injector
pip install langchain pip install langchain
pip install chromadb pip install chromadb
pip install lagent pip install lagent
pip install sentence_transformers

View File

@ -1,57 +1,112 @@
from blackbox.emotion import Emotion
from .chat import Chat
from .audio_chat import AudioChat
from .sentiment import Sentiment
from .tts import TTS
from .asr import ASR
from .audio_to_text import AudioToText
from .blackbox import Blackbox from .blackbox import Blackbox
from .fastchat import Fastchat from injector import inject, singleton, Injector
from .g2e import G2E from ..configuration import BlackboxConf
from .text_and_image import TextAndImage
from .melotts import MeloTTS blackboxConf = Injector().get(BlackboxConf)
from .vlms import VLMS
from .chroma_query import ChromaQuery def model_loader(lazy=blackboxConf.lazyloading):
from .chroma_upsert import ChromaUpsert def load(init_fun):
from .chroma_chat import ChromaChat model = None
from injector import inject, singleton if not lazy:
model = init_fun()
def inner():
nonlocal model
if model is None:
model = init_fun()
return model
return inner
return load
@model_loader(lazy=blackboxConf.lazyloading)
def text_and_image_loader():
    """Build the TextAndImage blackbox via DI; the import is deferred so the model loads lazily."""
    from .text_and_image import TextAndImage
    container = Injector()
    return container.get(TextAndImage)
@model_loader(lazy=blackboxConf.lazyloading)
def audio_chat_loader():
    """Build the AudioChat blackbox via DI; the import is deferred so the model loads lazily."""
    from .audio_chat import AudioChat
    container = Injector()
    return container.get(AudioChat)
@model_loader(lazy=blackboxConf.lazyloading)
def sentiment_loader():
    """Build the Sentiment blackbox via DI; the import is deferred so the model loads lazily."""
    from .sentiment import Sentiment
    container = Injector()
    return container.get(Sentiment)
@model_loader(lazy=blackboxConf.lazyloading)
def g2e_loader():
    """Build the G2E blackbox via DI; the import is deferred so the model loads lazily."""
    from .g2e import G2E
    container = Injector()
    return container.get(G2E)
@model_loader(lazy=blackboxConf.lazyloading)
def asr_loader():
    """Build the ASR blackbox via DI; the import is deferred so the model loads lazily."""
    from .asr import ASR
    container = Injector()
    return container.get(ASR)
@model_loader(lazy=blackboxConf.lazyloading)
def vlms_loader():
    """Build the VLMS blackbox via DI; the import is deferred so the model loads lazily."""
    from .vlms import VLMS
    container = Injector()
    return container.get(VLMS)
@model_loader(lazy=blackboxConf.lazyloading)
def melotts_loader():
    """Build the MeloTTS blackbox via DI; the import is deferred so the model loads lazily."""
    from .melotts import MeloTTS
    container = Injector()
    return container.get(MeloTTS)
@model_loader(lazy=blackboxConf.lazyloading)
def tts_loader():
    """Build the TTS blackbox via DI; the import is deferred so the model loads lazily."""
    from .tts import TTS
    container = Injector()
    return container.get(TTS)
@model_loader(lazy=blackboxConf.lazyloading)
def emotion_loader():
    """Build the Emotion blackbox via DI; the import is deferred so the model loads lazily."""
    from .emotion import Emotion
    container = Injector()
    return container.get(Emotion)
@model_loader(lazy=blackboxConf.lazyloading)
def fastchat_loader():
    """Build the Fastchat blackbox via DI; the import is deferred so the model loads lazily."""
    from .fastchat import Fastchat
    container = Injector()
    return container.get(Fastchat)
@model_loader(lazy=blackboxConf.lazyloading)
def chat_loader():
    """Build the Chat blackbox via DI; the import is deferred so the model loads lazily."""
    from .chat import Chat
    container = Injector()
    return container.get(Chat)
@model_loader(lazy=blackboxConf.lazyloading)
def chroma_query_loader():
    """Build the ChromaQuery blackbox via DI; the import is deferred so the model loads lazily."""
    from .chroma_query import ChromaQuery
    container = Injector()
    return container.get(ChromaQuery)
@model_loader(lazy=blackboxConf.lazyloading)
def chroma_upsert_loader():
    """Build the ChromaUpsert blackbox via DI; the import is deferred so the model loads lazily."""
    from .chroma_upsert import ChromaUpsert
    container = Injector()
    return container.get(ChromaUpsert)
@model_loader(lazy=blackboxConf.lazyloading)
def chroma_chat_load():
    """Build the ChromaChat blackbox via DI; the import is deferred so the model loads lazily.

    NOTE(review): name breaks the sibling ``*_loader`` convention, but it is
    referenced as-is by BlackboxFactory, so it cannot be renamed here.
    """
    from .chroma_chat import ChromaChat
    container = Injector()
    return container.get(ChromaChat)
@singleton @singleton
class BlackboxFactory: class BlackboxFactory:
models = {} models = {}
@inject @inject
def __init__(self, def __init__(self,) -> None:
audio_to_text: AudioToText, self.models["asr"] = asr_loader
asr: ASR, self.models["tts"] = tts_loader
tts: TTS, self.models["sentiment_engine"] = sentiment_loader
sentiment_engine: Sentiment, self.models["emotion"] = emotion_loader
emotion: Emotion, self.models["fastchat"] = fastchat_loader
fastchat: Fastchat, self.models["audio_chat"] = audio_chat_loader
audio_chat: AudioChat, self.models["g2e"] = g2e_loader
g2e: G2E, self.models["text_and_image"] = text_and_image_loader
text_and_image: TextAndImage, self.models["chroma_query"] = chroma_query_loader
melotts: MeloTTS, self.models["chroma_upsert"] = chroma_upsert_loader
vlms: VLMS, self.models["chroma_chat"] = chroma_chat_load
chroma_query: ChromaQuery, self.models["melotts"] = melotts_loader
chroma_upsert: ChromaUpsert, self.models["vlms"] = vlms_loader
chroma_chat: ChromaChat, self.models["chat"] = chat_loader
chat: Chat) -> None:
self.models["audio_to_text"] = audio_to_text
self.models["asr"] = asr
self.models["tts"] = tts
self.models["sentiment_engine"] = sentiment_engine
self.models["emotion"] = emotion
self.models["fastchat"] = fastchat
self.models["audio_chat"] = audio_chat
self.models["g2e"] = g2e
self.models["text_and_image"] = text_and_image
self.models["chroma_query"] = chroma_query
self.models["chroma_upsert"] = chroma_upsert
self.models["chroma_chat"] = chroma_chat
self.models["melotts"] = melotts
self.models["vlms"] = vlms
self.models["chat"] = chat
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
return self.processing(*args, **kwargs) return self.processing(*args, **kwargs)
@ -60,4 +115,4 @@ class BlackboxFactory:
model = self.models.get(blackbox_name) model = self.models.get(blackbox_name)
if model is None: if model is None:
raise ValueError("Invalid Blackbox Type...") raise ValueError("Invalid Blackbox Type...")
return model return model()

View File

@ -1,4 +1,3 @@
import logging
from typing import Any, Coroutine from typing import Any, Coroutine
from fastapi import Request, Response, status from fastapi import Request, Response, status
@ -14,8 +13,6 @@ import re
from injector import singleton from injector import singleton
logger = logging.getLogger
@singleton @singleton
class Chat(Blackbox): class Chat(Blackbox):
@ -27,15 +24,14 @@ class Chat(Blackbox):
return isinstance(data, list) return isinstance(data, list)
# model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
@logging_time(logger=logger) @logging_time()
def processing(self, *args, **kwargs) -> str: def processing(self, prompt: str, context: list, settings: dict) -> str:
settings: dict = args[0]
if settings is None: if settings is None:
settings = {} settings = {}
user_model_name = settings.get("model_name") user_model_name = settings.get("model_name")
user_context = settings.get("context") user_context = context
user_question = settings.get("question") user_question = prompt
user_template = settings.get("template") user_template = settings.get("template")
user_temperature = settings.get("temperature") user_temperature = settings.get("temperature")
user_top_p = settings.get("top_p") user_top_p = settings.get("top_p")
@ -44,7 +40,6 @@ class Chat(Blackbox):
user_stop = settings.get("stop") user_stop = settings.get("stop")
user_frequency_penalty = settings.get("frequency_penalty") user_frequency_penalty = settings.get("frequency_penalty")
user_presence_penalty = settings.get("presence_penalty") user_presence_penalty = settings.get("presence_penalty")
if user_context == None: if user_context == None:
user_context = [] user_context = []
@ -124,5 +119,7 @@ class Chat(Blackbox):
return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST) return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
setting: dict = data.get("settings") setting: dict = data.get("settings")
context = data.get("context")
return JSONResponse(content={"response": self.processing(setting)}, status_code=status.HTTP_200_OK) prompt = data.get("prompt")
return JSONResponse(content={"response": self.processing(prompt, context, setting)}, status_code=status.HTTP_200_OK)

View File

@ -1,4 +1,5 @@
from dataclasses import dataclass
from injector import inject,singleton from injector import inject,singleton
import yaml import yaml
import sys import sys
@ -101,4 +102,13 @@ class EnvConf():
def __init__(self, config: Configuration) -> None: def __init__(self, config: Configuration) -> None:
self.version = "0.0.1" self.version = "0.0.1"
self.host = config.get("env.host", default="0.0.0.0") self.host = config.get("env.host", default="0.0.0.0")
self.port = config.get("env.port", default="8080") self.port = config.get("env.port", default="8080")
@singleton
@dataclass
class BlackboxConf():
    """Configuration section controlling lazy loading of blackbox models."""

    # True when models should be instantiated on first use instead of at startup.
    lazyloading: bool

    @inject
    def __init__(self, config: Configuration) -> None:
        # NOTE(review): bool() on a non-empty string (e.g. "false") is True —
        # presumably the config backend already yields a real bool; confirm.
        self.lazyloading = bool(config.get("blackbox.lazyloading", default=False))