new branch

ACBBZ
2024-05-10 07:21:21 +00:00
parent f3cd6cb09a
commit 21ac4a5319
7 changed files with 272 additions and 60 deletions

View File

@@ -1,14 +1,14 @@
from .audio_chat import AudioChat
from .sentiment import Sentiment
from .tts import TTS
from .asr import ASR
from .audio_to_text import AudioToText
# from .audio_chat import AudioChat
# from .sentiment import Sentiment
# from .tts import TTS
# from .asr import ASR
# from .audio_to_text import AudioToText
from .blackbox import Blackbox
from .text_to_audio import TextToAudio
from .tesou import Tesou
# from .text_to_audio import TextToAudio
# from .tesou import Tesou
from .fastchat import Fastchat
from .g2e import G2E
from .text_and_image import TextAndImage
# from .g2e import G2E
# from .text_and_image import TextAndImage
from .chroma_query import ChromaQuery
from .chroma_upsert import ChromaUpsert
from .chroma_chat import ChromaChat
@@ -20,29 +20,29 @@ class BlackboxFactory:
@inject
def __init__(self,
audio_to_text: AudioToText,
text_to_audio: TextToAudio,
asr: ASR,
tts: TTS,
sentiment_engine: Sentiment,
tesou: Tesou,
# audio_to_text: AudioToText,
# text_to_audio: TextToAudio,
# asr: ASR,
# tts: TTS,
# sentiment_engine: Sentiment,
# tesou: Tesou,
fastchat: Fastchat,
audio_chat: AudioChat,
g2e: G2E,
text_and_image:TextAndImage,
# audio_chat: AudioChat,
# g2e: G2E,
# text_and_image:TextAndImage,
chroma_query: ChromaQuery,
chroma_upsert: ChromaUpsert,
chroma_chat: ChromaChat) -> None:
self.models["audio_to_text"] = audio_to_text
self.models["text_to_audio"] = text_to_audio
self.models["asr"] = asr
self.models["tts"] = tts
self.models["sentiment_engine"] = sentiment_engine
self.models["tesou"] = tesou
# self.models["audio_to_text"] = audio_to_text
# self.models["text_to_audio"] = text_to_audio
# self.models["asr"] = asr
# self.models["tts"] = tts
# self.models["sentiment_engine"] = sentiment_engine
# self.models["tesou"] = tesou
self.models["fastchat"] = fastchat
self.models["audio_chat"] = audio_chat
self.models["g2e"] = g2e
self.models["text_and_image"] = text_and_image
# self.models["audio_chat"] = audio_chat
# self.models["g2e"] = g2e
# self.models["text_and_image"] = text_and_image
self.models["chroma_query"] = chroma_query
self.models["chroma_upsert"] = chroma_upsert
self.models["chroma_chat"] = chroma_chat
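
For context, a minimal sketch of how the `injector` wiring above resolves the factory's constructor arguments. `Fastchat` stands in for any one remaining dependency, and the inline `self.models` initialization is an assumption made only to keep the sketch self-contained:

from injector import Injector, inject, singleton

@singleton
class Fastchat:
    pass

class BlackboxFactory:
    @inject
    def __init__(self, fastchat: Fastchat) -> None:
        # The real class is assumed to initialize self.models elsewhere;
        # done inline here so the sketch runs on its own.
        self.models = {"fastchat": fastchat}

factory = Injector().get(BlackboxFactory)
print(factory.models["fastchat"])  # the same singleton instance on every get()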

View File

@@ -5,13 +5,17 @@ from fastapi.responses import JSONResponse
from .blackbox import Blackbox
from ..utils import chroma_setting
from .fastchat import Fastchat
from .chroma_query import ChromaQuery
DEFAULT_COLLECTION_ID = "123"
from injector import singleton
from injector import singleton,inject
@singleton
class ChromaChat(Blackbox):
def __init__(self, fastchat, chroma_query):
@inject
def __init__(self, fastchat: Fastchat, chroma_query: ChromaQuery):
self.fastchat = fastchat
self.chroma_query = chroma_query
@@ -29,11 +33,13 @@ class ChromaChat(Blackbox):
collection_id = DEFAULT_COLLECTION_ID
else:
collection_id = setting.ChromaSetting.collection_ids[0]
print("collection_id: ",collection_id)
# query it
chroma_result = self.chroma_query(question, collection_id)
print("chroma_result: ",type(chroma_result),chroma_result)
fast_question = "问题: "+ question + "。根据问题,总结以下内容:" + chroma_result
response = self.fastchat(fast_question)
fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源" + chroma_result
response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字', context=None)
return response
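
A condensed sketch of the retrieve-then-summarize flow this hunk arrives at. Note that `Fastchat.processing` now also expects sampling parameters (see the fastchat.py hunks below); this call site omits them, so the sketch fills them in with the documented payload defaults:

def chroma_chat_flow(self, question: str, collection_id: str) -> str:
    # Retrieve matching documents from chroma, then ask the LLM to
    # summarize the content and its sources, capped at 50 characters.
    chroma_result = self.chroma_query(question, collection_id)
    fast_question = "问题: " + question + "。根据问题,总结以下内容和来源" + chroma_result
    return self.fastchat(
        model_name="Qwen1.5-14B-Chat",
        prompt=fast_question,
        template="回答限制50字",  # "limit the answer to 50 characters"
        context=None,
        temperature=0.7, top_p=1, top_k=-1, n=1, max_tokens=1024,
    )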

View File

@@ -39,7 +39,7 @@ class ChromaQuery(Blackbox):
n_results=3,
)
response = results["documents"] + results["metadatas"]
response = str(results["documents"] + results["metadatas"])
return response
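
The str() wrap matters because chromadb returns parallel lists of lists rather than a string. A sketch of the surrounding call, with the client and collection setup assumed rather than taken from this diff:

import chromadb

client = chromadb.Client()
collection = client.get_or_create_collection("123")  # assumed collection id
results = collection.query(query_texts=["智能体核心思想"], n_results=3)
# results["documents"] and results["metadatas"] are lists of lists;
# concatenating and stringifying them yields one prompt-ready string.
response = str(results["documents"] + results["metadatas"])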

View File

@@ -59,8 +59,11 @@ class ChromaUpsert(Blackbox):
else:
collection_id = "123"
print("file: ",file)
print("file name: ",file.filename)
if file is not None:
file_type = file.split(".")[-1]
file_type = file.filename.split(".")[-1]
print("file_type: ",file_type)
if file_type == "pdf":
loader = PyPDFLoader(file)
elif file_type == "txt":
@@ -102,16 +105,12 @@ class ChromaUpsert(Blackbox):
async def fast_api_handler(self, request: Request) -> Response:
try:
data = await request.json()
except:
return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
user_collection_id = data.get("collection_id")
user_file = data.get("file")
user_string = data.get("string")
user_context = data.get("context")
user_setting = data.get("setting")
user_collection_id = (await request.form()).get("collection_id")
user_file = (await request.form()).get("file")
user_string = (await request.form()).get("string")
user_context = (await request.form()).get("context")
user_setting = (await request.form()).get("setting")
if user_collection_id is None and user_setting["collections"] == []:
return JSONResponse(content={"error": "The first creation requires a collection id"}, status_code=status.HTTP_400_BAD_REQUEST)
@@ -119,6 +118,17 @@ class ChromaUpsert(Blackbox):
if user_file is None and user_string is None:
return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)
# data = await user_file.read()
# with open(f'./{data.filename}', 'wb') as f:
# f.write(content)
loader = PyPDFLoader(f'./{user_file.filename}')
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
print("docs: ",docs)
return JSONResponse(
content={"response": self.processing(user_collection_id, user_file, user_string, user_context, user_setting)},
status_code=status.HTTP_200_OK)
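
Starlette caches the parsed form on the request object, so the repeated `await request.form()` calls above do work; still, here is a sketch of the equivalent handler body that parses the multipart form once:

from fastapi import Request, Response

async def fast_api_handler(self, request: Request) -> Response:
    form = await request.form()  # parsed once, then reused
    user_collection_id = form.get("collection_id")
    user_file = form.get("file")  # a starlette UploadFile when sent as a file part
    user_string = form.get("string")
    user_context = form.get("context")
    user_setting = form.get("setting")
    ...  # validation and upsert then continue as in the hunk above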

View File

@@ -19,40 +19,68 @@ class Fastchat(Blackbox):
return isinstance(data, list)
# Available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
def processing(self, model_name, prompt, template, context: list) -> str:
def processing(self, model_name, prompt, template, context: list, temperature, top_p, top_k, n, max_tokens) -> str:
if context == None:
context = []
url = 'http://120.196.116.194:48892/v1/chat/completions'
# history can be an empty list or the user's conversation history
# history = [
# context can be an empty list or the user's conversation history
# context = [
# {
# "role": "user",
# "content": "Have you eaten yet?"
# "content": "Core ideas of intelligent agents"
# },
# {
# "role": "assistant",
# "content": "As an AI model I have no need to eat, since I do not have a physical form. I focus on providing information and answering your questions. Is there anything I can help you with?"
# "content": "The core idea of an intelligent agent is to apply AI in the role of a problem solver: it uses algorithms to simulate human decision-making, perceiving its environment, learning, planning, and acting in order to accomplish a specific task or goal. Its aim is efficient problem solving through self-adaptation and optimization."
# },
# ]
prompt_template = [
{"role": "system", "content": template},
]
fastchat_inputs={
"model": model_name,
"messages": context + [
"messages": prompt_template + context + [
{
"role": "user",
"content": template + prompt
"content": prompt
}
]
],
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"n": n,
"max_tokens": max_tokens,
"stream": False,
}
# {
# "model": "string",
# "messages": "string",
# "temperature": 0.7, # between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
# "top_p": 1, # controls the next-token probability distribution: only the highest-probability portion of all candidate tokens is kept for sampling
# "top_k": -1, # e.g. a top-k of 3 means sampling from the three most likely tokens.
# "n": 1, # How many chat completion choices to generate for each input message.
# "max_tokens": 1024, # The maximum number of tokens to generate in the chat completion.
# "stop": [
# "string"
# ],
# "stream": False,
# "presence_penalty": 0, # Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
# "frequency_penalty": 0, # Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
# "user": "string"
# }
fastchat_response = requests.post(url, json=fastchat_inputs)
user_message = fastchat_inputs["messages"]
context.append(user_message)
# user_message = fastchat_inputs["messages"]
# context.append(user_message)
assistant_message = fastchat_response.json()["choices"][0]["message"]
context.append(assistant_message)
# context.append(assistant_message)
fastchat_content = assistant_message["content"]
@@ -68,17 +96,37 @@ class Fastchat(Blackbox):
user_context = data.get("context")
user_prompt = data.get("prompt")
user_template = data.get("template")
user_temperature = data.get("temperature")
user_top_p = data.get("top_p")
user_top_k = data.get("top_k")
user_n = data.get("n")
user_max_tokens = data.get("max_tokens")
if user_prompt is None:
return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
if user_model_name is None or user_model_name.isspace():
if user_model_name is None or user_model_name.isspace() or user_model_name == "":
user_model_name = "Qwen1.5-14B-Chat"
if user_template is None or user_template.isspace():
# user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。" ("speak in a clown's tone"); it may be an empty string, a user-defined tone, or one of the tones we provide
# user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。" ("speak in a clown's tone"); it may be an empty string or a user-defined tone
user_template = ""
else:
user_template = f"使用{user_template}的语气说话。"
if user_temperature is None or user_temperature == "":
user_temperature = 0.7
return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
if user_top_p is None or user_top_p == "":
user_top_p = 1
if user_top_k is None or user_top_k == "":
user_top_k = -1
if user_n is None or user_n == "":
user_n = 3
if user_max_tokens is None or user_max_tokens == "":
user_max_tokens = 1024
return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context,
user_temperature, user_top_p, user_top_k, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
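
An illustrative client call against this handler. The route path and port are assumptions for the sketch (the diff does not show the routing), and the empty strings exercise the fallbacks above:

import requests

payload = {
    "model_name": "",  # blank -> falls back to Qwen1.5-14B-Chat
    "prompt": "智能体核心思想",
    "template": "小丑",  # wrapped as 使用小丑的语气说话。
    "context": [],
    "temperature": "",  # blank -> 0.7
    "top_p": "",        # blank -> 1
    "top_k": "",        # blank -> -1
    "n": "",            # blank -> 3
    "max_tokens": "",   # blank -> 1024
}
r = requests.post("http://10.6.80.75:7003/api/fastchat", json=payload)  # assumed route
print(r.json()["response"])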

145 src/blackbox/modelscope.py Executable file
View File

@@ -0,0 +1,145 @@
from typing import Any, Coroutine
from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from .blackbox import Blackbox
import requests
import json
from modelscope_agent.agents import RolePlay
from modelscope_agent.tools.base import BaseTool
from modelscope_agent.tools import register_tool
class Modelscope(Blackbox):
def __call__(self, *args, **kwargs):
return self.processing(*args, **kwargs)
def valid(self, *args, **kwargs) -> bool:
data = args[0]
return isinstance(data, list)
# Available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
def processing(self, model_name, prompt, template, context: list) -> str:
if context == None:
context = []
@register_tool('ChromaQuery')
class AliyunRenewInstanceTool(BaseTool):
description = '查询chroma数据库中的数据'
name = 'ChromaQuery'
parameters: list = [{
'name': 'id',
'description': '用户的chroma id',
'required': True,
'type': 'string'
}, {
'name': 'query',
'description': '用户需要在chroma中查询的问题',
'required': True,
'type': 'string'
}]
def call(self, params: str, **kwargs):
params = self._verify_args(params)
id = params['id']
query = params['query']
query_data = {
"chroma_query_data": {
"id": id,
"question": query
}
}
url = "http://10.6.80.75:7003"
response = requests.post(f"{url}/api/chroma_query", json=query_data)
result = response.json()['response']
return str({'result': f'Chroma ID为{id}的用户,查询结果为{result}'})
@register_tool('WebSearch')
class WebSearchTool(BaseTool):
description = '查询网络中的内容'
name = 'WebSearch'
parameters: list = [ {
'name': 'search_term',
'description': '用户需要在Web中查询的问题',
'required': True,
'type': 'string'
}]
def call(self, params: str, **kwargs):
params = self._verify_args(params)
search_term = params['search_term']
api_key='9e51be0aaecb5a56fe2faead6e2c702fde92e62a'
headers = {
'X-API-KEY': api_key,
'Content-Type': 'application/json',
}
params = {
'q': search_term
}
try:
response = requests.post(
f'https://google.serper.dev/search',
headers=headers,
params=params,
timeout=5)
except Exception as e:
return -1, str(e)
result = response.json()['answerBox']['snippet']
return str({'result': f'WebSearch查询结果为{search_term}{result}'})
# define LLM
api_base_url = "http://120.196.116.194:48892/v1"
api_key= "EMPTY"
LLM_MODEL = model_name
llm_config = {
'model': LLM_MODEL,
'model_server': 'openai',
'api_base':api_base_url,
'api_key': api_key
}
function_list = ['WebSearch', 'ChromaQuery']
bot = RolePlay(function_list=function_list,llm=llm_config, instruction=template)
response = bot.run(prompt, history=context, lang='zh')
text = ''
for chunk in response:
text += chunk
return text
async def fast_api_handler(self, request: Request) -> Response:
try:
data = await request.json()
except:
return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
user_model_name = data.get("model_name")
user_context = data.get("context")
user_prompt = data.get("prompt")
user_template = data.get("template")
if user_prompt is None:
return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
if user_model_name is None or user_model_name.isspace():
user_model_name = "Qwen1.5-14B-Chat"
if user_template is None or user_template.isspace():
# user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。" ("speak in a clown's tone"); it may be an empty string, a user-defined tone, or one of the tones we provide
user_template = ""
else:
user_template = f"使用{user_template}的语气说话。"
return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
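
For reference, a minimal standalone sketch of the register_tool + RolePlay pattern the new file relies on. The Echo tool is illustrative, not part of the commit; registering at module level rather than inside processing() avoids re-registering the tools on every request:

from modelscope_agent.agents import RolePlay
from modelscope_agent.tools.base import BaseTool
from modelscope_agent.tools import register_tool

@register_tool('Echo')
class EchoTool(BaseTool):
    description = 'Returns the input text unchanged'
    name = 'Echo'
    parameters: list = [{
        'name': 'text',
        'description': 'the text to echo back',
        'required': True,
        'type': 'string'
    }]

    def call(self, params: str, **kwargs):
        params = self._verify_args(params)
        return str({'result': params['text']})

llm_config = {
    'model': 'Qwen1.5-14B-Chat',
    'model_server': 'openai',
    'api_base': 'http://120.196.116.194:48892/v1',
    'api_key': 'EMPTY',
}
bot = RolePlay(function_list=['Echo'], llm=llm_config, instruction='')
text = ''.join(bot.run('repeat after me: hello', history=[], lang='zh'))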

View File

@@ -89,4 +89,7 @@ components:
- fastchat
- audio_chat
- g2e
- text_and_image
- text_and_image
- chroma_query
- chroma_chat
- chroma_upsert
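
For clarity, the components list as it stands after this commit (the repeated `- text_and_image` above is the old final line, which lacked a trailing newline, followed by its re-added version), assuming nothing above this hunk changed:

components:
  - fastchat
  - audio_chat
  - g2e
  - text_and_image
  - chroma_query
  - chroma_chat
  - chroma_upsert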