Mirror of https://github.com/BoardWare-Genius/jarvis-models.git, synced 2025-12-13 16:53:24 +00:00
@@ -1,21 +1,19 @@
from . import melotts
from blackbox.emotion import Emotion
from .chat import Chat
from .audio_chat import AudioChat
from .sentiment import Sentiment
from .tts import TTS
from .asr import ASR
from .audio_to_text import AudioToText
#from .emotion import Emotion
from .blackbox import Blackbox
from .text_to_audio import TextToAudio
from .tesou import Tesou
from .fastchat import Fastchat
from .g2e import G2E
from .text_and_image import TextAndImage
# from .chroma_query import ChromaQuery
# from .chroma_upsert import ChromaUpsert
# from .chroma_chat import ChromaChat
from .melotts import MeloTTS
from .vlms import VLMS
from .chroma_query import ChromaQuery
from .chroma_upsert import ChromaUpsert
from .chroma_chat import ChromaChat
from injector import inject, singleton

@singleton
@@ -25,37 +23,35 @@ class BlackboxFactory:
    @inject
    def __init__(self,
                 audio_to_text: AudioToText,
                 text_to_audio: TextToAudio,
                 asr: ASR,
                 tts: TTS,
                 sentiment_engine: Sentiment,
                 #emotion: Emotion,
                 tesou: Tesou,
                 emotion: Emotion,
                 fastchat: Fastchat,
                 audio_chat: AudioChat,
                 g2e: G2E,
                 text_and_image: TextAndImage,
                 #chroma_query: ChromaQuery,
                 #chroma_upsert: ChromaUpsert,
                 #chroma_chat: ChromaChat,
                 melotts: MeloTTS,
                 vlms: VLMS) -> None:
                 vlms: VLMS,
                 chroma_query: ChromaQuery,
                 chroma_upsert: ChromaUpsert,
                 chroma_chat: ChromaChat,
                 chat: Chat) -> None:
        self.models["audio_to_text"] = audio_to_text
        self.models["text_to_audio"] = text_to_audio
        self.models["asr"] = asr
        self.models["tts"] = tts
        self.models["sentiment_engine"] = sentiment_engine
        self.models["tesou"] = tesou
        #self.models["emotion"] = emotion
        self.models["emotion"] = emotion
        self.models["fastchat"] = fastchat
        self.models["audio_chat"] = audio_chat
        self.models["g2e"] = g2e
        self.models["text_and_image"] = text_and_image
        #self.models["chroma_query"] = chroma_query
        #self.models["chroma_upsert"] = chroma_upsert
        #self.models["chroma_chat"] = chroma_chat
        self.models["chroma_query"] = chroma_query
        self.models["chroma_upsert"] = chroma_upsert
        self.models["chroma_chat"] = chroma_chat
        self.models["melotts"] = melotts
        self.models["vlms"] = vlms
        self.models["chat"] = chat

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)
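Read as a whole, BlackboxFactory is a dependency-injected registry: injector constructs each blackbox once (@singleton), and the factory exposes them under string keys in self.models. A minimal self-contained sketch of the same pattern, with illustrative names that are not the repo's:

# Minimal sketch of the registry pattern behind BlackboxFactory.
# Names here are illustrative; the real factory is wired by injector.
class Registry:
    def __init__(self) -> None:
        # name -> callable blackbox, mirroring self.models in the diff
        self.models: dict = {}

    def register(self, name: str, blackbox) -> None:
        self.models[name] = blackbox

    def __call__(self, name: str, *args, **kwargs):
        # Resolve by registered name and delegate, much as BlackboxFactory
        # delegates __call__ to processing.
        blackbox = self.models.get(name)
        if blackbox is None:
            raise KeyError(f"no blackbox registered under {name!r}")
        return blackbox(*args, **kwargs)

registry = Registry()
registry.register("chat", lambda settings: f"echo: {settings}")
print(registry("chat", {"question": "hi"}))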
128 src/blackbox/chat.py Normal file
@@ -0,0 +1,128 @@
import logging
from typing import Any, Coroutine

from fastapi import Request, Response, status
from fastapi.responses import JSONResponse

from ..log.logging_time import logging_time
from .blackbox import Blackbox

import requests
import json
from openai import OpenAI
import re

from injector import singleton

logger = logging.getLogger(__name__)

@singleton
class Chat(Blackbox):

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        data = args[0]
        return isinstance(data, list)

    # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
    @logging_time(logger=logger)
    def processing(self, *args, **kwargs) -> str:

        settings: dict = args[0]
        if settings is None:
            settings = {}
        user_model_name = settings.get("model_name")
        user_context = settings.get("context")
        user_question = settings.get("question")
        user_template = settings.get("template")
        user_temperature = settings.get("temperature")
        user_top_p = settings.get("top_p")
        user_n = settings.get("n")
        user_max_tokens = settings.get("max_tokens")
        user_stop = settings.get("stop")
        user_frequency_penalty = settings.get("frequency_penalty")
        user_presence_penalty = settings.get("presence_penalty")

        if user_context is None:
            user_context = []

        if user_question is None:
            return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)

        if user_model_name is None or user_model_name.isspace() or user_model_name == "":
            user_model_name = "Qwen1.5-14B-Chat"

        if user_template is None or user_template.isspace():
            user_template = ""

        if user_temperature is None or user_temperature == "":
            user_temperature = 0.8

        if user_top_p is None or user_top_p == "":
            user_top_p = 0.8

        if user_n is None or user_n == "":
            user_n = 1

        if user_max_tokens is None or user_max_tokens == "":
            user_max_tokens = 1024

        if user_stop is None or user_stop == "":
            user_stop = 100

        if user_frequency_penalty is None or user_frequency_penalty == "":
            user_frequency_penalty = 0.5

        if user_presence_penalty is None or user_presence_penalty == "":
            user_presence_penalty = 0.8

        # gpt-4, gpt-3.5-turbo
        if re.search(r"gpt", user_model_name):
            url = 'https://api.openai.com/v1/completions'
            key = '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'
        else:
            url = 'http://120.196.116.194:48892/v1/chat/completions'
            key = 'YOUR_API_KEY'

        prompt_template = [
            {"role": "system", "content": user_template},
        ]

        chat_inputs = {
            "model": user_model_name,
            "messages": prompt_template + user_context + [
                {
                    "role": "user",
                    "content": user_question
                }
            ],
            "temperature": user_temperature,
            "top_p": user_top_p,
            "n": user_n,
            "max_tokens": user_max_tokens,
            "frequency_penalty": user_frequency_penalty,
            "presence_penalty": user_presence_penalty,
            "stop": user_stop
        }

        header = {
            'Content-Type': 'application/json',
            'Authorization': "Bearer " + key
        }

        fastchat_response = requests.post(url, json=chat_inputs, headers=header)

        return fastchat_response.json()["choices"][0]["message"]["content"]

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)

        setting: dict = data.get("settings")

        return JSONResponse(content={"response": self.processing(setting)}, status_code=status.HTTP_200_OK)
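The handler takes a single "settings" object and applies the defaults above for any missing field. A hedged example of calling this endpoint; the /api/chat path and host are assumptions, since the diff does not show the router wiring:

# Example request against the Chat handler. Only the field names come
# from the diff; the URL path is an assumption.
import requests

payload = {
    "settings": {
        "model_name": "Qwen1.5-14B-Chat",  # also the server-side default
        "question": "什么是智能体?",
        "context": [],                      # optional prior messages
        "temperature": 0.8,
        "max_tokens": 1024,
    }
}
resp = requests.post("http://localhost:8000/api/chat", json=payload)  # hypothetical route
print(resp.json()["response"])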
@@ -4,15 +4,18 @@ from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from .blackbox import Blackbox

from ..utils import chroma_setting
from .chat import Chat
from .chroma_query import ChromaQuery

DEFAULT_COLLECTION_ID = "123"

from injector import singleton
from injector import singleton, inject
@singleton
class ChromaChat(Blackbox):

    def __init__(self, fastchat, chroma_query):
        self.fastchat = fastchat
    @inject
    def __init__(self, chat: Chat, chroma_query: ChromaQuery):
        self.chat = chat
        self.chroma_query = chroma_query

    def __call__(self, *args, **kwargs):
@@ -22,18 +25,18 @@ class ChromaChat(Blackbox):
        data = args[0]
        return isinstance(data, list)

    def processing(self, question, setting: chroma_setting) -> str:
    def processing(self, question, context: list) -> str:
        if context is None:
            context = []

        # load or create collection
        if setting is None:
            collection_id = DEFAULT_COLLECTION_ID
        else:
            collection_id = setting.ChromaSetting.collection_ids[0]
        collection_id = DEFAULT_COLLECTION_ID

        # query it
        chroma_result = self.chroma_query(question, collection_id)

        fast_question = "问题: " + question + "。根据问题,总结以下内容:" + chroma_result
        response = self.fastchat(fast_question)
        fast_question = "问题: " + question + "。根据问题,总结以下内容和来源:" + chroma_result
        response = self.chat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='', context=context, temperature=0.8, top_p=0.8, n=1, max_tokens=1024, stop=100, frequency_penalty=0.5, presence_penalty=0.8)

        return response
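The rewritten processing is a compact retrieval-augmented-generation step: retrieve from Chroma, then summarize through Chat. A hedged sketch of that two-step composition in isolation; the function names are illustrative, only the prompt string comes from the diff:

# Illustrative two-step RAG composition mirroring ChromaChat.processing.
def rag_answer(chroma_query, chat, question: str, collection_id: str = "123") -> str:
    # Step 1: retrieve candidate documents (and their sources) for the question.
    retrieved = chroma_query(question, collection_id)
    # Step 2: ask the chat model to summarize the retrieved content and sources.
    prompt = "问题: " + question + "。根据问题,总结以下内容和来源:" + retrieved
    return chat(prompt)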
@@ -31,7 +31,7 @@ class ChromaQuery(Blackbox):
    def processing(self, question: str, collection_id) -> str:

        # load or create collection
        collection = self.client.get_or_create_collection(collection_id, embedding_function=self.embedding_model)
        collection = self.client.get_collection(collection_id, embedding_function=self.embedding_model)

        # query it
        results = collection.query(
@@ -39,7 +39,7 @@ class ChromaQuery(Blackbox):
            n_results=3,
        )

        response = results["documents"] + results["metadatas"]
        response = str(results["documents"] + results["metadatas"])
        return response
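For reference, the underlying chromadb call pattern looks like the following standalone sketch, using an in-memory client; the collection name mirrors DEFAULT_COLLECTION_ID and the texts are made up:

# Standalone chromadb sketch of the query pattern used above.
import chromadb

client = chromadb.Client()  # in-memory client, for illustration only
collection = client.get_or_create_collection("123")
collection.upsert(ids=["0"], documents=["智能体的核心思想……"], metadatas=[{"source": "demo"}])

results = collection.query(query_texts=["什么是智能体?"], n_results=3)
# results["documents"] and results["metadatas"] are lists of lists, one inner
# list per query text, which is why the diff stringifies the concatenation.
print(results["documents"][0], results["metadatas"][0])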
@@ -14,6 +14,9 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTex
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import chromadb

import os
import tempfile

from ..utils import chroma_setting

from injector import singleton
@@ -50,7 +53,7 @@ class ChromaUpsert(Blackbox):
        "collection_id": "123",
        "action": "upsert",
        "content": "file_name or string",
        "answer": "success, collection has 100 documents.",
        "answer": "collection 123 has 12472 documents. /tmp/Cheap and Quick:Efficient Vision-Language Instruction Tuning for Large Language Models.pdf ids is 0~111",
    },
]

@@ -61,6 +64,7 @@ class ChromaUpsert(Blackbox):

        if file is not None:
            file_type = file.split(".")[-1]
            print("file_type: ", file_type)
            if file_type == "pdf":
                loader = PyPDFLoader(file)
            elif file_type == "txt":
@@ -77,7 +81,6 @@ class ChromaUpsert(Blackbox):
                loader = UnstructuredExcelLoader(file)

            loader = PyPDFLoader(file)
            documents = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)

@@ -87,31 +90,37 @@ class ChromaUpsert(Blackbox):

            Chroma.from_documents(documents=docs, embedding=self.embedding_model, ids=ids, collection_name=collection_id, client=self.client)

            collection_number = self.client.get_collection(collection_id).count()
            response_file = f"collection {collection_id} has {collection_number} documents. {file} ids is 0~{len(docs)-1}"

        if string is not None:
            # generate a new id; ids_string: 1
            ids = setting.ChromaSetting.string_ids[0] + 1
            # ids = setting.ChromaSetting.string_ids[0] + 1
            ids = "1"

            Chroma.from_texts(texts=[string], embedding=self.embedding_model, ids=[ids], collection_name=collection_id, client=self.client)

            collection_number = self.client.get_collection(collection_id).count()
            response = f"collection {collection_id} has {collection_number} documents."
            collection_number = self.client.get_collection(collection_id).count()
            response_string = f"collection {collection_id} has {collection_number} documents. {string} ids is {ids}"

        return response

        if file is not None and string is not None:
            return response_file + " \n and " + response_string
        elif file is not None and string is None:
            return response_file
        elif file is None and string is not None:
            return response_string

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)

        user_collection_id = data.get("collection_id")
        user_file = data.get("file")
        user_string = data.get("string")
        user_context = data.get("context")
        user_setting = data.get("setting")
        user_collection_id = (await request.form()).get("collection_id")
        user_file = (await request.form()).get("file")
        user_string = (await request.form()).get("string")
        user_context = (await request.form()).get("context")
        user_setting = (await request.form()).get("setting")

        if user_collection_id is None and user_setting["collections"] == []:
            return JSONResponse(content={"error": "The first creation requires a collection id"}, status_code=status.HTTP_400_BAD_REQUEST)
@@ -119,6 +128,19 @@ class ChromaUpsert(Blackbox):
        if user_file is None and user_string is None:
            return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)

        if user_file is not None:
            pdf_bytes = await user_file.read()

            custom_filename = user_file.filename
            # get the system temporary directory path
            safe_filename = os.path.join(tempfile.gettempdir(), os.path.basename(custom_filename))

            with open(safe_filename, "wb") as f:
                f.write(pdf_bytes)
        else:
            safe_filename = None

        return JSONResponse(
            content={"response": self.processing(user_collection_id, user_file, user_string, user_context, user_setting)},
            content={"response": self.processing(user_collection_id, safe_filename, user_string, user_context, user_setting)},
            status_code=status.HTTP_200_OK)
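Since the handler now reads request.form() instead of a JSON body, clients must send multipart form data. A hedged example; only the form field names come from the diff, while the /api/chroma_upsert path and host are assumptions:

# Example multipart upload against the ChromaUpsert handler.
import requests

with open("paper.pdf", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/api/chroma_upsert",  # hypothetical route
        data={"collection_id": "123", "string": "一段要入库的文本"},
        files={"file": ("paper.pdf", f, "application/pdf")},
    )
print(resp.json()["response"])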
@@ -19,40 +19,68 @@ class Fastchat(Blackbox):
        return isinstance(data, list)

    # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
    def processing(self, model_name, prompt, template, context: list) -> str:
    def processing(self, model_name, prompt, template, context: list, temperature, top_p, top_k, n, max_tokens) -> str:
        if context is None:
            context = []
        url = 'http://120.196.116.194:48892/v1/chat/completions'

        # history may be an empty list, or the user's conversation history
        # history = [
        # context may be an empty list, or the user's conversation history
        # context = [
        #     {
        #         "role": "user",
        #         "content": "你吃饭了吗"
        #         "content": "智能体核心思想"
        #     },
        #     {
        #         "role": "assistant",
        #         "content": "作为一个AI模型,我没有吃饭的需要,因为我并不具备实体形态。我专注于提供信息和帮助回答你的问题。你有什么需要帮助的吗?"
        #         "content": "智能体的核心思想是将人工智能应用于问题求解者角色,它通过算法模拟人类决策过程,通过感知环境、学习、规划和执行行动,以实现特定任务或目标。其目标是通过自我适应和优化,实现高效问题解决。"
        #     },
        # ]

        prompt_template = [
            {"role": "system", "content": template},
        ]

        fastchat_inputs = {
            "model": model_name,
            "messages": context + [
            "messages": prompt_template + context + [
                {
                    "role": "user",
                    "content": template + prompt
                    "content": prompt
                }
            ]
            ],
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "n": n,
            "max_tokens": max_tokens,
            "stream": False,
        }

        # {
        #     "model": "string",
        #     "messages": "string",
        #     "temperature": 0.7,  # between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
        #     "top_p": 1,  # shapes the next-token distribution: only the highest-probability subset of all candidate tokens is kept for sampling
        #     "top_k": -1,  # top-k sampling; setting top_k to 3 means choosing among the three most likely tokens
        #     "n": 1,  # how many chat completion choices to generate for each input message
        #     "max_tokens": 1024,  # the maximum number of tokens to generate in the chat completion
        #     "stop": [
        #         "string"
        #     ],
        #     "stream": False,
        #     "presence_penalty": 0,  # number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
        #     "frequency_penalty": 0,  # number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
        #     "user": "string"
        # }

        fastchat_response = requests.post(url, json=fastchat_inputs)

        user_message = fastchat_inputs["messages"]
        context.append(user_message)
        # user_message = fastchat_inputs["messages"]
        # context.append(user_message)

        assistant_message = fastchat_response.json()["choices"][0]["message"]
        context.append(assistant_message)
        # context.append(assistant_message)

        fastchat_content = assistant_message["content"]

@@ -66,19 +94,40 @@ class Fastchat(Blackbox):

        user_model_name = data.get("model_name")
        user_context = data.get("context")
        user_prompt = data.get("prompt")
        user_question = data.get("question")
        user_template = data.get("template")
        user_temperature = data.get("temperature")
        user_top_p = data.get("top_p")
        user_top_k = data.get("top_k")
        user_n = data.get("n")
        user_max_tokens = data.get("max_tokens")

        if user_prompt is None:

        if user_question is None:
            return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)

        if user_model_name is None or user_model_name.isspace():
        if user_model_name is None or user_model_name.isspace() or user_model_name == "":
            user_model_name = "Qwen1.5-14B-Chat"

        if user_template is None or user_template.isspace():
            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string, a user-defined tone, or one of the tones we provide
            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string or a user-defined tone
            user_template = ""
        else:
            user_template = f"使用{user_template}的语气说话。"

        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
        if user_temperature is None or user_temperature == "":
            user_temperature = 0.7

        if user_top_p is None or user_top_p == "":
            user_top_p = 1

        if user_top_k is None or user_top_k == "":
            user_top_k = -1

        if user_n is None or user_n == "":
            user_n = 1

        if user_max_tokens is None or user_max_tokens == "":
            user_max_tokens = 1024

        return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
                                                                 user_temperature, user_top_p, user_top_k, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
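With the sampling parameters now exposed end to end, a hedged request to this handler might look like the following; the field names come from the handler above, while the /api/fastchat path and host are assumptions:

# Example request to the Fastchat handler with explicit sampling parameters.
import requests

payload = {
    "model_name": "Qwen1.5-14B-Chat",
    "question": "智能体核心思想",
    "template": "小丑",  # expanded server-side to 使用小丑的语气说话。
    "context": [],
    "temperature": 0.7,
    "top_p": 1,
    "top_k": -1,          # -1 disables top-k filtering
    "n": 1,
    "max_tokens": 1024,
}
resp = requests.post("http://localhost:8000/api/fastchat", json=payload)  # hypothetical route
print(resp.json()["response"])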
145 src/blackbox/modelscope.py Executable file
@@ -0,0 +1,145 @@
from typing import Any, Coroutine

from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from .blackbox import Blackbox

import requests
import json

from modelscope_agent.agents import RolePlay
from modelscope_agent.tools.base import BaseTool
from modelscope_agent.tools import register_tool

class Modelscope(Blackbox):

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        data = args[0]
        return isinstance(data, list)

    # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
    def processing(self, model_name, prompt, template, context: list) -> str:
        if context is None:
            context = []

        @register_tool('ChromaQuery')
        class AliyunRenewInstanceTool(BaseTool):
            description = '查询chroma数据库中的数据'
            name = 'ChromaQuery'
            parameters: list = [{
                'name': 'id',
                'description': '用户的chroma id',
                'required': True,
                'type': 'string'
            }, {
                'name': 'query',
                'description': '用户需要在chroma中查询的问题',
                'required': True,
                'type': 'string'
            }]

            def call(self, params: str, **kwargs):
                params = self._verify_args(params)
                id = params['id']
                query = params['query']
                query_data = {
                    "chroma_query_data": {
                        "id": id,
                        "question": query
                    }
                }
                url = "http://10.6.80.75:7003"
                response = requests.post(f"{url}/api/chroma_query", json=query_data)
                result = response.json()['response']
                return str({'result': f'Chroma ID为{id}的用户,查询结果为{response}。'})

        @register_tool('WebSearch')
        class WebSearchTool(BaseTool):
            description = '查询网络中的内容'
            name = 'WebSearch'
            parameters: list = [{
                'name': 'search_term',
                'description': '用户需要在Web中查询的问题',
                'required': True,
                'type': 'string'
            }]

            def call(self, params: str, **kwargs):
                params = self._verify_args(params)
                search_term = params['search_term']

                api_key = '9e51be0aaecb5a56fe2faead6e2c702fde92e62a'
                headers = {
                    'X-API-KEY': api_key,
                    'Content-Type': 'application/json',
                }
                params = {
                    'q': search_term
                }
                try:
                    response = requests.post(
                        f'https://google.serper.dev/search',
                        headers=headers,
                        params=params,
                        timeout=5)
                except Exception as e:
                    return -1, str(e)

                result = response.json()['answerBox']['snippet']

                return str({'result': f'WebSearch查询结果为{search_term}{result}。'})

        # define LLM
        api_base_url = "http://120.196.116.194:48892/v1"
        api_key = "EMPTY"
        LLM_MODEL = model_name

        llm_config = {
            'model': LLM_MODEL,
            'model_server': 'openai',
            'api_base': api_base_url,
            'api_key': api_key
        }

        function_list = ['WebSearch', 'ChromaQuery']

        bot = RolePlay(function_list=function_list, llm=llm_config, instruction=template)

        response = bot.run(prompt, history=context, lang='zh')

        text = ''
        for chunk in response:
            text += chunk

        return text

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)

        user_model_name = data.get("model_name")
        user_context = data.get("context")
        user_prompt = data.get("prompt")
        user_template = data.get("template")

        if user_prompt is None:
            return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)

        if user_model_name is None or user_model_name.isspace():
            user_model_name = "Qwen1.5-14B-Chat"

        if user_template is None or user_template.isspace():
            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string, a user-defined tone, or one of the tones we provide
            user_template = ""
        else:
            user_template = f"使用{user_template}的语气说话。"

        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
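This new blackbox wraps a modelscope-agent RolePlay agent that can call the registered WebSearch and ChromaQuery tools on its own initiative. A hedged example call to its handler; the field names match fast_api_handler above, while the /api/modelscope path and host are assumptions:

# Example request to the Modelscope agent handler.
import requests

payload = {
    "model_name": "Qwen1.5-14B-Chat",
    "prompt": "帮我在chroma里查询智能体的核心思想,id是123",
    "template": "",   # optional tone, e.g. "小丑"
    "context": [],
}
resp = requests.post("http://localhost:8000/api/modelscope", json=payload)  # hypothetical route
print(resp.json()["response"])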
@@ -80,13 +80,10 @@ components:
        type: string
        description: "Blackbox name"
        enum:
          - text_to_audio
          - audio_to_text
          - asr
          - tts
          - sentiment_engine
          - emotion
          - tesou
          - fastchat
          - audio_chat
          - g2e