From 21ac4a53192a57fb65e866c228bc50839de7a46b Mon Sep 17 00:00:00 2001
From: ACBBZ
Date: Fri, 10 May 2024 07:21:21 +0000
Subject: [PATCH 1/5] new branch

---
 src/blackbox/blackbox_factory.py |  54 ++++++------
 src/blackbox/chroma_chat.py      |  14 ++-
 src/blackbox/chroma_query.py     |   2 +-
 src/blackbox/chroma_upsert.py    |  32 ++++---
 src/blackbox/fastchat.py         |  80 +++++++++++++----
 src/blackbox/modelscope.py       | 145 +++++++++++++++++++++++++++++++
 swagger.yml                      |   5 +-
 7 files changed, 272 insertions(+), 60 deletions(-)
 create mode 100755 src/blackbox/modelscope.py

diff --git a/src/blackbox/blackbox_factory.py b/src/blackbox/blackbox_factory.py
index aabe569..624a760 100644
--- a/src/blackbox/blackbox_factory.py
+++ b/src/blackbox/blackbox_factory.py
@@ -1,14 +1,14 @@
-from .audio_chat import AudioChat
-from .sentiment import Sentiment
-from .tts import TTS
-from .asr import ASR
-from .audio_to_text import AudioToText
+# from .audio_chat import AudioChat
+# from .sentiment import Sentiment
+# from .tts import TTS
+# from .asr import ASR
+# from .audio_to_text import AudioToText
 from .blackbox import Blackbox
-from .text_to_audio import TextToAudio
-from .tesou import Tesou
+# from .text_to_audio import TextToAudio
+# from .tesou import Tesou
 from .fastchat import Fastchat
-from .g2e import G2E
-from .text_and_image import TextAndImage
+# from .g2e import G2E
+# from .text_and_image import TextAndImage
 from .chroma_query import ChromaQuery
 from .chroma_upsert import ChromaUpsert
 from .chroma_chat import ChromaChat
@@ -20,29 +20,29 @@ class BlackboxFactory:

     @inject
     def __init__(self,
-                 audio_to_text: AudioToText,
-                 text_to_audio: TextToAudio,
-                 asr: ASR,
-                 tts: TTS,
-                 sentiment_engine: Sentiment,
-                 tesou: Tesou,
+                 # audio_to_text: AudioToText,
+                 # text_to_audio: TextToAudio,
+                 # asr: ASR,
+                 # tts: TTS,
+                 # sentiment_engine: Sentiment,
+                 # tesou: Tesou,
                  fastchat: Fastchat,
-                 audio_chat: AudioChat,
-                 g2e: G2E,
-                 text_and_image:TextAndImage,
+                 # audio_chat: AudioChat,
+                 # g2e: G2E,
+                 # text_and_image:TextAndImage,
                  chroma_query: ChromaQuery,
                  chroma_upsert: ChromaUpsert,
                  chroma_chat: ChromaChat) -> None:
-        self.models["audio_to_text"] = audio_to_text
-        self.models["text_to_audio"] = text_to_audio
-        self.models["asr"] = asr
-        self.models["tts"] = tts
-        self.models["sentiment_engine"] = sentiment_engine
-        self.models["tesou"] = tesou
+        # self.models["audio_to_text"] = audio_to_text
+        # self.models["text_to_audio"] = text_to_audio
+        # self.models["asr"] = asr
+        # self.models["tts"] = tts
+        # self.models["sentiment_engine"] = sentiment_engine
+        # self.models["tesou"] = tesou
         self.models["fastchat"] = fastchat
-        self.models["audio_chat"] = audio_chat
-        self.models["g2e"] = g2e
-        self.models["text_and_image"] = text_and_image
+        # self.models["audio_chat"] = audio_chat
+        # self.models["g2e"] = g2e
+        # self.models["text_and_image"] = text_and_image
         self.models["chroma_query"] = chroma_query
         self.models["chroma_upsert"] = chroma_upsert
         self.models["chroma_chat"] = chroma_chat
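Note on the factory wiring above: blackbox construction is delegated to the injector library, which resolves constructor arguments from their type annotations. A minimal sketch of how the @singleton/@inject pair works (the class bodies are illustrative stand-ins, not the real modules):

    from injector import Injector, inject, singleton

    @singleton
    class ChromaQuery:              # stand-in for src/blackbox/chroma_query.py
        pass

    @singleton
    class ChromaChat:               # stand-in for src/blackbox/chroma_chat.py
        @inject
        def __init__(self, chroma_query: ChromaQuery):
            # injector inspects the annotation and supplies the instance
            self.chroma_query = chroma_query

    injector = Injector()
    chat = injector.get(ChromaChat)
    # @singleton means one shared instance per Injector
    assert chat.chroma_query is injector.get(ChromaQuery)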
diff --git a/src/blackbox/chroma_chat.py b/src/blackbox/chroma_chat.py
index 560712c..b388675 100755
--- a/src/blackbox/chroma_chat.py
+++ b/src/blackbox/chroma_chat.py
@@ -5,13 +5,17 @@ from fastapi.responses import JSONResponse
 from .blackbox import Blackbox
 from ..utils import chroma_setting

+from .fastchat import Fastchat
+from .chroma_query import ChromaQuery
+
 DEFAULT_COLLECTION_ID = "123"

-from injector import singleton
+from injector import singleton,inject
 @singleton
 class ChromaChat(Blackbox):

-    def __init__(self, fastchat, chroma_query):
+    @inject
+    def __init__(self, fastchat: Fastchat, chroma_query: ChromaQuery):
         self.fastchat = fastchat
         self.chroma_query = chroma_query

@@ -29,11 +33,13 @@ class ChromaChat(Blackbox):
             collection_id = DEFAULT_COLLECTION_ID
         else:
             collection_id = setting.ChromaSetting.collection_ids[0]
+        print("collection_id: ",collection_id)

         # query it
         chroma_result = self.chroma_query(question, collection_id)
+        print("chroma_result: ",type(chroma_result),chroma_result)

-        fast_question = "问题: "+ question + "。根据问题,总结以下内容:" + chroma_result
-        response = self.fastchat(fast_question)
+        fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源:" + chroma_result
+        response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字', context=None)

         return response

diff --git a/src/blackbox/chroma_query.py b/src/blackbox/chroma_query.py
index 8597677..0c72d80 100755
--- a/src/blackbox/chroma_query.py
+++ b/src/blackbox/chroma_query.py
@@ -39,7 +39,7 @@ class ChromaQuery(Blackbox):
             n_results=3,
         )

-        response = results["documents"] + results["metadatas"]
+        response = str(results["documents"] + results["metadatas"])

         return response

diff --git a/src/blackbox/chroma_upsert.py b/src/blackbox/chroma_upsert.py
index 60607d5..9145d61 100755
--- a/src/blackbox/chroma_upsert.py
+++ b/src/blackbox/chroma_upsert.py
@@ -59,8 +59,11 @@ class ChromaUpsert(Blackbox):
         else:
             collection_id = "123"

+        print("file: ",file)
+        print("file name: ",file.filename)
         if file is not None:
-            file_type = file.split(".")[-1]
+            file_type = file.filename.split(".")[-1]
+            print("file_type: ",file_type)
             if file_type == "pdf":
                 loader = PyPDFLoader(file)
             elif file_type == "txt":
@@ -102,16 +105,12 @@ class ChromaUpsert(Blackbox):

     async def fast_api_handler(self, request: Request) -> Response:

-        try:
-            data = await request.json()
-        except:
-            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
-
-        user_collection_id = data.get("collection_id")
-        user_file = data.get("file")
-        user_string = data.get("string")
-        user_context = data.get("context")
-        user_setting = data.get("setting")
+
+        user_collection_id = (await request.form()).get("collection_id")
+        user_file = (await request.form()).get("file")
+        user_string = (await request.form()).get("string")
+        user_context = (await request.form()).get("context")
+        user_setting = (await request.form()).get("setting")

         if user_collection_id is None and user_setting["collections"] == []:
             return JSONResponse(content={"error": "The first creation requires a collection id"}, status_code=status.HTTP_400_BAD_REQUEST)
@@ -119,6 +118,17 @@ class ChromaUpsert(Blackbox):
         if user_file is None and user_string is None:
             return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)

+        # data = await user_file.read()
+        # with open(f'./{data.filename}', 'wb') as f:
+        #     f.write(content)
+
+        loader = PyPDFLoader(f'./{user_file.filename}')
+        documents = loader.load()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
+        docs = text_splitter.split_documents(documents)
+        print("docs: ",docs)
+
+
         return JSONResponse(
             content={"response": self.processing(user_collection_id, user_file, user_string, user_context, user_setting)},
             status_code=status.HTTP_200_OK)
\ No newline at end of file
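Note on the chroma_query change above: collection.query() returns parallel lists of lists (one inner list per query text), so str(documents + metadatas) serializes the raw nested structure straight into the prompt. A hedged sketch of the shape involved, plus one possible way to keep each document aligned with its source (the formatting is a suggestion, not part of the patch):

    results = collection.query(query_texts=[question], n_results=3)
    # results == {"ids": [[...]], "documents": [[d1, d2, d3]],
    #             "metadatas": [[m1, m2, m3]], "distances": [[...]]}

    # pair each document with its metadata instead of concatenating the lists
    pairs = zip(results["documents"][0], results["metadatas"][0])
    response = "\n".join(f"{doc} (source: {meta})" for doc, meta in pairs)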
diff --git a/src/blackbox/fastchat.py b/src/blackbox/fastchat.py
index 27c95c1..6b0ba6f 100755
--- a/src/blackbox/fastchat.py
+++ b/src/blackbox/fastchat.py
@@ -19,40 +19,68 @@ class Fastchat(Blackbox):
         return isinstance(data, list)

     # available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
-    def processing(self, model_name, prompt, template, context: list) -> str:
+    def processing(self, model_name, prompt, template, context: list, temperature, top_p, top_k, n, max_tokens) -> str:
         if context == None:
             context = []
         url = 'http://120.196.116.194:48892/v1/chat/completions'

-        # history may be an empty list, or the user's conversation history
-        # history = [
+        # context may be an empty list, or the user's conversation history
+        # context = [
         #     {
         #         "role": "user",
-        #         "content": "你吃饭了吗"
+        #         "content": "智能体核心思想"
         #     },
         #     {
         #         "role": "assistant",
-        #         "content": "作为一个AI模型,我没有吃饭的需要,因为我并不具备实体形态。我专注于提供信息和帮助回答你的问题。你有什么需要帮助的吗?"
+        #         "content": "智能体的核心思想是将人工智能应用于问题求解者角色,它通过算法模拟人类决策过程,通过感知环境、学习、规划和执行行动,以实现特定任务或目标。其目标是通过自我适应和优化,实现高效问题解决。"
         #     },
         # ]

+        prompt_template = [
+            {"role": "system", "content": template},
+        ]
+
         fastchat_inputs={
             "model": model_name,
-            "messages": context + [
+            "messages": prompt_template + context + [
                 {
                     "role": "user",
-                    "content": template + prompt
+                    "content": prompt
                 }
-            ]
+            ],
+            "temperature": temperature,
+            "top_p": top_p,
+            "top_k": top_k,
+            "n": n,
+            "max_tokens": max_tokens,
+            "stream": False,
         }

+        # {
+        #     "model": "string",
+        #     "messages": "string",
+        #     "temperature": 0.7,  # between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        #     "top_p": 1,  # controls the probability distribution for the next token: only the highest-probability portion of all candidate tokens is kept for sampling
+        #     "top_k": -1,  # a top-k of 3 means only the three most likely tokens are considered.
+        #     "n": 1,  # How many chat completion choices to generate for each input message.
+        #     "max_tokens": 1024,  # The maximum number of tokens to generate in the chat completion.
+        #     "stop": [
+        #         "string"
+        #     ],
+        #     "stream": False,
+        #     "presence_penalty": 0,  # Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        #     "frequency_penalty": 0,  # Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+        #     "user": "string"
+        # }
+
         fastchat_response = requests.post(url, json=fastchat_inputs)

-        user_message = fastchat_inputs["messages"]
-        context.append(user_message)
+        # user_message = fastchat_inputs["messages"]
+        # context.append(user_message)

         assistant_message = fastchat_response.json()["choices"][0]["message"]
-        context.append(assistant_message)
+        # context.append(assistant_message)

         fastchat_content = assistant_message["content"]

@@ -68,17 +96,37 @@ class Fastchat(Blackbox):
         user_context = data.get("context")
         user_prompt = data.get("prompt")
         user_template = data.get("template")
+        user_temperature = data.get("temperature")
+        user_top_p = data.get("top_p")
+        user_top_k = data.get("top_k")
+        user_n = data.get("n")
+        user_max_tokens = data.get("max_tokens")

         if user_prompt is None:
             return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)

-        if user_model_name is None or user_model_name.isspace():
+        if user_model_name is None or user_model_name.isspace() or user_model_name == "":
             user_model_name = "Qwen1.5-14B-Chat"

         if user_template is None or user_template.isspace():
-            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string, a user-defined tone, or one of the tones we provide
+            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string, or a user-defined tone
             user_template = ""
-        else:
-            user_template = f"使用{user_template}的语气说话。"
+
+        if user_temperature is None or user_temperature == "":
+            user_temperature = 0.7

-        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
+        if user_top_p is None or user_top_p == "":
+            user_top_p = 1
+
+        if user_top_k is None or user_top_k == "":
+            user_top_k = -1
+
+        if user_n is None or user_n == "":
+            user_n = 3
+
+        if user_max_tokens is None or user_max_tokens == "":
+            user_max_tokens = 1024
+
+
+        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context,
+                                                                 user_temperature, user_top_p, user_top_k, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
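Note on Fastchat.processing above: the reply is read via fastchat_response.json()["choices"][0]["message"] with no status check, so any upstream error surfaces as an opaque KeyError. A defensive sketch (the timeout value is an assumption):

    import requests

    resp = requests.post(url, json=fastchat_inputs, timeout=60)
    resp.raise_for_status()                 # surface HTTP-level failures early
    body = resp.json()
    if not body.get("choices"):
        raise RuntimeError(f"unexpected completion payload: {body}")
    content = body["choices"][0]["message"]["content"]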
diff --git a/src/blackbox/modelscope.py b/src/blackbox/modelscope.py
new file mode 100755
index 0000000..9e5e61d
--- /dev/null
+++ b/src/blackbox/modelscope.py
@@ -0,0 +1,145 @@
+from typing import Any, Coroutine
+
+from fastapi import Request, Response, status
+from fastapi.responses import JSONResponse
+from .blackbox import Blackbox
+
+import requests
+import json
+
+from modelscope_agent.agents import RolePlay
+from modelscope_agent.tools.base import BaseTool
+from modelscope_agent.tools import register_tool
+
+class Modelscope(Blackbox):
+
+    def __call__(self, *args, **kwargs):
+        return self.processing(*args, **kwargs)
+
+    def valid(self, *args, **kwargs) -> bool:
+        data = args[0]
+        return isinstance(data, list)
+
+    # available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
+    def processing(self, model_name, prompt, template, context: list) -> str:
+        if context == None:
+            context = []
+
+        @register_tool('ChromaQuery')
+        class AliyunRenewInstanceTool(BaseTool):
+            description = '查询chroma数据库中的数据'
+            name = 'ChromaQuery'
+            parameters: list = [{
+                'name': 'id',
+                'description': '用户的chroma id',
+                'required': True,
+                'type': 'string'
+            }, {
+                'name': 'query',
+                'description': '用户需要在chroma中查询的问题',
+                'required': True,
+                'type': 'string'
+            }]
+
+            def call(self, params: str, **kwargs):
+                params = self._verify_args(params)
+                id = params['id']
+                query = params['query']
+                query_data = {
+                    "chroma_query_data": {
+                        "id": id,
+                        "question": query
+                    }
+                }
+                url = "http://10.6.80.75:7003"
+                response = requests.post(f"{url}/api/chroma_query", json=query_data)
+                result = response.json()['response']
+                return str({'result': f'Chroma ID为{id}的用户,查询结果为{response}。'})
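Note on AliyunRenewInstanceTool.call above (the class name looks carried over from a modelscope-agent example, while the tool itself is registered as 'ChromaQuery'): result is parsed from the JSON body but never used, and the returned f-string interpolates the raw response object instead. The presumably intended return, inferred from context:

    result = response.json()['response']
    return str({'result': f'Chroma ID为{id}的用户,查询结果为{result}。'})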
+
+        @register_tool('WebSearch')
+        class WebSearchTool(BaseTool):
+            description = '查询网络中的内容'
+            name = 'WebSearch'
+            parameters: list = [{
+                'name': 'search_term',
+                'description': '用户需要在Web中查询的问题',
+                'required': True,
+                'type': 'string'
+            }]
+
+            def call(self, params: str, **kwargs):
+                params = self._verify_args(params)
+                search_term = params['search_term']
+
+                api_key='9e51be0aaecb5a56fe2faead6e2c702fde92e62a'
+                headers = {
+                    'X-API-KEY': api_key,
+                    'Content-Type': 'application/json',
+                }
+                params = {
+                    'q': search_term
+                }
+                try:
+                    response = requests.post(
+                        f'https://google.serper.dev/search',
+                        headers=headers,
+                        params=params,
+                        timeout=5)
+                except Exception as e:
+                    return -1, str(e)
+
+                result = response.json()['answerBox']['snippet']
+
+                return str({'result': f'WebSearch查询结果为{search_term}{result}。'})
+
+
+        # define LLM
+        api_base_url = "http://120.196.116.194:48892/v1"
+        api_key= "EMPTY"
+        LLM_MODEL = model_name
+
+        llm_config = {
+            'model': LLM_MODEL,
+            'model_server': 'openai',
+            'api_base':api_base_url,
+            'api_key': api_key
+        }
+
+        function_list = ['WebSearch', 'ChromaQuery']
+
+        bot = RolePlay(function_list=function_list,llm=llm_config, instruction=template)
+
+        response = bot.run(prompt, history=context, lang='zh')
+
+        text = ''
+        for chunk in response:
+            text += chunk
+
+        return text
+
+
+    async def fast_api_handler(self, request: Request) -> Response:
+        try:
+            data = await request.json()
+        except:
+            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
+
+        user_model_name = data.get("model_name")
+        user_context = data.get("context")
+        user_prompt = data.get("prompt")
+        user_template = data.get("template")
+
+        if user_prompt is None:
+            return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
+
+        if user_model_name is None or user_model_name.isspace():
+            user_model_name = "Qwen1.5-14B-Chat"
+
+        if user_template is None or user_template.isspace():
+            # user_template sets the LLM's tone, e.g. template = "使用小丑的语气说话。"; it may be an empty string, a user-defined tone, or one of the tones we provide
+            user_template = ""
+        else:
+            user_template = f"使用{user_template}的语气说话。"
+
+        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
diff --git a/swagger.yml b/swagger.yml
index 6546996..454d73b 100644
--- a/swagger.yml
+++ b/swagger.yml
@@ -89,4 +89,7 @@ components:
       - fastchat
       - audio_chat
       - g2e
-      - text_and_image
\ No newline at end of file
+      - text_and_image
+      - chroma_query
+      - chroma_chat
+      - chroma_upsert
\ No newline at end of file
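Note on WebSearchTool in patch 1: the failure path returns a (-1, str(e)) tuple while the success path returns a string, so the agent framework receives inconsistent tool-result types; the serper API key is also committed in plain text. A type-consistent sketch (reading the key from the environment is a suggestion, not part of the patch):

    import os
    import requests

    def call(self, params: str, **kwargs):
        params = self._verify_args(params)
        search_term = params['search_term']
        headers = {
            'X-API-KEY': os.environ['SERPER_API_KEY'],  # keep secrets out of git
            'Content-Type': 'application/json',
        }
        try:
            response = requests.post('https://google.serper.dev/search',
                                     headers=headers, params={'q': search_term},
                                     timeout=5)
            response.raise_for_status()
            result = response.json()['answerBox']['snippet']
        except Exception as e:
            return str({'error': str(e)})   # same return type on every path
        return str({'result': f'WebSearch查询结果为{search_term}{result}。'})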
From b7d789fb0442c7cff421c8c18ec7e3226becf91a Mon Sep 17 00:00:00 2001
From: ACBBZ
Date: Tue, 14 May 2024 08:14:05 +0000
Subject: [PATCH 2/5] update

---
 src/blackbox/chroma_chat.py   |  4 +--
 src/blackbox/chroma_query.py  |  2 +-
 src/blackbox/chroma_upsert.py | 50 ++++++++++++++++++++++-------------
 src/blackbox/fastchat.py      |  9 ++++---
 test.pdf                      |  0
 5 files changed, 38 insertions(+), 27 deletions(-)
 create mode 100644 test.pdf

diff --git a/src/blackbox/chroma_chat.py b/src/blackbox/chroma_chat.py
index b388675..33b9840 100755
--- a/src/blackbox/chroma_chat.py
+++ b/src/blackbox/chroma_chat.py
@@ -33,13 +33,11 @@ class ChromaChat(Blackbox):
             collection_id = DEFAULT_COLLECTION_ID
         else:
             collection_id = setting.ChromaSetting.collection_ids[0]
-        print("collection_id: ",collection_id)

         # query it
         chroma_result = self.chroma_query(question, collection_id)
-        print("chroma_result: ",type(chroma_result),chroma_result)

         fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源:" + chroma_result
-        response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字', context=None)
+        response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字.', context=None, temperature=0.8, top_p=0.8, top_k=-1, n=1, max_tokens=1024)

         return response

diff --git a/src/blackbox/chroma_query.py b/src/blackbox/chroma_query.py
index 0c72d80..6e39eae 100755
--- a/src/blackbox/chroma_query.py
+++ b/src/blackbox/chroma_query.py
@@ -31,7 +31,7 @@ class ChromaQuery(Blackbox):
     def processing(self, question: str, collection_id) -> str:

         # load or create collection
-        collection = self.client.get_or_create_collection(collection_id, embedding_function=self.embedding_model)
+        collection = self.client.get_collection(collection_id, embedding_function=self.embedding_model)

         # query it
         results = collection.query(
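Note on the chroma_query change in this patch: get_collection raises when the collection does not exist, whereas the previous get_or_create_collection silently created an empty one. If missing collections are expected, the lookup needs a guard along these lines (the fallback message is illustrative):

    try:
        collection = self.client.get_collection(collection_id, embedding_function=self.embedding_model)
    except Exception:
        return f"collection {collection_id} does not exist; upsert documents first"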
diff --git a/src/blackbox/chroma_upsert.py b/src/blackbox/chroma_upsert.py
index 9145d61..c6e71f9 100755
--- a/src/blackbox/chroma_upsert.py
+++ b/src/blackbox/chroma_upsert.py
@@ -13,6 +13,9 @@ from langchain_community.vectorstores import Chroma
 from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
 from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
 import chromadb
+
+import os
+import tempfile

 from ..utils import chroma_setting

@@ -50,7 +53,7 @@ class ChromaUpsert(Blackbox):
                 "collection_id": "123",
                 "action": "upsert",
                 "content": "file_name or string",
-                "answer": "success, collection has 100 documents.",
+                "answer": "collection 123 has 12472 documents. /tmp/Cheap and Quick:Efficient Vision-Language Instruction Tuning for Large Language Models.pdf ids is 0~111",
             },
         ]

@@ -59,10 +62,8 @@ class ChromaUpsert(Blackbox):
         else:
             collection_id = "123"

-        print("file: ",file)
-        print("file name: ",file.filename)
         if file is not None:
-            file_type = file.filename.split(".")[-1]
+            file_type = file.split(".")[-1]
             print("file_type: ",file_type)
             if file_type == "pdf":
                 loader = PyPDFLoader(file)
             elif file_type == "txt":
@@ -80,7 +81,6 @@ class ChromaUpsert(Blackbox):
                 loader = UnstructuredExcelLoader(file)

-            loader = PyPDFLoader(file)
             documents = loader.load()

             text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
@@ -89,18 +89,28 @@ class ChromaUpsert(Blackbox):
             ids = [str(file)+str(i) for i in range(len(docs))]

             Chroma.from_documents(documents=docs, embedding=self.embedding_model, ids=ids, collection_name=collection_id, client=self.client)
+
+            collection_number = self.client.get_collection(collection_id).count()
+            response_file = f"collection {collection_id} has {collection_number} documents. {file} ids is 0~{len(docs)-1}"

         if string is not None:
             # generate a new id  ids_string: 1
-            ids = setting.ChromaSetting.string_ids[0] + 1
+            # ids = setting.ChromaSetting.string_ids[0] + 1
+            ids = "1"

             Chroma.from_texts(texts=[string], embedding=self.embedding_model, ids=[ids], collection_name=collection_id, client=self.client)

-            collection_number = self.client.get_collection(collection_id).count()
-            response = f"collection {collection_id} has {collection_number} documents."
+            collection_number = self.client.get_collection(collection_id).count()
+            response_string = f"collection {collection_id} has {collection_number} documents. {string} ids is {ids}"

-        return response
+
+        if file is not None and string is not None:
+            return response_file + " \n and " + response_string
+        elif file is not None and string is None:
+            return response_file
+        elif file is None and string is not None:
+            return response_string

@@ -117,18 +127,20 @@ class ChromaUpsert(Blackbox):
         if user_file is None and user_string is None:
             return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)

-        # data = await user_file.read()
-        # with open(f'./{data.filename}', 'wb') as f:
-        #     f.write(content)
-
-        loader = PyPDFLoader(f'./{user_file.filename}')
-        documents = loader.load()
-        text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
-        docs = text_splitter.split_documents(documents)
-        print("docs: ",docs)
+        if user_file is not None:
+            pdf_bytes = await user_file.read()
+
+            custom_filename = user_file.filename
+            # get the system temp directory path
+            safe_filename = os.path.join(tempfile.gettempdir(), os.path.basename(custom_filename))
+
+            with open(safe_filename, "wb") as f:
+                f.write(pdf_bytes)
+        else:
+            safe_filename = None

         return JSONResponse(
-            content={"response": self.processing(user_collection_id, user_file, user_string, user_context, user_setting)},
+            content={"response": self.processing(user_collection_id, safe_filename, user_string, user_context, user_setting)},
             status_code=status.HTTP_200_OK)
\ No newline at end of file
diff --git a/src/blackbox/fastchat.py b/src/blackbox/fastchat.py
index 6b0ba6f..3144d5e 100755
--- a/src/blackbox/fastchat.py
+++ b/src/blackbox/fastchat.py
@@ -94,15 +94,16 @@ class Fastchat(Blackbox):

         user_model_name = data.get("model_name")
         user_context = data.get("context")
-        user_prompt = data.get("prompt")
+        user_question = data.get("question")
         user_template = data.get("template")
         user_temperature = data.get("temperature")
         user_top_p = data.get("top_p")
         user_top_k = data.get("top_k")
         user_n = data.get("n")
         user_max_tokens = data.get("max_tokens")
+

-        if user_prompt is None:
+        if user_question is None:
             return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)

         if user_model_name is None or user_model_name.isspace() or user_model_name == "":
@@ -122,11 +123,11 @@ class Fastchat(Blackbox):
             user_top_k = -1

         if user_n is None or user_n == "":
-            user_n = 3
+            user_n = 1

         if user_max_tokens is None or user_max_tokens == "":
             user_max_tokens = 1024

-        return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context,
+        return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
                                                                  user_temperature, user_top_p, user_top_k, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
diff --git a/test.pdf b/test.pdf
new file mode 100644
index 0000000..e69de29
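Note on the upload handling added to chroma_upsert.fast_api_handler: the file is written to the system temp directory (os.path.basename strips any client-supplied path components, which is good), but it is never deleted after processing runs, and the handler awaits request.form() once per field. A sketch of the same flow with a single form read and cleanup:

    import os
    import tempfile

    form = await request.form()          # read the multipart form once
    user_file = form.get("file")

    path = os.path.join(tempfile.gettempdir(),
                        os.path.basename(user_file.filename))
    with open(path, "wb") as f:
        f.write(await user_file.read())
    try:
        response = self.processing(user_collection_id, path, user_string,
                                   user_context, user_setting)
    finally:
        os.remove(path)                  # don't leave uploads in the temp dir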
From 4f7f64a49ae71263f5a1bbce7f9f2fd40b3bf56f Mon Sep 17 00:00:00 2001
From: ACBBZ
Date: Tue, 21 May 2024 03:19:54 +0000
Subject: [PATCH 3/5] add chat

---
 src/blackbox/chat.py | 103 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 103 insertions(+)
 create mode 100644 src/blackbox/chat.py

diff --git a/src/blackbox/chat.py b/src/blackbox/chat.py
new file mode 100644
index 0000000..316fcc4
--- /dev/null
+++ b/src/blackbox/chat.py
@@ -0,0 +1,103 @@
+from typing import Any, Coroutine
+
+from fastapi import Request, Response, status
+from fastapi.responses import JSONResponse
+from .blackbox import Blackbox
+
+import requests
+import json
+from openai import OpenAI
+import re
+
+from injector import singleton
+@singleton
+class Chat(Blackbox):
+
+    def __call__(self, *args, **kwargs):
+        return self.processing(*args, **kwargs)
+
+    def valid(self, *args, **kwargs) -> bool:
+        data = args[0]
+        return isinstance(data, list)
+
+    # available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
+    def processing(self, model_name, prompt, template, context: list, temperature, top_p, n, max_tokens) -> str:
+        if context == None:
+            context = []
+
+        # gpt-4, gpt-3.5-turbo
+        if re.search(r"gpt", model_name):
+            url = 'https://api.openai.com/v1/completions'
+            key = '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'
+        else:
+            url = 'http://120.196.116.194:48892/v1/chat/completions'
+            key = 'YOUR_API_KEY'
+
+        prompt_template = [
+            {"role": "system", "content": template},
+        ]
+
+        chat_inputs={
+            "model": model_name,
+            "messages": prompt_template + context + [
+                {
+                    "role": "user",
+                    "content": prompt
+                }
+            ],
+            "temperature": temperature,
+            "top_p": top_p,
+            "n": n,
+            "max_tokens": max_tokens,
+            "stream": False,
+        }
+
+        header = {
+            'Content-Type': 'application/json',
+            'Authorization': "Bearer " + key
+        }
+
+        fastchat_response = requests.post(url, json=chat_inputs, headers=header)
+
+        return fastchat_response.json()["choices"][0]["message"]["content"]
+
+    async def fast_api_handler(self, request: Request) -> Response:
+        try:
+            data = await request.json()
+        except:
+            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
+
+        user_model_name = data.get("model_name")
+        user_context = data.get("context")
+        user_question = data.get("question")
+        user_template = data.get("template")
+        user_temperature = data.get("temperature")
+        user_top_p = data.get("top_p")
+        user_n = data.get("n")
+        user_max_tokens = data.get("max_tokens")
+
+
+        if user_question is None:
+            return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
+
+        if user_model_name is None or user_model_name.isspace() or user_model_name == "":
+            user_model_name = "Qwen1.5-14B-Chat"
+
+        if user_template is None or user_template.isspace():
+            user_template = ""
+
+        if user_temperature is None or user_temperature == "":
+            user_temperature = 0.7
+
+        if user_top_p is None or user_top_p == "":
+            user_top_p = 1
+
+        if user_n is None or user_n == "":
+            user_n = 1
+
+        if user_max_tokens is None or user_max_tokens == "":
+            user_max_tokens = 1024
+
+
+        return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
+                                                                 user_temperature, user_top_p, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
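Note on Chat.processing in patch 3: the payload is chat-completions shaped (a messages array), but model names matching "gpt" are routed to the legacy https://api.openai.com/v1/completions endpoint, which expects a prompt field rather than messages. An OpenAI API key is also committed in the source and should be rotated. A sketch of the presumably intended routing (the environment-variable lookup is a suggestion):

    import os
    import re

    if re.search(r"gpt", model_name):
        # chat-format payloads belong on the chat completions endpoint
        url = 'https://api.openai.com/v1/chat/completions'
        key = os.environ['OPENAI_API_KEY']   # never commit live keys
    else:
        url = 'http://120.196.116.194:48892/v1/chat/completions'
        key = 'YOUR_API_KEY'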
From 99ecc45a47e5333b60830e10792ced0d3cee7cd3 Mon Sep 17 00:00:00 2001
From: ACBBZ
Date: Thu, 23 May 2024 02:52:32 +0000
Subject: [PATCH 4/5] update chat

---
 src/blackbox/blackbox_factory.py | 56 ++++++++++++++++----------------
 src/blackbox/chat.py             | 25 ++++++++++----
 src/blackbox/g2e.py              | 33 +++++++++++--------
 3 files changed, 66 insertions(+), 48 deletions(-)

diff --git a/src/blackbox/blackbox_factory.py b/src/blackbox/blackbox_factory.py
index 624a760..c9baa6a 100644
--- a/src/blackbox/blackbox_factory.py
+++ b/src/blackbox/blackbox_factory.py
@@ -1,14 +1,14 @@
-# from .audio_chat import AudioChat
-# from .sentiment import Sentiment
-# from .tts import TTS
-# from .asr import ASR
-# from .audio_to_text import AudioToText
+from .audio_chat import AudioChat
+from .sentiment import Sentiment
+from .tts import TTS
+from .asr import ASR
+from .audio_to_text import AudioToText
 from .blackbox import Blackbox
 # from .text_to_audio import TextToAudio
 # from .tesou import Tesou
 from .fastchat import Fastchat
-# from .g2e import G2E
-# from .text_and_image import TextAndImage
+from .g2e import G2E
+from .text_and_image import TextAndImage
 from .chroma_query import ChromaQuery
 from .chroma_upsert import ChromaUpsert
 from .chroma_chat import ChromaChat
@@ -20,29 +20,29 @@ class BlackboxFactory:

     @inject
     def __init__(self,
-                 # audio_to_text: AudioToText,
-                 # text_to_audio: TextToAudio,
-                 # asr: ASR,
-                 # tts: TTS,
-                 # sentiment_engine: Sentiment,
-                 # tesou: Tesou,
+                 audio_to_text: AudioToText,
+                 text_to_audio: TextToAudio,
+                 asr: ASR,
+                 tts: TTS,
+                 sentiment_engine: Sentiment,
+                 tesou: Tesou,
                  fastchat: Fastchat,
-                 # audio_chat: AudioChat,
-                 # g2e: G2E,
-                 # text_and_image:TextAndImage,
+                 audio_chat: AudioChat,
+                 g2e: G2E,
+                 text_and_image:TextAndImage,
                  chroma_query: ChromaQuery,
                  chroma_upsert: ChromaUpsert,
                  chroma_chat: ChromaChat) -> None:
-        # self.models["audio_to_text"] = audio_to_text
-        # self.models["text_to_audio"] = text_to_audio
-        # self.models["asr"] = asr
-        # self.models["tts"] = tts
-        # self.models["sentiment_engine"] = sentiment_engine
-        # self.models["tesou"] = tesou
+        self.models["audio_to_text"] = audio_to_text
+        self.models["text_to_audio"] = text_to_audio
+        self.models["asr"] = asr
+        self.models["tts"] = tts
+        self.models["sentiment_engine"] = sentiment_engine
+        self.models["tesou"] = tesou
         self.models["fastchat"] = fastchat
-        # self.models["audio_chat"] = audio_chat
-        # self.models["g2e"] = g2e
-        # self.models["text_and_image"] = text_and_image
+        self.models["audio_chat"] = audio_chat
+        self.models["g2e"] = g2e
+        self.models["text_and_image"] = text_and_image
         self.models["chroma_query"] = chroma_query
         self.models["chroma_upsert"] = chroma_upsert
         self.models["chroma_chat"] = chroma_chat
@@ -50,8 +50,8 @@ class BlackboxFactory:
     def __call__(self, *args, **kwargs):
         return self.processing(*args, **kwargs)

-    def call_blackbox(self, blackbox_name: str) -> Blackbox:
+    def get_blackbox(self, blackbox_name: str) -> Blackbox:
         model = self.models.get(blackbox_name)
         if model is None:
-            raise ValueError("Invalid blockbox type")
-        return model
\ No newline at end of file
+            raise ValueError("Invalid Blackbox Type...")
+        return model
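Note on the factory changes above: the constructor re-enables the text_to_audio: TextToAudio and tesou: Tesou parameters, but the corresponding imports at the top of the file remain commented out, so those annotations will raise NameError when the class body is evaluated (unless annotation evaluation is deferred with `from __future__ import annotations`). Either restore the two imports or keep the parameters commented. A usage sketch for the renamed accessor:

    factory = injector.get(BlackboxFactory)
    try:
        blackbox = factory.get_blackbox("chroma_chat")
    except ValueError:
        ...   # "Invalid Blackbox Type..." is raised for unknown names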
diff --git a/src/blackbox/chat.py b/src/blackbox/chat.py
index 316fcc4..0d5448f 100644
--- a/src/blackbox/chat.py
+++ b/src/blackbox/chat.py
@@ -21,7 +21,7 @@ class Chat(Blackbox):
         return isinstance(data, list)

     # available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
-    def processing(self, model_name, prompt, template, context: list, temperature, top_p, n, max_tokens) -> str:
+    def processing(self, model_name, prompt, template, context: list, temperature, top_p, n, max_tokens,stop,frequency_penalty,presence_penalty) -> str:
         if context == None:
             context = []

@@ -49,7 +49,9 @@ class Chat(Blackbox):
             "top_p": top_p,
             "n": n,
             "max_tokens": max_tokens,
-            "stream": False,
+            "frequency_penalty": frequency_penalty,
+            "presence_penalty": presence_penalty,
+            "stop": stop
         }

         header = {
@@ -75,7 +77,9 @@ class Chat(Blackbox):
         user_top_p = data.get("top_p")
         user_n = data.get("n")
         user_max_tokens = data.get("max_tokens")
-
+        user_stop = data.get("stop")
+        user_frequency_penalty = data.get("frequency_penalty")
+        user_presence_penalty = data.get("presence_penalty")

         if user_question is None:
             return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
@@ -87,10 +91,10 @@ class Chat(Blackbox):
             user_template = ""

         if user_temperature is None or user_temperature == "":
-            user_temperature = 0.7
+            user_temperature = 0.8

         if user_top_p is None or user_top_p == "":
-            user_top_p = 1
+            user_top_p = 0.8

         if user_n is None or user_n == "":
             user_n = 1
@@ -98,6 +102,15 @@ class Chat(Blackbox):
         if user_max_tokens is None or user_max_tokens == "":
             user_max_tokens = 1024

+        if user_stop is None or user_stop == "":
+            user_stop = 100
+
+        if user_frequency_penalty is None or user_frequency_penalty == "":
+            user_frequency_penalty = 0.5
+
+        if user_presence_penalty is None or user_presence_penalty == "":
+            user_presence_penalty = 0.8
+

         return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
-                                                                 user_temperature, user_top_p, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
+                                                                 user_temperature, user_top_p, user_n, user_max_tokens,user_stop,user_frequency_penalty,user_presence_penalty)}, status_code=status.HTTP_200_OK)
\ No newline at end of file
diff --git a/src/blackbox/g2e.py b/src/blackbox/g2e.py
index 8bd5507..416ab72 100755
--- a/src/blackbox/g2e.py
+++ b/src/blackbox/g2e.py
@@ -19,11 +19,11 @@ class G2E(Blackbox):
         return isinstance(data, list)

     # available model_name values: Qwen1.5-14B-Chat, internlm2-chat-20b
-    def processing(self, model_name, prompt, template, context: list) -> str:
+    def processing(self, model_name, prompt, template, context: list) -> str:
         if context == None:
             context = []
-        url = 'http://120.196.116.194:48890/v1'
-        #url = 'http://120.196.116.194:48892/v1'
+        #url = 'http://120.196.116.194:48890/v1'
+        url = 'http://120.196.116.194:48892/v1'

         background_prompt = '''KOMBUKIKI是一款茶饮料,目标受众
         年龄:20-35岁
         性别:女性
         地点:一线城市、二线城市
         职业:精英中产、都市白领
         收入水平:中高收入,有一定消费能力
         兴趣和爱好:注重健康,有运动习惯
@@ -42,41 +42,46 @@ class G2E(Blackbox):
         KOMBUKIKI康普茶价格
         内地常规版:25 RMB
         澳门常规版:28-29 MOP'''

-        prompt1 = ''''你是琪琪,活泼的康普茶看板娘,同时你对澳门十分熟悉,是一个澳门旅游专家,请回答任何关于澳门旅游的问题,回答尽量简练明了。
-        '''
-        inject_prompt = '(用活泼的语气说话回答,回答严格限制50字以内)'
+        prompt1 = '''你是琪琪,活泼的康普茶看板娘,同时你对澳门十分熟悉,是一个澳门旅游专家,请回答任何关于澳门旅游的问题,回答尽量简练明了。'''
+        #inject_prompt = '(用活泼的语气说话回答,回答严格限制50字以内)'
+        inject_prompt = '(回答简练,不要输出重复内容,只讲中文)'

-        prompt_template = [
-            {"role": "system", "content": background_prompt + prompt1},
-        ]
         #prompt_template = [
-        #    {"role": "system", "content": ''},
+        #    {"role": "system", "content": background_prompt + prompt1},
         #]
-
+        prompt_template = [
+            {"role": "system", "content": ''}
+        ]
         messages = prompt_template + context + [
             {
                 "role": "user",
-                "content": prompt + inject_prompt
+                "content": prompt
             }
         ]
+        print("**** History with current prompt input : ****")
+        print(messages)

         client = OpenAI(
             api_key='YOUR_API_KEY',
             base_url=url
         )
         model_name = client.models.list().data[0].id
+        #model_name = client.models.list().data[1].id
        print(model_name)
+
         response = client.chat.completions.create(
             model=model_name,
             messages=messages,
             temperature=0.8,
             top_p=0.8,
-            # max_tokens = 50
+            frequency_penalty=0.5,
+            presence_penalty=0.8,
+            stop=100
         )

         fastchat_content = response.choices[0].message.content
-
+        print("*** Model response: " + fastchat_content + " ***")
         return fastchat_content

     async def fast_api_handler(self, request: Request) -> Response:
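Note on the stop handling in patch 4: in the OpenAI-compatible chat API, stop is a string or a list of up to four strings, so the integer defaults here (user_stop = 100 in chat.py, stop=100 in g2e.py) are likely to be rejected or silently misread by the server. A type-correct sketch:

    user_stop = data.get("stop")
    if not user_stop:          # None or "" -> send no stop sequences
        user_stop = None       # omit the field rather than sending a number
    # e.g. user_stop = ["\n\n", "###"] to cut generation at those markers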
From 38c5d43e481f64216918861fa3d860a9444dbb9a Mon Sep 17 00:00:00 2001
From: ACBBZ
Date: Thu, 23 May 2024 04:02:50 +0000
Subject: [PATCH 5/5] update chroma_chat

---
 src/blackbox/chroma_chat.py | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/src/blackbox/chroma_chat.py b/src/blackbox/chroma_chat.py
index 33b9840..01685f3 100755
--- a/src/blackbox/chroma_chat.py
+++ b/src/blackbox/chroma_chat.py
@@ -4,8 +4,7 @@ from fastapi import Request, Response, status
 from fastapi.responses import JSONResponse

 from .blackbox import Blackbox
-from ..utils import chroma_setting
-from .fastchat import Fastchat
+from .chat import Chat
 from .chroma_query import ChromaQuery

 DEFAULT_COLLECTION_ID = "123"
@@ -15,8 +14,8 @@ from injector import singleton,inject
 class ChromaChat(Blackbox):

     @inject
-    def __init__(self, fastchat: Fastchat, chroma_query: ChromaQuery):
-        self.fastchat = fastchat
+    def __init__(self, chat: Chat, chroma_query: ChromaQuery):
+        self.chat = chat
         self.chroma_query = chroma_query

     def __call__(self, *args, **kwargs):
@@ -26,18 +25,18 @@ class ChromaChat(Blackbox):
         data = args[0]
         return isinstance(data, list)

-    def processing(self, question, setting: chroma_setting) -> str:
+    def processing(self, question, context: list) -> str:
+        if context == None:
+            context = []

         # load or create collection
-        if setting is None:
-            collection_id = DEFAULT_COLLECTION_ID
-        else:
-            collection_id = setting.ChromaSetting.collection_ids[0]
+        collection_id = DEFAULT_COLLECTION_ID
+

         # query it
         chroma_result = self.chroma_query(question, collection_id)

         fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源:" + chroma_result
-        response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字.', context=None, temperature=0.8, top_p=0.8, top_k=-1, n=1, max_tokens=1024)
+        response = self.chat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='', context=context, temperature=0.8, top_p=0.8, n=1, max_tokens=1024, stop=100,frequency_penalty=0.5,presence_penalty=0.8)

         return response
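With patch 5 applied, a minimal end-to-end usage sketch of the retrieval-augmented chat (a sketch assuming collection "123" already holds documents, per the get_collection change in patch 2, and that the repo root is on sys.path):

    from injector import Injector
    from src.blackbox.chroma_chat import ChromaChat

    injector = Injector()
    chroma_chat = injector.get(ChromaChat)   # Chat and ChromaQuery wired via @inject

    # retrieval (ChromaQuery) followed by summarization (Chat);
    # `context` carries prior conversation turns, empty here
    answer = chroma_chat.processing("智能体的核心思想是什么?", context=[])
    print(answer)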