diff --git a/src/blackbox/chat.py b/src/blackbox/chat.py index 369c228..bfee46c 100644 --- a/src/blackbox/chat.py +++ b/src/blackbox/chat.py @@ -40,6 +40,9 @@ class Chat(Blackbox): user_stop = settings.get("stop") user_frequency_penalty = settings.get("frequency_penalty") user_presence_penalty = settings.get("presence_penalty") + user_model_url = settings.get("model_url") + user_model_key = settings.get("model_key") + if user_context == None: user_context = [] @@ -72,15 +75,20 @@ class Chat(Blackbox): if user_presence_penalty is None or user_presence_penalty == "": user_presence_penalty = 0.8 + + if user_model_url is None or user_model_url.isspace() or user_model_url == "": + user_model_url = "http://120.196.116.194:48892/v1/chat/completions" + if user_model_key is None or user_model_key.isspace() or user_model_key == "": + user_model_key = "YOUR_API_KEY" # gpt-4, gpt-3.5-turbo if re.search(r"gpt", user_model_name): url = 'https://api.openai.com/v1/completions' key = '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' else: - url = 'http://120.196.116.194:48892/v1/chat/completions' - key = 'YOUR_API_KEY' + url = user_model_url + key = user_model_key prompt_template = [ {"role": "system", "content": user_template}, diff --git a/src/blackbox/chroma_chat.py b/src/blackbox/chroma_chat.py index 01685f3..fe3cb3e 100755 --- a/src/blackbox/chroma_chat.py +++ b/src/blackbox/chroma_chat.py @@ -6,7 +6,10 @@ from .blackbox import Blackbox from .chat import Chat from .chroma_query import ChromaQuery +from ..log.logging_time import logging_time +import logging +logger = logging.getLogger(__name__) DEFAULT_COLLECTION_ID = "123" from injector import singleton,inject @@ -24,19 +27,68 @@ class ChromaChat(Blackbox): def valid(self, *args, **kwargs) -> bool: data = args[0] return isinstance(data, list) + + @logging_time(logger=logger) + def processing(self, question: str, context: list, settings: dict) -> str: + + if settings is None: + settings = {} + + # # chat setting + user_model_name = 
settings.get("model_name") + user_context = context + user_question = question + user_template = settings.get("template") + user_temperature = settings.get("temperature") + user_top_p = settings.get("top_p") + user_n = settings.get("n") + user_max_tokens = settings.get("max_tokens") + user_stop = settings.get("stop") + user_frequency_penalty = settings.get("frequency_penalty") + user_presence_penalty = settings.get("presence_penalty") + + # # chroma_query settings + chroma_embedding_model = settings.get("chroma_embedding_model") + chroma_host = settings.get("chroma_host") + chroma_port = settings.get("chroma_port") + chroma_collection_id = settings.get("chroma_collection_id") + chroma_n_results = settings.get("chroma_n_results") - def processing(self, question, context: list) -> str: if context == None: context = [] - # load or create collection - collection_id = DEFAULT_COLLECTION_ID + if user_question is None: + return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST) - # query it - chroma_result = self.chroma_query(question, collection_id) + chroma_settings_json={ + "chroma_embedding_model": chroma_embedding_model, + "chroma_host": chroma_host, + "chroma_port": chroma_port, + "chroma_collection_id": chroma_collection_id, + "chroma_n_results": chroma_n_results + } - fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源:" + chroma_result - response = self.chat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='', context=context, temperature=0.8, top_p=0.8, n=1, max_tokens=1024, stop=100,frequency_penalty=0.5,presence_penalty=0.8) + # chroma answer + chroma_result = self.chroma_query(user_question, chroma_settings_json) + + # chat prompt + fast_question = f"问题: {user_question}。根据问题,总结以下内容和来源:{chroma_result}" + + chat_settings_json = { + "model_name": user_model_name, + "context": user_context, + "template": user_template, + "temperature": user_temperature, + "top_p": user_top_p, + "n": user_n, + 
"max_tokens": user_max_tokens, + "stop": user_stop, + "frequency_penalty": user_frequency_penalty, + "presence_penalty": user_presence_penalty + } + + # chat answer + response = self.chat(fast_question, chat_settings_json) return response @@ -49,10 +101,8 @@ class ChromaChat(Blackbox): user_question = data.get("question") user_context = data.get("context") - - if user_question is None: - return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST) + setting: dict = data.get("settings") return JSONResponse( - content={"response": self.processing(user_question, user_context)}, + content={"response": self.processing(user_question, user_context, setting)}, status_code=status.HTTP_200_OK) \ No newline at end of file diff --git a/src/blackbox/chroma_query.py b/src/blackbox/chroma_query.py index 6e39eae..22c8581 100755 --- a/src/blackbox/chroma_query.py +++ b/src/blackbox/chroma_query.py @@ -6,8 +6,11 @@ from .blackbox import Blackbox import chromadb from chromadb.utils import embedding_functions -from ..utils import chroma_setting +import logging +from ..log.logging_time import logging_time +import re +logger = logging.getLogger(__name__) DEFAULT_COLLECTION_ID = "123" from injector import singleton @@ -16,10 +19,11 @@ class ChromaQuery(Blackbox): def __init__(self, *args, **kwargs) -> None: # config = read_yaml(args[0]) - # load embedding model - self.embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/model/Weight/BAAI/bge-small-en-v1.5", device = "cuda") - # load chromadb - self.client = chromadb.HttpClient(host='10.6.82.192', port=8000) + # load chromadb and embedding model + self.embedding_model_1 = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/model/Weight/BAAI/bge-small-en-v1.5", device = "cuda") + # self.embedding_model_2 = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/model/Weight/BAAI/bge-small-en-v1.5", device = "cuda") + self.client_1 = 
chromadb.HttpClient(host='10.6.82.192', port=8000) + # self.client_2 = chromadb.HttpClient(host='10.6.82.192', port=8000) def __call__(self, *args, **kwargs): return self.processing(*args, **kwargs) @@ -28,14 +32,52 @@ class ChromaQuery(Blackbox): data = args[0] return isinstance(data, list) - def processing(self, question: str, collection_id) -> str: + @logging_time(logger=logger) + def processing(self, question: str, settings: dict) -> str: - # load or create collection - collection = self.client.get_collection(collection_id, embedding_function=self.embedding_model) + if settings is None: + settings = {} + + usr_question = question + + # # chroma_query settings + chroma_embedding_model = settings.get("chroma_embedding_model") + chroma_host = settings.get("chroma_host") + chroma_port = settings.get("chroma_port") + chroma_collection_id = settings.get("chroma_collection_id") + chroma_n_results = settings.get("chroma_n_results") + + if usr_question is None: + return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST) + + if chroma_embedding_model is None or chroma_embedding_model.isspace() or chroma_embedding_model == "": + chroma_embedding_model = "bge-small-en-v1.5" + + if chroma_host is None or chroma_host.isspace() or chroma_host == "": + chroma_host = "10.6.82.192" + + if chroma_port is None or chroma_port.isspace() or chroma_port == "": + chroma_port = "8000" + + if chroma_collection_id is None or chroma_collection_id.isspace() or chroma_collection_id == "": + chroma_collection_id = DEFAULT_COLLECTION_ID + + if chroma_n_results is None or chroma_n_results == "": + chroma_n_results = 3 + + # load client + if re.search(r"10\.6\.82\.192", chroma_host) and re.search(r"8000", chroma_port): + client = self.client_1 + + if re.search(r"bge-small-en-v1\.5", chroma_embedding_model): + embedding_model = self.embedding_model_1 + + # load collection + collection = client.get_collection(chroma_collection_id, 
embedding_function=embedding_model) # query it results = collection.query( - query_texts=[question], + query_texts=[usr_question], n_results=3, ) @@ -50,14 +92,8 @@ class ChromaQuery(Blackbox): return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST) user_question = data.get("question") - user_collection_id = data.get("collection_id") - - if user_question is None: - return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST) - - if user_collection_id is None: - user_collection_id = DEFAULT_COLLECTION_ID + setting = data.get("settings") return JSONResponse( - content={"response": self.processing(user_question, user_collection_id)}, + content={"response": self.processing(user_question, setting)}, status_code=status.HTTP_200_OK) \ No newline at end of file