Mirror of https://github.com/BoardWare-Genius/jarvis-models.git
Synced 2025-12-13 16:53:24 +00:00
update chroma and chat
@@ -32,6 +32,8 @@ class Chat(Blackbox):
     # @logging_time()
     def processing(self, prompt: str, context: list, settings: dict) -> str:
 
+        print("\n Settings: ", settings)
+
         if settings is None:
             settings = {}
         user_model_name = settings.get("model_name")
@@ -58,16 +60,19 @@ class Chat(Blackbox):
             return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
 
         if user_model_name is None or user_model_name.isspace() or user_model_name == "":
-            user_model_name = "Qwen1.5-14B-Chat"
+            user_model_name = "qwen"
+            #user_model_name = "Qwen1.5-14B-Chat"
 
         if user_template is None or user_template.isspace():
             user_template = ""
 
         if user_temperature is None or user_temperature == "":
-            user_temperature = 0.8
+            user_temperature = 0
+            #user_temperature = 0
 
         if user_top_p is None or user_top_p == "":
-            user_top_p = 0.8
+            user_top_p = 0.1
+            #user_top_p = 0.8
 
         if user_n is None or user_n == "":
             user_n = 1
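
Note: this hunk and the one that follows repeat the same "None or empty string" fallback for every setting. A possible consolidation is sketched below; the helper name with_defaults and the DEFAULTS table are illustrative assumptions, not part of this commit.

    # Illustrative only, not part of this commit: collapse the repeated
    # "None / empty / whitespace" checks into one defaults merge.
    DEFAULTS = {
        "model_name": "qwen",
        "temperature": 0,
        "top_p": 0.1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    def with_defaults(settings: dict, defaults: dict = DEFAULTS) -> dict:
        merged = dict(defaults)
        for key, value in (settings or {}).items():
            # Treat None, "" and whitespace-only strings as "not provided".
            if value is None or (isinstance(value, str) and value.strip() == ""):
                continue
            merged[key] = value
        return merged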
@@ -79,20 +84,22 @@ class Chat(Blackbox):
             user_stop = 100
 
         if user_frequency_penalty is None or user_frequency_penalty == "":
-            user_frequency_penalty = 0.5
+            user_frequency_penalty = 0
+            #user_frequency_penalty = 0.5
 
         if user_presence_penalty is None or user_presence_penalty == "":
-            user_presence_penalty = 0.8
+            user_presence_penalty = 0
+            #user_presence_penalty = 0.8
 
         if user_model_url is None or user_model_url.isspace() or user_model_url == "":
-            user_model_url = "http://120.196.116.194:48892/v1/chat/completions"
+            user_model_url = "http://172.16.5.8:23333/v1/chat/completions"
 
         if user_model_key is None or user_model_key.isspace() or user_model_key == "":
             user_model_key = "YOUR_API_KEY"
 
         if chroma_embedding_model != None:
             chroma_response = self.chroma_query(user_question, settings)
-            print(chroma_response)
+            print("chroma_response", chroma_response)
 
             if chroma_response != None or chroma_response != '':
                 #user_question = f"像少女一般开朗活泼,回答简练。不要分条,回答内容不能出现“相关”或“\n”的标签字样。回答的内容需要与问题密切相关。检索内容:{chroma_response} 问题:{user_question} 任务说明:请首先判断提供的检索内容与上述问题是否相关,不需要回答是否相关。如果相关,则直接从检索内容中提炼出问题所需的信息。如果检索内容与问题不相关,则不参考检索内容,直接根据常识尝试回答问题。"
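
Note: the guard "if chroma_response != None or chroma_response != '':" in the context above is always true, since None satisfies the second comparison and '' satisfies the first. If the intent is to skip empty retrieval results, the check needs "and" (and, idiomatically, "is not None"). A minimal sketch of that corrected guard, assuming that intent:

    # Sketch only: proceed with the retrieval-augmented prompt only when
    # self.chroma_query actually returned something non-empty.
    if chroma_response is not None and chroma_response != "":
        pass  # build the prompt from chroma_response, as in the diff above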
@@ -189,7 +196,7 @@ class Chat(Blackbox):
         # 问题中的“澳门银河”以及“银河”等于“澳门银河度假村”,“威尼斯人”等于“威尼斯人度假村”,“巴黎人”等于“巴黎人度假村”。
         # '''
 
-        user_template1 = '''
+        user_template1 = f'''
         # Role: 琪琪,康普可可的代言人。
 
         ## Profile:
@@ -243,10 +250,16 @@ class Chat(Blackbox):
         }
 
         fastchat_response = requests.post(url, json=chat_inputs, headers=header)
-        print("\n", fastchat_response.json())
-        print("\n","fastchat_response",fastchat_response.json()["choices"][0]["message"]["content"],"\n\n")
+        print("\n", "user_question ", user_question)
+        print("\n", "user_template1 ", user_template1)
+        print("\n", "fastchat_response json\n", fastchat_response.json())
+        response_result = fastchat_response.json()
 
-        return fastchat_response.json()["choices"][0]["message"]["content"]
+        if response_result.get("choices") is None:
+            return JSONResponse(content={"error": "LLM handle failure"}, status_code=status.HTTP_400_BAD_REQUEST)
+        else:
+            print("\n", "user_answer ", fastchat_response.json()["choices"][0]["message"]["content"],"\n\n")
+            return fastchat_response.json()["choices"][0]["message"]["content"]
 
     async def fast_api_handler(self, request: Request) -> Response:
         try:
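
Note: the new "choices" check handles a well-formed JSON reply that lacks that field, but requests.post can still raise on connection errors, and .json() can fail on a non-JSON body, before the check runs. A defensive wrapper is sketched below; the function name call_llm and the 60-second timeout are illustrative assumptions, not part of this commit.

    import requests

    # Sketch only: the names url / chat_inputs / header mirror the diff above.
    # Network errors, non-2xx statuses, invalid JSON and a missing "choices"
    # field all collapse into a single None result that the caller can map
    # to the 400 JSONResponse used in the diff.
    def call_llm(url: str, chat_inputs: dict, header: dict) -> str | None:
        try:
            resp = requests.post(url, json=chat_inputs, headers=header, timeout=60)
            resp.raise_for_status()
            payload = resp.json()
        except (requests.RequestException, ValueError):
            return None
        choices = payload.get("choices")
        if not choices:
            return None
        return choices[0]["message"]["content"]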