mirror of https://github.com/BoardWare-Genius/jarvis-models.git
synced 2025-12-13 16:53:24 +00:00
feat: add lmdeploy stream chat
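In short: this commit threads a `stream` setting from the request payload through `Chat.processing`, turns `processing` into a generator (yielding instead of returning), parses the `data:`-prefixed SSE chunks emitted by the lmdeploy OpenAI-compatible backend, and makes `fast_api_handler` return an `EventSourceResponse` when streaming is requested, or join the yielded pieces into the usual JSON response otherwise.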
@@ -2,6 +2,7 @@ from typing import Any, Coroutine
 from fastapi import Request, Response, status
 from fastapi.responses import JSONResponse
+from sse_starlette.sse import EventSourceResponse

 from ..log.logging_time import logging_time
 from .blackbox import Blackbox
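The new import is sse-starlette's `EventSourceResponse`, which wraps an iterable and sends each yielded value to the client as a Server-Sent Event. A minimal sketch of how it behaves, separate from this repo's code (route name and tokens are made up):

```python
# Standalone sketch (names and route are illustrative, not from this repo).
# EventSourceResponse turns each yielded string into one `data:` SSE frame.
from fastapi import FastAPI
from sse_starlette.sse import EventSourceResponse

app = FastAPI()

def token_source():
    # stand-in generator; Chat.processing plays this role in the diff
    for token in ["Hel", "lo", "!"]:
        yield token

@app.get("/demo-stream")
async def demo_stream():
    return EventSourceResponse(token_source())
```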
@@ -58,6 +59,7 @@ class Chat(Blackbox):
         chroma_response = ''
         system_prompt = settings.get('system_prompt')
         user_prompt_template = settings.get('user_prompt_template')
+        user_stream = settings.get('stream')

         if user_context == None:
             user_context = []
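For reference, the keys read via `settings.get(...)` imply a request `settings` object of roughly this shape (key names come from this file; the values are invented for the example):

```python
# Illustrative settings payload; only the key names are taken from the diff.
settings = {
    "system_prompt": "You are a helpful assistant.",
    "user_prompt_template": "{question}",
    "stream": True,  # new in this commit; missing or "" is treated as False below
}
```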
@@ -98,7 +100,7 @@ class Chat(Blackbox):
         #user_presence_penalty = 0.8

         if user_model_url is None or user_model_url.isspace() or user_model_url == "":
-            user_model_url = "http://192.168.0.200:23333/v1/chat/completions"
+            user_model_url = "http://10.6.80.75:23333/v1/chat/completions"

         if user_model_key is None or user_model_key.isspace() or user_model_key == "":
             user_model_key = "YOUR_API_KEY"
@@ -119,6 +121,9 @@ class Chat(Blackbox):

         print(f"1.user_question: {user_question}")

+        if user_stream in [None, ""]:
+            user_stream = False
+

         # ERNIE's (Wenxin) format differs from OpenAI's, so it needs separate handling
         if re.search(r"ernie", user_model_name):
@@ -169,6 +174,7 @@ class Chat(Blackbox):
             key = user_model_key
             header = {
                 'Content-Type': 'application/json',
+                "Cache-Control": "no-cache",  # disable caching
             }

             # system_prompt = "# Role: 琪琪,康普可可的代言人。\n\n## Profile:\n**Author**: 琪琪。\n**Language**: 中文。\n**Description**: 琪琪,是康普可可的代言人,由博维开发。你擅长澳门文旅问答。\n\n## Constraints:\n- **严格遵循工作流程**: 严格遵循<Workflow >中设定的工作流程。\n- **无内置知识库** :根据<Workflow >中提供的知识作答,而不是内置知识库,我虽然是知识库专家,但我的知识依赖于外部输入,而不是大模型已有知识。\n- **回复格式**:在进行回复时,不能输出“检索内容” 标签字样,同时也不能直接透露知识片段原文。\n\n## Workflow:\n1. **接收查询**:接收用户的问题。\n2. **判断问题**:首先自行判断下方问题与检索内容是否相关,若相关则根据检索内容总结概括相关信息进行回答;若检索内容与问题无关,则根据自身知识进行回答。\n3. **提供回答**:\n\n```\n基于检索内容中的知识片段回答用户的问题。回答内容限制总结在50字内。\n请首先判断提供的检索内容与上述问题是否相关。如果相关,直接从检索内容中提炼出直接回答问题所需的信息,不要乱说或者回答“相关”等字眼 。如果检索内容与问题不相关,则不参考检索内容,则回答:“对不起,我无法回答此问题哦。”\n\n```\n## Example:\n\n用户询问:“中国的首都是哪个城市?” 。\n2.1检索知识库,首先检查知识片段,如果检索内容中没有与用户的问题相关的内容,则回答:“对不起,我无法回答此问题哦。\n2.2如果有知识片段,在做出回复时,只能基于检索内容中的内容进行回答,且不能透露上下文原文,同时也不能出现检索内容的标签字样。\n"
@@ -191,10 +197,10 @@ class Chat(Blackbox):
                 "max_tokens": str(user_max_tokens),
                 "frequency_penalty": str(user_frequency_penalty),
                 "presence_penalty": str(user_presence_penalty),
-                "stop": str(user_stop)
+                "stop": str(user_stop),
+                "stream": user_stream,
             }

-
             # # get the current timestamp
             # timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

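With this hunk, the body posted to the OpenAI-compatible `/v1/chat/completions` endpoint looks roughly as follows. Only the keys visible in the diff are certain; `model` and `messages` are assumptions based on the endpoint convention:

```python
# Sketch of the request body implied by the diff. "model" and "messages" are
# assumed from the /v1/chat/completions convention; the rest appears verbatim.
chat_inputs = {
    "model": user_model_name,                            # assumption
    "messages": [{"role": "user", "content": prompt}],   # assumption
    "max_tokens": str(user_max_tokens),
    "frequency_penalty": str(user_frequency_penalty),
    "presence_penalty": str(user_presence_penalty),
    "stop": str(user_stop),
    "stream": user_stream,  # new: asks the backend to stream SSE chunks
}
```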
@@ -206,18 +212,48 @@ class Chat(Blackbox):
             # # convert chat_inputs to JSON and write it to a file
             # f.write(json.dumps(chat_inputs, ensure_ascii=False, indent=4))
             # f.write("\n\n")  # add a blank line to separate runs
-            fastchat_response = requests.post(url, json=chat_inputs, headers=header)
-            print("\n", "user_prompt: ", prompt)
-            # print("\n", "system_prompt ", system_prompt)
-            print("\n", "fastchat_response json:\n", fastchat_response.json())
-            response_result = fastchat_response.json()
-
-            if response_result.get("choices") is None:
-                return JSONResponse(content={"error": "LLM handle failure"}, status_code=status.HTTP_400_BAD_REQUEST)
+            if user_stream:
+                with requests.post(url, json=chat_inputs, headers=header, stream=True) as fastchat_response:
+                    if fastchat_response.status_code != 200:
+                        yield json.dumps({"error": "LLM handle failure"})
+                    else:
+                        # read the streamed response piece by piece
+                        for chunk in fastchat_response.iter_lines(decode_unicode=True):
+                            if chunk:  # make sure the chunk is non-empty
+                                # print("Raw Chunk:", chunk)  # dump the raw chunk
+
+                                # strip the `data:` prefix and parse only the JSON part
+                                if chunk.startswith("data:"):
+                                    chunk = chunk[len("data:"):].strip()  # drop the `data:` prefix and trim whitespace
+                                try:
+                                    # try to parse the current chunk as JSON
+                                    parsed_chunk = json.loads(chunk)
+
+                                    # on success, extract `content`
+                                    content = parsed_chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
+
+                                    # if there is content, emit it character by character
+                                    for char in content:
+                                        print(char, end="", flush=True)  # end="" avoids newlines, flush=True keeps output real-time
+                                        yield char  # emit the current character
+
+                                except json.JSONDecodeError:
+                                    # print("---- Error in JSON parsing ----")
+                                    continue  # keep going to the next chunk until one parses
+
             else:
-                print("\n", "user_answer: ", fastchat_response.json()["choices"][0]["message"]["content"],"\n\n")
-                return fastchat_response.json()["choices"][0]["message"]["content"]
+                print("*"*90)
+                fastchat_response = requests.post(url, json=chat_inputs, headers=header)
+                print("\n", "user_prompt: ", prompt)
+                # print("\n", "system_prompt ", system_prompt)
+                print("\n", "fastchat_response json:\n", fastchat_response.json())
+                response_result = fastchat_response.json()
+
+                if response_result.get("choices") is None:
+                    yield JSONResponse(content={"error": "LLM handle failure"}, status_code=status.HTTP_400_BAD_REQUEST)
+                else:
+                    print("\n", "user_answer: ", fastchat_response.json()["choices"][0]["message"]["content"], "\n\n")
+                    yield fastchat_response.json()["choices"][0]["message"]["content"]

     async def fast_api_handler(self, request: Request) -> Response:
         try:
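The streaming branch is, in effect, a hand-rolled SSE client for an OpenAI-compatible endpoint: each line arrives as `data: {json}`, the text lives in `choices[0].delta.content`, and the final `data: [DONE]` sentinel fails JSON parsing, so the `except json.JSONDecodeError: continue` quietly skips it. The same technique as a self-contained sketch (URL, payload, and headers are placeholders):

```python
import json
import requests

def stream_chat(url: str, payload: dict, headers: dict):
    """Yield content deltas from an OpenAI-compatible streaming endpoint.

    Sketch of the parsing loop used in the diff; arguments are placeholders.
    """
    with requests.post(url, json=payload, headers=headers, stream=True) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines(decode_unicode=True):
            if not line:
                continue  # skip SSE keep-alive blank lines
            if line.startswith("data:"):
                line = line[len("data:"):].strip()
            try:
                chunk = json.loads(line)  # "[DONE]" fails here and is skipped
            except json.JSONDecodeError:
                continue
            delta = chunk.get("choices", [{}])[0].get("delta", {})
            if delta.get("content"):
                yield delta["content"]
```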
@@ -228,5 +264,10 @@ class Chat(Blackbox):
             setting: dict = data.get("settings")
             context = data.get("context")
             prompt = data.get("prompt")
-            return JSONResponse(content={"response": self.processing(prompt, context, setting)}, status_code=status.HTTP_200_OK)
+            user_stream = setting.get("stream")
+
+            if user_stream:
+                return EventSourceResponse(self.processing(prompt, context, setting))
+            else:
+                response_content = "".join(self.processing(prompt, context, setting))
+                return JSONResponse(content={"response": response_content}, status_code=status.HTTP_200_OK)
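Handler-side, streaming requests now get an `EventSourceResponse`, while the blocking path joins whatever `processing` yields; note that `"".join(...)` only works if the generator yields strings on that path. A client-side sketch, assuming the handler is mounted at `/chat` (the route itself is not part of this diff):

```python
# Client sketch. The request envelope mirrors the keys fast_api_handler reads
# ("prompt", "context", "settings"); host, port, and the /chat path are assumptions.
import requests

body = {"prompt": "你好", "context": [], "settings": {"stream": True}}
with requests.post("http://localhost:8000/chat", json=body, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data:"):
            print(line[len("data:"):].strip(), end="", flush=True)
```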