mirror of
https://github.com/BoardWare-Genius/jarvis-models.git
synced 2025-12-14 00:53:25 +00:00
90 lines
3.1 KiB
Python
Executable File
90 lines
3.1 KiB
Python
Executable File
from typing import Any, Coroutine
|
|
|
|
from fastapi import Request, Response, status
|
|
from fastapi.responses import JSONResponse
|
|
from .blackbox import Blackbox
|
|
|
|
from .chat import Chat
|
|
from .chroma_query import ChromaQuery
|
|
from ..log.logging_time import logging_time
|
|
import logging
|
|
|
|
# BUG FIX: the original assigned the ``logging.getLogger`` function itself,
# so ``@logging_time(logger=logger)`` received a function, not a Logger.
# Bind a real module-level logger instead (stdlib convention).
logger = logging.getLogger(__name__)
|
|
# Fallback Chroma collection id.
# NOTE(review): not referenced anywhere in this file — confirm it is used by
# callers/config elsewhere before removing.
DEFAULT_COLLECTION_ID = "123"
|
|
|
|
from injector import singleton,inject
|
|
@singleton
class ChromaChat(Blackbox):
    """Retrieval-augmented chat blackbox.

    Answers a question by first querying a Chroma vector store for relevant
    passages (via ``ChromaQuery``) and then asking the chat model (via
    ``Chat``) to answer strictly from the retrieved content.
    """

    @inject
    def __init__(self, chat: Chat, chroma_query: ChromaQuery):
        # Injected collaborators: ``chat`` produces the final answer,
        # ``chroma_query`` performs the vector-store retrieval.
        self.chat = chat
        self.chroma_query = chroma_query

    def __call__(self, *args, **kwargs):
        """Delegate calls straight to :meth:`processing`."""
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        """Return True when the first positional argument is a list.

        NOTE(review): presumably validates the ``context`` payload — confirm
        against the ``Blackbox`` contract.
        """
        # FIX: the original indexed args[0] unconditionally and raised
        # IndexError when called with no positional arguments.
        if not args:
            return False
        return isinstance(args[0], list)

    @logging_time(logger=logger)
    def processing(self, question: str, context: list, settings: dict) -> str:
        """Run retrieval-augmented chat for ``question``.

        Parameters:
            question: the user's question; required (400 response if None).
            context: prior conversation turns; ``None`` is treated as empty.
            settings: passthrough configuration for both the Chroma query and
                the chat model; ``None`` is treated as empty.

        Returns:
            The chat model's answer string, or a ``JSONResponse`` with a 400
            status when ``question`` is missing.
        """
        # Example ``settings`` payload (kept for reference):
        # {
        #     "chroma_embedding_model": "/model/Weight/BAAI/bge-large-zh-v1.5",
        #     "chroma_host": "10.6.82.192",
        #     "chroma_port": "8000",
        #     "chroma_collection_id": "g2e",
        #     "chroma_n_results": 3,
        #     "model_name": "Qwen1.5-14B-Chat",
        #     "context": [],
        #     "template": "",
        #     "temperature": 0,
        #     "top_p": 0.1,
        #     "n": 1,
        #     "max_tokens": 1024,
        #     "frequency_penalty": 0,
        #     "presence_penalty": 0,
        #     "stop": 100,
        #     "model_url": "http://120.196.116.194:48892/v1/chat/completions",
        #     "model_key": "YOUR_API_KEY"
        # }
        if settings is None:
            settings = {}
        # FIX: was ``== None``; identity comparison is the correct idiom.
        if context is None:
            context = []
        if question is None:
            return JSONResponse(
                content={"error": "question is required"},
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        # Retrieve supporting passages from the vector store.
        chroma_result = self.chroma_query(question, settings)

        # Build the RAG prompt. The Chinese template instructs the model to
        # answer only from the retrieved content and to refuse otherwise;
        # kept verbatim from the original.
        fast_question = f"问题: {question}。- 根据知识库内的检索结果,以清晰简洁的表达方式回答问题。只从检索的内容中选取与问题相关信息。- 不要编造答案,如果答案不在经核实的资料中或无法从经核实的资料中得出,请回答“我无法回答您的问题。”检索内容:{chroma_result}"

        # Ask the chat model for the final answer.
        return self.chat(fast_question, context, settings)

    async def fast_api_handler(self, request: Request) -> Response:
        """FastAPI endpoint: parse the JSON body and answer via processing().

        Expected body: ``{"question": str, "context": list, "settings": dict}``.
        Returns 400 on malformed input, otherwise 200 with
        ``{"response": <answer>}``.
        """
        try:
            data = await request.json()
        # FIX: was a bare ``except:`` which also swallowed SystemExit /
        # KeyboardInterrupt; Exception is sufficient for parse failures.
        except Exception:
            return JSONResponse(
                content={"error": "json parse error"},
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        # FIX (robustness, untrusted input): a valid-JSON-but-non-object body
        # (e.g. a bare list) previously crashed on ``.get`` with a 500.
        if not isinstance(data, dict):
            return JSONResponse(
                content={"error": "json body must be an object"},
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        user_question = data.get("question")
        user_context = data.get("context")
        setting: dict = data.get("settings")

        # FIX: reject a missing question here. Previously processing()'s
        # JSONResponse error object was nested inside ``content={...}``,
        # which is not JSON-serializable and produced a 500 instead of a 400.
        if user_question is None:
            return JSONResponse(
                content={"error": "question is required"},
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        return JSONResponse(
            content={"response": self.processing(user_question, user_context, setting)},
            status_code=status.HTTP_200_OK,
        )