mirror of https://github.com/BoardWare-Genius/jarvis-models.git
synced 2025-12-13 16:53:24 +00:00

Commit: update chat
@@ -1,14 +1,14 @@
-# from .audio_chat import AudioChat
-# from .sentiment import Sentiment
-# from .tts import TTS
-# from .asr import ASR
-# from .audio_to_text import AudioToText
+from .audio_chat import AudioChat
+from .sentiment import Sentiment
+from .tts import TTS
+from .asr import ASR
+from .audio_to_text import AudioToText
 from .blackbox import Blackbox
 # from .text_to_audio import TextToAudio
 # from .tesou import Tesou
 from .fastchat import Fastchat
-# from .g2e import G2E
-# from .text_and_image import TextAndImage
+from .g2e import G2E
+from .text_and_image import TextAndImage
 from .chroma_query import ChromaQuery
 from .chroma_upsert import ChromaUpsert
 from .chroma_chat import ChromaChat
@@ -20,29 +20,29 @@ class BlackboxFactory:

     @inject
     def __init__(self,
-                 # audio_to_text: AudioToText,
-                 # text_to_audio: TextToAudio,
-                 # asr: ASR,
-                 # tts: TTS,
-                 # sentiment_engine: Sentiment,
-                 # tesou: Tesou,
+                 audio_to_text: AudioToText,
+                 text_to_audio: TextToAudio,
+                 asr: ASR,
+                 tts: TTS,
+                 sentiment_engine: Sentiment,
+                 tesou: Tesou,
                  fastchat: Fastchat,
-                 # audio_chat: AudioChat,
-                 # g2e: G2E,
-                 # text_and_image: TextAndImage,
+                 audio_chat: AudioChat,
+                 g2e: G2E,
+                 text_and_image: TextAndImage,
                  chroma_query: ChromaQuery,
                  chroma_upsert: ChromaUpsert,
                  chroma_chat: ChromaChat) -> None:
-        # self.models["audio_to_text"] = audio_to_text
-        # self.models["text_to_audio"] = text_to_audio
-        # self.models["asr"] = asr
-        # self.models["tts"] = tts
-        # self.models["sentiment_engine"] = sentiment_engine
-        # self.models["tesou"] = tesou
+        self.models["audio_to_text"] = audio_to_text
+        self.models["text_to_audio"] = text_to_audio
+        self.models["asr"] = asr
+        self.models["tts"] = tts
+        self.models["sentiment_engine"] = sentiment_engine
+        self.models["tesou"] = tesou
         self.models["fastchat"] = fastchat
-        # self.models["audio_chat"] = audio_chat
-        # self.models["g2e"] = g2e
-        # self.models["text_and_image"] = text_and_image
+        self.models["audio_chat"] = audio_chat
+        self.models["g2e"] = g2e
+        self.models["text_and_image"] = text_and_image
         self.models["chroma_query"] = chroma_query
         self.models["chroma_upsert"] = chroma_upsert
         self.models["chroma_chat"] = chroma_chat
@@ -50,8 +50,8 @@ class BlackboxFactory:
     def __call__(self, *args, **kwargs):
         return self.processing(*args, **kwargs)

-    def call_blackbox(self, blackbox_name: str) -> Blackbox:
+    def get_blackbox(self, blackbox_name: str) -> Blackbox:
         model = self.models.get(blackbox_name)
         if model is None:
-            raise ValueError("Invalid blockbox type")
+            raise ValueError("Invalid Blackbox Type...")
         return model
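For context, the factory maps a name string to a handler instance and this commit renames the lookup method from call_blackbox to get_blackbox. Below is a minimal self-contained sketch of that lookup pattern; the Blackbox base class, the EchoChat handler, and the registration are simplified stand-ins, and only get_blackbox mirrors the diff:

    # Minimal sketch of the factory lookup pattern shown in the diff.
    # EchoChat is a hypothetical handler used only for illustration.

    class Blackbox:
        def processing(self, *args, **kwargs):
            raise NotImplementedError

    class EchoChat(Blackbox):
        def processing(self, prompt):
            return f"echo: {prompt}"

    class BlackboxFactory:
        def __init__(self) -> None:
            self.models = {"fastchat": EchoChat()}

        def get_blackbox(self, blackbox_name: str) -> Blackbox:
            model = self.models.get(blackbox_name)
            if model is None:
                raise ValueError("Invalid Blackbox Type...")
            return model

    factory = BlackboxFactory()
    print(factory.get_blackbox("fastchat").processing("hi"))  # echo: hi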
@@ -21,7 +21,7 @@ class Chat(Blackbox):
         return isinstance(data, list)

     # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
-    def processing(self, model_name, prompt, template, context: list, temperature, top_p, n, max_tokens) -> str:
+    def processing(self, model_name, prompt, template, context: list, temperature, top_p, n, max_tokens, stop, frequency_penalty, presence_penalty) -> str:
         if context == None:
             context = []

@@ -49,7 +49,9 @@ class Chat(Blackbox):
             "top_p": top_p,
             "n": n,
             "max_tokens": max_tokens,
-            "stream": False,
+            "frequency_penalty": frequency_penalty,
+            "presence_penalty": presence_penalty,
+            "stop": stop
         }

         header = {
@@ -75,7 +77,9 @@ class Chat(Blackbox):
         user_top_p = data.get("top_p")
         user_n = data.get("n")
         user_max_tokens = data.get("max_tokens")
+        user_stop = data.get("stop")
+        user_frequency_penalty = data.get("frequency_penalty")
+        user_presence_penalty = data.get("presence_penalty")

         if user_question is None:
             return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
@@ -87,10 +91,10 @@ class Chat(Blackbox):
             user_template = ""

         if user_temperature is None or user_temperature == "":
-            user_temperature = 0.7
+            user_temperature = 0.8

         if user_top_p is None or user_top_p == "":
-            user_top_p = 1
+            user_top_p = 0.8

         if user_n is None or user_n == "":
             user_n = 1
@@ -98,6 +102,15 @@ class Chat(Blackbox):
         if user_max_tokens is None or user_max_tokens == "":
             user_max_tokens = 1024

+        if user_stop is None or user_stop == "":
+            user_stop = 100
+
+        if user_frequency_penalty is None or user_frequency_penalty == "":
+            user_frequency_penalty = 0.5
+
+        if user_presence_penalty is None or user_presence_penalty == "":
+            user_presence_penalty = 0.8
+

         return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
-                                                                 user_temperature, user_top_p, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
+                                                                 user_temperature, user_top_p, user_n, user_max_tokens, user_stop, user_frequency_penalty, user_presence_penalty)}, status_code=status.HTTP_200_OK)
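A hedged example of the request body the updated Chat handler now accepts; the route path and host are assumptions, while the field names mirror the handler's data.get(...) keys and the comments show the fallback values the diff applies when a field is missing or empty:

    # Hypothetical client call; endpoint URL is an assumption,
    # JSON keys match the handler's data.get(...) lookups in the diff.
    import requests

    payload = {
        "question": "Hello",               # required; 400 if missing
        "model_name": "Qwen1.5-14B-Chat",
        "temperature": 0.8,                # default 0.8 if omitted
        "top_p": 0.8,                      # default 0.8 if omitted
        "n": 1,                            # default 1
        "max_tokens": 1024,                # default 1024
        "stop": ["\n\n"],                  # handler falls back to 100 if omitted
        "frequency_penalty": 0.5,          # default 0.5
        "presence_penalty": 0.8,           # default 0.8
    }
    resp = requests.post("http://localhost:8000/chat", json=payload)
    print(resp.json()["response"])

Note that OpenAI-compatible backends expect stop to be a string or a list of strings, so the integer fallback of 100 introduced here is worth double-checking against the target server.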
@@ -19,11 +19,11 @@ class G2E(Blackbox):
         return isinstance(data, list)

     # model_name options: Qwen1.5-14B-Chat, internlm2-chat-20b
     def processing(self, model_name, prompt, template, context: list) -> str:
         if context == None:
             context = []
-        url = 'http://120.196.116.194:48890/v1'
-        #url = 'http://120.196.116.194:48892/v1'
+        #url = 'http://120.196.116.194:48890/v1'
+        url = 'http://120.196.116.194:48892/v1'

         background_prompt = '''KOMBUKIKI是一款茶饮料,目标受众 年龄:20-35岁 性别:女性 地点:一线城市、二线城市 职业:精英中产、都市白领 收入水平:中高收入,有一定消费能力 兴趣和爱好:注重健康,有运动习惯

@@ -42,41 +42,46 @@ class G2E(Blackbox):

 KOMBUKIKI康普茶价格 内地常规版:25 RMB 澳门常规版:28-29 MOP'''

-        prompt1 = ''''你是琪琪,活泼的康普茶看板娘,同时你对澳门十分熟悉,是一个澳门旅游专家,请回答任何关于澳门旅游的问题,回答尽量简练明了。
-        '''
-        inject_prompt = '(用活泼的语气说话回答,回答严格限制50字以内)'
+        prompt1 = '''你是琪琪,活泼的康普茶看板娘,同时你对澳门十分熟悉,是一个澳门旅游专家,请回答任何关于澳门旅游的问题,回答尽量简练明了。'''

+        #inject_prompt = '(用活泼的语气说话回答,回答严格限制50字以内)'
+        inject_prompt = '(回答简练,不要输出重复内容,只讲中文)'

-        prompt_template = [
-            {"role": "system", "content": background_prompt + prompt1},
-        ]
         #prompt_template = [
-        #    {"role": "system", "content": ''},
+        #    {"role": "system", "content": background_prompt + prompt1},
         #]
+        prompt_template = [
+            {"role": "system", "content": ''}
+        ]

         messages = prompt_template + context + [
             {
                 "role": "user",
-                "content": prompt + inject_prompt
+                "content": prompt
             }
         ]
+        print("**** History with current prompt input : ****")
+        print(messages)
         client = OpenAI(
             api_key='YOUR_API_KEY',
             base_url=url
         )
         model_name = client.models.list().data[0].id
+        #model_name = client.models.list().data[1].id
         print(model_name)

         response = client.chat.completions.create(
             model=model_name,
             messages=messages,
             temperature=0.8,
             top_p=0.8,
-            # max_tokens = 50
+            frequency_penalty=0.5,
+            presence_penalty=0.8,
+            stop=100
         )

         fastchat_content = response.choices[0].message.content
+        print("*** Model response: " + fastchat_content + " ***")
         return fastchat_content

     async def fast_api_handler(self, request: Request) -> Response:
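For reference, a self-contained sketch of the call pattern G2E now uses against an OpenAI-compatible server. The base_url and api_key are placeholders, the user question is illustrative, and stop is shown as a list of strings, which is the type OpenAI-style APIs expect, whereas the diff passes the integer 100:

    # Sketch of G2E's OpenAI-compatible call path under assumed endpoints.
    from openai import OpenAI

    client = OpenAI(api_key="YOUR_API_KEY", base_url="http://localhost:8000/v1")
    model_name = client.models.list().data[0].id  # pick the first served model

    messages = [
        {"role": "system", "content": ""},        # empty system prompt, as in the new code
        {"role": "user", "content": "澳门有什么好玩的?"},
    ]
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=0.8,
        top_p=0.8,
        frequency_penalty=0.5,
        presence_penalty=0.8,
        stop=["\n\n"],                             # list of strings, not an int
    )
    print(response.choices[0].message.content)

The commit also moves the persona text (background_prompt + prompt1) out of the active system message and into a comment, so the model now receives an empty system prompt plus the raw user question.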