from typing import Any

from fastapi import Request, Response, status
from fastapi.responses import JSONResponse
from injector import singleton
from lagent.llms.lmdepoly_wrapper import LMDeployClient
from lagent.llms.meta_template import INTERNLM2_META as META

from .blackbox import Blackbox


@singleton
class Emotion(Blackbox):
    """Blackbox that infers the emotion of a text passage with an LMDeploy-served LLM."""

    def __init__(self, model_name, model_url) -> None:
        # LMDeploy client configured with the original sampling parameters.
        self.model = LMDeployClient(
            model_name=model_name,
            url=model_url,
            meta_template=META,
            top_p=0.8,
            top_k=100,
            temperature=0,
            repetition_penalty=1.0,
            stop_words=['<|im_end|>'])

    def __call__(self, *args, **kwargs):
        return self.processing(*args, **kwargs)

    def valid(self, *args, **kwargs) -> bool:
        data = args[0]
        return isinstance(data, str)

    def processing(self, *args, **kwargs) -> Any:
        # Wrap the raw text in a one-word emotion prompt and send it to the
        # model as a single-turn chat.
        text = args[0]
        prompt = ("Please use one word to infer the emotion of the following passage:\n"
                  + text +
                  "\nJust print out that single word, please.")
        messages = [{'role': 'user', 'content': prompt}]
        return self.model.stream_chat(messages)

    async def fast_api_handler(self, request: Request) -> Response:
        try:
            data = await request.json()
        except Exception:
            return JSONResponse(content={"error": "json parse error"}, status_code=status.HTTP_400_BAD_REQUEST)
        text = data.get("text")
        if text is None:
            return JSONResponse(content={"error": "text is required"}, status_code=status.HTTP_400_BAD_REQUEST)
        # processing() builds the prompt and message list itself, so the raw
        # text is passed through unmodified.
        sentiment = self.processing(text)
        return JSONResponse(content={"sentiment": sentiment}, status_code=status.HTTP_200_OK)
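

# Usage sketch, not part of the original module: one plausible way to expose
# this blackbox over HTTP. The model name, server URL, route path, and port
# below are placeholder assumptions, and the module is assumed to be launched
# with `python -m <package>.emotion` so that the relative import resolves.
if __name__ == "__main__":
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    # Hypothetical model name and LMDeploy server address.
    emotion = Emotion(model_name="internlm2-chat-7b",
                      model_url="http://localhost:23333")
    # Register the async handler as a POST endpoint, e.g.
    #   curl -X POST http://localhost:8000/emotion -d '{"text": "I passed the exam!"}'
    app.add_api_route("/emotion", emotion.fast_api_handler, methods=["POST"])
    uvicorn.run(app, host="0.0.0.0", port=8000)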