Mirror of https://github.com/BoardWare-Genius/jarvis-models.git, synced 2025-12-13 16:53:24 +00:00
Merge pull request #26 from BoardWare-Genius/ivan
support vlms streaming output text
@@ -1,5 +1,6 @@
 from fastapi import Request, Response, status
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
+from sse_starlette.sse import EventSourceResponse
 from injector import singleton,inject
 from typing import Optional, List

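The two streaming response types imported here behave differently on the wire: FastAPI's StreamingResponse writes raw chunks to the socket, while sse_starlette's EventSourceResponse frames each yielded item as a Server-Sent Events message (a "data: ..." event with the text/event-stream content type), which is what a browser's EventSource API expects. A minimal sketch of the difference, assuming nothing about this repo beyond the two imports (app and chunks are illustrative names):

    from fastapi import FastAPI
    from fastapi.responses import StreamingResponse
    from sse_starlette.sse import EventSourceResponse

    app = FastAPI()

    def chunks():
        # Any iterator of strings works for both response types;
        # each endpoint call below gets a fresh generator.
        yield "hello"
        yield "world"

    @app.get("/raw")
    def raw():
        # Body on the wire: "helloworld" -- chunks concatenated, no framing.
        return StreamingResponse(chunks(), media_type="text/plain")

    @app.get("/sse")
    def sse():
        # Body framed as SSE: "data: hello" and "data: world",
        # one event per yielded item, separated by blank lines.
        return EventSourceResponse(chunks())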
@@ -194,20 +195,21 @@ class VLMS(Blackbox):
         responses = ''
         total_token_usage = 0 # which can be used to count the cost of a query
         for i,item in enumerate(api_client.chat_completions_v1(model=model_name,
-                                messages=messages,#stream = True,
+                                messages=messages,stream = True,
                                 **settings,
                                 # session_id=,
                                 )):
             # Stream output
-            # print(item["choices"][0]["delta"]['content'],end='')
-            # responses += item["choices"][0]["delta"]['content']
+            print(item["choices"][0]["delta"]['content'],end='\n')
+            yield item["choices"][0]["delta"]['content']
+            responses += item["choices"][0]["delta"]['content']

-            print(item["choices"][0]["message"]['content'])
-            responses += item["choices"][0]["message"]['content']
+            # print(item["choices"][0]["message"]['content'])
+            # responses += item["choices"][0]["message"]['content']
             # total_token_usage += item['usage']['total_tokens'] # 'usage': {'prompt_tokens': *, 'total_tokens': *, 'completion_tokens': *}
         user_context = messages + [{'role': 'assistant', 'content': responses}]
         self.custom_print(user_context)
-        return responses, user_context
+        # return responses, user_context

     def _get_model_url(self,model_name:str | None):
         available_models = {}
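The pivotal change in this hunk is the yield: its presence turns processing() into a generator function, so calling it now returns an iterator of text deltas instead of running the body eagerly. That is also why the trailing return responses, user_context is commented out; in a generator that value would only surface as StopIteration.value, not as an ordinary return to the caller. Likewise, with stream=True the chunks carry "delta" rather than a complete "message", so the full reply only exists once every delta has been concatenated. A minimal sketch of the pattern, with the vendor client stubbed out since chat_completions_v1's real behavior is outside this diff:

    def fake_chat_completions_v1(**kwargs):
        # Hypothetical stand-in for api_client.chat_completions_v1(stream=True):
        # a streamed completion arrives as a sequence of delta chunks.
        for word in ("Hello", ", ", "world"):
            yield {"choices": [{"delta": {"content": word}}]}

    def processing(messages):
        responses = ''
        for item in fake_chat_completions_v1(messages=messages, stream=True):
            delta = item["choices"][0]["delta"]["content"]
            yield delta         # handed to the caller as soon as it arrives
            responses += delta  # accumulated for the conversation history
        # Runs only after the last chunk: the full reply now exists.
        user_context = messages + [{'role': 'assistant', 'content': responses}]
        print(user_context)

    # The caller iterates; nothing executes until the first next() call.
    for chunk in processing([{'role': 'user', 'content': 'hi'}]):
        print(chunk, end='')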
@@ -346,8 +348,11 @@ class VLMS(Blackbox):

         # if model_name is None or model_name.isspace():
         #     model_name = "Qwen-VL-Chat"
+        # response,_ = self.processing(prompt, img_data,settings, model_name,user_context=user_context)
+        # return StreamingResponse(self.processing(prompt, img_data,settings, model_name,user_context=user_context), status_code=status.HTTP_200_OK)
+        return EventSourceResponse(self.processing(prompt, img_data,settings, model_name,user_context=user_context), status_code=status.HTTP_200_OK)

+        # HTTP JsonResponse
         response, history = self.processing(prompt, img_data,settings, model_name,user_context=user_context)
-        # jsonresp = str(JSONResponse(content={"response": self.processing(prompt, img_data, model_name)}).body, "utf-8")
+        # return JSONResponse(content={"response": response}, status_code=status.HTTP_200_OK)
         return JSONResponse(content={"response": response}, status_code=status.HTTP_200_OK)
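Note that everything below the new return EventSourceResponse(...) line, the "# HTTP JsonResponse" block included, is now unreachable dead code, apparently kept as a rollback path. The unstarted generator from processing() is handed to EventSourceResponse, which drives it and emits each yielded string as one SSE event. A client can then consume the stream with plain requests; the endpoint path and payload below are hypothetical, since the route definition is outside this diff:

    import requests

    # Hypothetical URL and body; substitute the actual VLMS route and schema.
    with requests.post(
        "http://localhost:8000/vlms",
        json={"prompt": "Describe the image."},
        stream=True,
    ) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            # Each SSE event arrives as a "data: <chunk>" line; the blank
            # lines separating events are skipped here.
            if line and line.startswith("data:"):
                print(line[len("data:"):].strip(), end="", flush=True)
    print()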