commit 7f59147768 (parent 2bcb9d990f)
Author: ACBBZ, committed by superobk
Date: 2024-05-14 08:14:05 +00:00
5 changed files with 38 additions and 27 deletions

View File

@@ -33,13 +33,11 @@ class ChromaChat(Blackbox):
collection_id = DEFAULT_COLLECTION_ID
else:
collection_id = setting.ChromaSetting.collection_ids[0]
print("collection_id: ",collection_id)
# query it
chroma_result = self.chroma_query(question, collection_id)
print("chroma_result: ",type(chroma_result),chroma_result)
fast_question = "问题: "+ question + "。根据问题,总结以下内容和来源:" + chroma_result
response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字', context=None)
response = self.fastchat(model_name="Qwen1.5-14B-Chat", prompt=fast_question, template='回答限制50字.', context=None, temperature=0.8, top_p=0.8, top_k=-1, n=1, max_tokens=1024)
return response
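Note: the replacement line above spells out the sampling parameters that were previously left at their defaults. A minimal sketch of how the call reads after this change, with each parameter annotated; it assumes the fastchat wrapper (defined elsewhere in this repo) simply forwards these keyword arguments to the model backend:

# Sketch only: assumes self.fastchat forwards these options unchanged.
response = self.fastchat(
    model_name="Qwen1.5-14B-Chat",
    prompt=fast_question,
    template='回答限制50字.',   # "limit the answer to 50 characters"
    context=None,
    temperature=0.8,   # sampling temperature
    top_p=0.8,         # nucleus-sampling cutoff
    top_k=-1,          # -1 disables top-k filtering
    n=1,               # one completion
    max_tokens=1024,   # generation length cap
)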

View File

@@ -31,7 +31,7 @@ class ChromaQuery(Blackbox):
def processing(self, question: str, collection_id) -> str:
# load or create collection
collection = self.client.get_or_create_collection(collection_id, embedding_function=self.embedding_model)
collection = self.client.get_collection(collection_id, embedding_function=self.embedding_model)
# query it
results = collection.query(
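Note: switching from get_or_create_collection to get_collection makes the query fail fast when the collection does not exist, instead of silently querying a freshly created, empty one. A minimal sketch of the query path under that change; the n_results value and the result unpacking are illustrative, not taken from this file:

# Sketch: get_collection raises if the collection is missing,
# rather than creating an empty one as get_or_create_collection would.
collection = self.client.get_collection(
    collection_id, embedding_function=self.embedding_model
)
results = collection.query(
    query_texts=[question],  # embedded with the configured embedding_function
    n_results=5,             # illustrative top-k, not this project's setting
)
documents = results["documents"][0]   # chromadb returns parallel lists per query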

View File

@@ -14,6 +14,9 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTex
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import chromadb
import os
import tempfile
from ..utils import chroma_setting
from injector import singleton
@@ -50,7 +53,7 @@ class ChromaUpsert(Blackbox):
"collection_id": "123",
"action": "upsert",
"content": "file_name or string",
"answer": "success, collection has 100 documents.",
"answer": "collection 123 has 12472 documents. /tmp/Cheap and QuickEfficient Vision-Language Instruction Tuning for Large Language Models.pdf ids is 0~111",
},
]
@@ -59,10 +62,8 @@ class ChromaUpsert(Blackbox):
else:
collection_id = "123"
print("file: ",file)
print("file name: ",file.filename)
if file is not None:
file_type = file.filename.split(".")[-1]
file_type = file.split(".")[-1]
print("file_type: ",file_type)
if file_type == "pdf":
loader = PyPDFLoader(file)
@@ -80,7 +81,6 @@ class ChromaUpsert(Blackbox):
loader = UnstructuredExcelLoader(file)
loader = PyPDFLoader(file)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
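Note: after the change above, `file` is a filesystem path rather than an UploadFile, so it can be fed straight to the loaders. A minimal sketch of the load/split/upsert pipeline this block implements; the sequential-id generation is an assumption inferred from the "{file} ids is 0~{len(docs)-1}" response string in the next hunk, since that line is not shown here:

# Sketch: load a PDF from a path, split into 512-character chunks,
# and upsert the chunks with sequential string ids.
loader = PyPDFLoader(file)   # `file` is a path on disk after this commit
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
ids = [str(i) for i in range(len(docs))]   # assumed: ids 0..len(docs)-1
Chroma.from_documents(
    documents=docs,
    embedding=self.embedding_model,
    ids=ids,
    collection_name=collection_id,
    client=self.client,
)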
@@ -90,17 +90,27 @@ class ChromaUpsert(Blackbox):
Chroma.from_documents(documents=docs, embedding=self.embedding_model, ids=ids, collection_name=collection_id, client=self.client)
collection_number = self.client.get_collection(collection_id).count()
response_file = f"collection {collection_id} has {collection_number} documents. {file} ids is 0~{len(docs)-1}"
if string is not None:
# generate a new id, ids_string: 1
ids = setting.ChromaSetting.string_ids[0] + 1
# ids = setting.ChromaSetting.string_ids[0] + 1
ids = "1"
Chroma.from_texts(texts=[string], embedding=self.embedding_model, ids=[ids], collection_name=collection_id, client=self.client)
collection_number = self.client.get_collection(collection_id).count()
response = f"collection {collection_id} has {collection_number} documents."
collection_number = self.client.get_collection(collection_id).count()
response_string = f"collection {collection_id} has {collection_number} documents. {string} ids is {ids}"
return response
if file is not None and string is not None:
return response_file + " \n and " + response_string
elif file is not None and string is None:
return response_file
elif file is None and string is not None:
return response_string
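Note: the rewritten tail of processing() builds a separate message for the file and string inputs and combines them when both are present. A minimal sketch of the string branch and the combined return, assuming the langchain Chroma wrapper imported at the top of this file; the fixed id "1" mirrors the changed line, so repeated string upserts overwrite the same entry:

# Sketch: upsert one raw string with a fixed id, then combine responses.
if string is not None:
    ids = "1"   # fixed id as in the diff; repeated upserts reuse this id
    Chroma.from_texts(
        texts=[string],
        embedding=self.embedding_model,
        ids=[ids],
        collection_name=collection_id,
        client=self.client,
    )
    collection_number = self.client.get_collection(collection_id).count()
    response_string = (
        f"collection {collection_id} has {collection_number} documents. "
        f"{string} ids is {ids}"
    )
if file is not None and string is not None:
    return response_file + " \n and " + response_string
elif file is not None:
    return response_file
elif string is not None:
    return response_string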
@@ -118,17 +128,19 @@ class ChromaUpsert(Blackbox):
if user_file is None and user_string is None:
return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)
# data = await user_file.read()
# with open(f'./{data.filename}', 'wb') as f:
# f.write(content)
if user_file is not None:
pdf_bytes = await user_file.read()
loader = PyPDFLoader(f'./{user_file.filename}')
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
print("docs: ",docs)
custom_filename = user_file.filename
# get the system's temporary directory path
safe_filename = os.path.join(tempfile.gettempdir(), os.path.basename(custom_filename))
with open(safe_filename, "wb") as f:
f.write(pdf_bytes)
else:
safe_filename = None
return JSONResponse(
content={"response": self.processing(user_collection_id, user_file, user_string, user_context, user_setting)},
content={"response": self.processing(user_collection_id, safe_filename, user_string, user_context, user_setting)},
status_code=status.HTTP_200_OK)
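Note: the endpoint now writes the uploaded bytes into the system temp directory and hands that path to processing() instead of the raw UploadFile. A minimal sketch of the new upload handling, using only what appears in the hunk plus the os/tempfile imports added above:

# Sketch: persist the upload to a safe temp path before processing.
if user_file is not None:
    pdf_bytes = await user_file.read()
    # basename drops any directory components a client could smuggle in
    safe_filename = os.path.join(
        tempfile.gettempdir(), os.path.basename(user_file.filename)
    )
    with open(safe_filename, "wb") as f:
        f.write(pdf_bytes)
else:
    safe_filename = None
return JSONResponse(
    content={"response": self.processing(
        user_collection_id, safe_filename, user_string, user_context, user_setting
    )},
    status_code=status.HTTP_200_OK,
)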

View File

@@ -94,7 +94,7 @@ class Fastchat(Blackbox):
user_model_name = data.get("model_name")
user_context = data.get("context")
user_prompt = data.get("prompt")
user_question = data.get("question")
user_template = data.get("template")
user_temperature = data.get("temperature")
user_top_p = data.get("top_p")
@@ -102,7 +102,8 @@ class Fastchat(Blackbox):
user_n = data.get("n")
user_max_tokens = data.get("max_tokens")
if user_prompt is None:
if user_question is None:
return JSONResponse(content={"error": "question is required"}, status_code=status.HTTP_400_BAD_REQUEST)
if user_model_name is None or user_model_name.isspace() or user_model_name == "":
@@ -122,11 +123,11 @@ class Fastchat(Blackbox):
user_top_k = -1
if user_n is None or user_n == "":
user_n = 3
user_n = 1
if user_max_tokens is None or user_max_tokens == "":
user_max_tokens = 1024
return JSONResponse(content={"response": self.processing(user_model_name, user_prompt, user_template, user_context,
return JSONResponse(content={"response": self.processing(user_model_name, user_question, user_template, user_context,
user_temperature, user_top_p, user_top_k, user_n, user_max_tokens)}, status_code=status.HTTP_200_OK)
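Note: the handler now validates a "question" field rather than "prompt", defaults n to 1, and forwards user_question to processing(). A minimal sketch of the changed request handling; the other parameter defaults are kept as in the hunk:

# Sketch: read the question, require it, apply the new default n=1,
# and forward everything to processing().
user_question = data.get("question")
if user_question is None:
    return JSONResponse(
        content={"error": "question is required"},
        status_code=status.HTTP_400_BAD_REQUEST,
    )
if user_n is None or user_n == "":
    user_n = 1          # was 3 before this commit
if user_max_tokens is None or user_max_tokens == "":
    user_max_tokens = 1024
return JSONResponse(
    content={"response": self.processing(
        user_model_name, user_question, user_template, user_context,
        user_temperature, user_top_p, user_top_k, user_n, user_max_tokens,
    )},
    status_code=status.HTTP_200_OK,
)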

test.pdf (new file; no text diff shown)