mirror of https://github.com/BoardWare-Genius/jarvis-models.git, synced 2025-12-13 16:53:24 +00:00
feat: update chroma_upsert
@@ -8,7 +8,7 @@ import requests
 import json
 
 from langchain_community.document_loaders.csv_loader import CSVLoader
-from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader, TextLoader, UnstructuredHTMLLoader, JSONLoader, Docx2txtLoader, UnstructuredExcelLoader
+from langchain_community.document_loaders import UnstructuredMarkdownLoader, DirectoryLoader, TextLoader, UnstructuredHTMLLoader, JSONLoader, Docx2txtLoader, UnstructuredExcelLoader, UnstructuredPDFLoader
 from langchain_community.vectorstores import Chroma
 from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
 from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
@@ -51,25 +51,7 @@ class ChromaUpsert(Blackbox):
         return isinstance(data, list)
 
     # @logging_time(logger=logger)
-    def processing(self, file, string, context: list, settings: dict) -> str:
-        # the user's operation history
-        if context is None:
-            context = []
-
-        # context = [
-        #     {
-        #         "collection_id": "123",
-        #         "action": "query",
-        #         "content": "你吃饭了吗",
-        #         "answer": "吃了",
-        #     },
-        #     {
-        #         "collection_id": "123",
-        #         "action": "upsert",
-        #         "content": "file_name or string",
-        #         "answer": "collection 123 has 12472 documents. /tmp/Cheap and Quick:Efficient Vision-Language Instruction Tuning for Large Language Models.pdf ids is 0~111",
-        #     },
-        # ]
+    def processing(self, file, string, string_ids, settings: dict) -> str:
 
         if settings is None:
             settings = {}
@@ -79,12 +61,18 @@ class ChromaUpsert(Blackbox):
             chroma_embedding_model = settings["settings"].get("chroma_embedding_model")
             chroma_host = settings["settings"].get("chroma_host")
             chroma_port = settings["settings"].get("chroma_port")
             chroma_collection_id = settings["settings"].get("chroma_collection_id")
+            user_chunk_size = settings["settings"].get("chunk_size", 256)
+            user_chunk_overlap = settings["settings"].get("chunk_overlap", 10)
+            user_separators = settings["settings"].get("separators", ["\n\n"])
         else:
             chroma_embedding_model = settings.get("chroma_embedding_model")
             chroma_host = settings.get("chroma_host")
             chroma_port = settings.get("chroma_port")
             chroma_collection_id = settings.get("chroma_collection_id")
+            user_chunk_size = settings.get("chunk_size", 256)
+            user_chunk_overlap = settings.get("chunk_overlap", 10)
+            user_separators = settings.get("separators", ["\n\n"])
 
         if chroma_embedding_model is None or chroma_embedding_model.isspace() or chroma_embedding_model == "":
             chroma_embedding_model = model_name=str(self.model_path / "bge-large-zh-v1.5")
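The three settings added above (chunk_size, chunk_overlap, separators) feed the text splitter built later in the diff. A minimal sketch, assuming the same defaults the diff uses, of what that splitter does to plain text; the sample string is made up:

from langchain.text_splitter import RecursiveCharacterTextSplitter

# same defaults as the new settings.get(...) calls: 256-character chunks, 10-character overlap,
# splitting only on blank lines
splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=10, separators=["\n\n"])

# three paragraphs of roughly 150 characters each, separated by blank lines (illustrative data)
text = ("alpha " * 25).strip() + "\n\n" + ("beta " * 25).strip() + "\n\n" + ("gamma " * 25).strip()
chunks = splitter.split_text(text)
print(len(chunks))                # 3: two neighbouring paragraphs never fit into one 256-character chunk
print([len(c) for c in chunks])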
@@ -94,9 +82,6 @@ class ChromaUpsert(Blackbox):
 
         if chroma_port is None or chroma_port.isspace() or chroma_port == "":
             chroma_port = "7000"
 
-        if chroma_collection_id is None or chroma_collection_id.isspace() or chroma_collection_id == "":
-            chroma_collection_id = "g2e"
-
         # load client and embedding model from init
         if re.search(r"localhost", chroma_host) and re.search(r"7000", chroma_port):
@@ -104,17 +89,19 @@ class ChromaUpsert(Blackbox):
         else:
             client = chromadb.HttpClient(host=chroma_host, port=chroma_port)
         print(f"chroma_embedding_model: {chroma_embedding_model}")
-        if re.search((self.model_path / "bge-large-zh-v1.5"), chroma_embedding_model):
+        if re.search(str(self.model_path / "bge-large-zh-v1.5"), chroma_embedding_model):
             embedding_model = self.embedding_model_1
         else:
-            embedding_model = SentenceTransformerEmbeddings(model_name=chroma_embedding_model, device = "cuda:0")
+            embedding_model = SentenceTransformerEmbeddings(model_name=chroma_embedding_model, model_kwargs={"device": "cuda"})
 
 
         if file is not None:
+            text_splitter = RecursiveCharacterTextSplitter(chunk_size=user_chunk_size, chunk_overlap=user_chunk_overlap, separators=user_separators)
+
             file_type = file.split(".")[-1]
             print("file_type: ",file_type)
             if file_type == "pdf":
-                loader = PyPDFLoader(file)
+                loader = UnstructuredPDFLoader(file)
             elif file_type == "txt":
                 loader = TextLoader(file)
             elif file_type == "csv":
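The changed constructor above moves device selection into model_kwargs, which is where langchain_community's SentenceTransformerEmbeddings (an alias of HuggingFaceEmbeddings) expects it, rather than as a top-level device keyword. A hedged, standalone sketch of the same construction; the model id is illustrative and any sentence-transformers model name or local path works the same way:

from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings

embedding_model = SentenceTransformerEmbeddings(
    model_name="BAAI/bge-large-zh-v1.5",   # illustrative; the blackbox itself falls back to a local bge-large-zh-v1.5 path
    model_kwargs={"device": "cuda"},       # forwarded to sentence-transformers, replacing the old device="cuda:0" keyword
)

vector = embedding_model.embed_query("what is a vector database?")
print(len(vector))                         # embedding dimension (1024 for bge-large-zh-v1.5)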
@@ -127,9 +114,10 @@ class ChromaUpsert(Blackbox):
                 loader = Docx2txtLoader(file)
             elif file_type == "xlsx":
                 loader = UnstructuredExcelLoader(file)
+            elif file_type == "md":
+                loader = UnstructuredMarkdownLoader(file, mode="single", strategy="fast")
 
             documents = loader.load()
-            text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
 
             docs = text_splitter.split_documents(documents)
 
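The new "md" branch above is split with the user-configured splitter introduced earlier, now that the hard-coded chunk_size=512 splitter is removed. A minimal, self-contained sketch of that path; "notes.md" is a hypothetical local file and UnstructuredMarkdownLoader needs the unstructured markdown extras installed:

from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

loader = UnstructuredMarkdownLoader("notes.md", mode="single", strategy="fast")  # same arguments as the new elif branch
documents = loader.load()                # "single" mode returns one Document for the whole file

splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=10, separators=["\n\n"])
docs = splitter.split_documents(documents)
print(len(docs), docs[0].metadata)       # chunk count plus the source metadata stored alongside the vectors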
@@ -140,10 +128,10 @@ class ChromaUpsert(Blackbox):
             collection_number = client.get_collection(chroma_collection_id).count()
             response_file = f"collection {chroma_collection_id} has {collection_number} documents. {file} ids is 0~{len(docs)-1}"
 
-        if string is not None:
+        if string is not None and string_ids is not None:
             # generate a new id ids_string: 1
             # ids = setting.ChromaSetting.string_ids[0] + 1
-            ids = "1"
+            ids = string_ids
 
             Chroma.from_texts(texts=[string], embedding=embedding_model, ids=[ids], collection_name=chroma_collection_id, client=client)
 
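With the id now supplied by the caller instead of the hard-coded "1", the same id can be reused to replace an earlier entry. A hedged sketch of that behaviour, assuming, as this blackbox does, that the installed langchain Chroma wrapper upserts by id rather than appending duplicates; host, port, collection name and embedding model are all illustrative:

import chromadb
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings

client = chromadb.HttpClient(host="localhost", port="7000")
embedding_model = SentenceTransformerEmbeddings(model_name="BAAI/bge-large-zh-v1.5")

# write a draft under id "42", then overwrite it with the corrected text
Chroma.from_texts(texts=["draft answer"], embedding=embedding_model, ids=["42"],
                  collection_name="g2e", client=client)
Chroma.from_texts(texts=["final answer"], embedding=embedding_model, ids=["42"],
                  collection_name="g2e", client=client)

collection = client.get_collection("g2e")
print(collection.count())                # id "42" is counted once if the wrapper upserts by id
print(collection.get(ids=["42"]))        # returns the latest text stored under that id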
@@ -151,7 +139,7 @@ class ChromaUpsert(Blackbox):
             collection_number = client.get_collection(chroma_collection_id).count()
             response_string = f"collection {chroma_collection_id} has {collection_number} documents. {string} ids is {ids}"
 
-
+            print(client.get_collection(chroma_collection_id).get())
         if file is not None and string is not None:
             return response_file + " \n and " + response_string
         elif file is not None and string is None:
@@ -165,7 +153,7 @@ class ChromaUpsert(Blackbox):
 
         user_file = (await request.form()).get("file")
         user_string = (await request.form()).get("string")
-        context = (await request.form()).get("context")
+        user_string_ids = (await request.form()).get("string_ids")
         setting: dict = (await request.form()).get("settings")
 
         if isinstance(setting, str):
@@ -176,21 +164,24 @@ class ChromaUpsert(Blackbox):
 
         if user_file is None and user_string is None:
             return JSONResponse(content={"error": "file or string is required"}, status_code=status.HTTP_400_BAD_REQUEST)
 
+        if user_string is not None and user_string_ids is None:
+            return JSONResponse(content={"error": "string_ids is required when string is provided"}, status_code=status.HTTP_400_BAD_REQUEST)
 
-        if user_file is not None:
+        if user_file is not None and user_file.size != 0:
             pdf_bytes = await user_file.read()
 
             custom_filename = user_file.filename
             # get the system temp directory path
             safe_filename = os.path.join(tempfile.gettempdir(), os.path.basename(custom_filename))
+            print("file_path", safe_filename)
             with open(safe_filename, "wb") as f:
                 f.write(pdf_bytes)
         else:
             safe_filename = None
 
         try:
-            txt = self.processing(safe_filename, user_string, context, setting)
+            txt = self.processing(safe_filename, user_string, user_string_ids, setting)
             print(txt)
         except ValueError as e:
             return JSONResponse(content={"error": str(e)}, status_code=status.HTTP_400_BAD_REQUEST)
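For completeness, a hedged sketch of how a client might call the updated handler. The form field names ("file", "string", "string_ids", "settings") come from the diff; the URL, port and settings values are assumptions:

import json
import requests

url = "http://localhost:8000/chroma_upsert"   # hypothetical route for this blackbox
settings = {"chroma_host": "localhost", "chroma_port": "7000", "chroma_collection_id": "g2e",
            "chunk_size": 256, "chunk_overlap": 10, "separators": ["\n\n"]}

# upsert a single string under an explicit id (string_ids is now required whenever string is sent)
resp = requests.post(url, data={"string": "hello chroma", "string_ids": "7",
                                "settings": json.dumps(settings)})
print(resp.status_code, resp.text)

# upsert a file instead; markdown is now handled alongside the existing pdf/txt/csv/docx/xlsx branches
with open("notes.md", "rb") as fh:
    resp = requests.post(url, data={"settings": json.dumps(settings)}, files={"file": fh})
print(resp.status_code, resp.text)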