Merge main into Tom

This commit is contained in:
tom
2025-08-18 17:37:07 +08:00
13 changed files with 524 additions and 192 deletions


@@ -8,7 +8,7 @@ import requests
 import json
 from langchain_community.document_loaders.csv_loader import CSVLoader
-from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader, TextLoader, UnstructuredHTMLLoader, JSONLoader, Docx2txtLoader, UnstructuredExcelLoader
+from langchain_community.document_loaders import UnstructuredMarkdownLoader, DirectoryLoader, TextLoader, UnstructuredHTMLLoader, JSONLoader, Docx2txtLoader, UnstructuredExcelLoader, UnstructuredPDFLoader
 from langchain_community.vectorstores import Chroma
 from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
 from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
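
Note: the import swap replaces PyPDFLoader with UnstructuredPDFLoader and brings in UnstructuredMarkdownLoader; both delegate to the unstructured package at load time. A quick standalone check (illustrative only, not part of the commit):

from langchain_community.document_loaders import UnstructuredMarkdownLoader

# Loads a markdown file the same way the new "md" branch below does;
# requires the `unstructured` package to be installed.
docs = UnstructuredMarkdownLoader("README.md", mode="single", strategy="fast").load()
print(len(docs), docs[0].metadata)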
@@ -21,6 +21,10 @@ import logging
 from ..log.logging_time import logging_time
 import re
+from pathlib import Path
+from ..configuration import Configuration
+from ..configuration import PathConf
 logger = logging.getLogger(__name__)
+DEFAULT_COLLECTION_ID = "123"
@@ -31,9 +35,13 @@ class ChromaUpsert(Blackbox):
     def __init__(self, *args, **kwargs) -> None:
         # config = read_yaml(args[0])
         # load embedding model
-        self.embedding_model_1 = SentenceTransformerEmbeddings(model_name="/Workspace/Models/BAAI/bge-large-zh-v1.5", model_kwargs={"device": "cuda"})
+        path = PathConf(Configuration())
+        self.model_path = Path(path.chroma_rerank_embedding_model)
+        self.embedding_model_1 = SentenceTransformerEmbeddings(model_name=str(self.model_path / "bge-large-zh-v1.5"), model_kwargs={"device": "cuda"})
         # load chroma db
-        self.client_1 = chromadb.HttpClient(host='10.6.44.141', port=7000)
+        self.client_1 = chromadb.HttpClient(host='localhost', port=7000)

     def __call__(self, *args, **kwargs):
         return self.processing(*args, **kwargs)
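
Note: the model directory is now resolved from configuration instead of being hard-coded. A minimal sketch of the resolution, assuming path.chroma_rerank_embedding_model points at the directory holding the local weights (e.g. /Workspace/Models/BAAI, the parent of the old hard-coded path):

from pathlib import Path

# Stand-in value for PathConf(Configuration()).chroma_rerank_embedding_model
model_path = Path("/Workspace/Models/BAAI")
print(str(model_path / "bge-large-zh-v1.5"))  # -> /Workspace/Models/BAAI/bge-large-zh-v1.5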
@@ -51,45 +59,44 @@ class ChromaUpsert(Blackbox):
         # # chroma_query settings
         if "settings" in settings:
             chroma_embedding_model = settings["settings"].get("chroma_embedding_model")
-            chroma_host = settings["settings"].get("chroma_host")
-            chroma_port = settings["settings"].get("chroma_port")
-            chroma_collection_id = settings["settings"].get("chroma_collection_id")
+            chroma_host = settings["settings"].get("chroma_host", "localhost")
+            chroma_port = settings["settings"].get("chroma_port", "7000")
+            chroma_collection_id = settings["settings"].get("chroma_collection_id", DEFAULT_COLLECTION_ID)
             user_chunk_size = settings["settings"].get("chunk_size", 256)
             user_chunk_overlap = settings["settings"].get("chunk_overlap", 10)
             user_separators = settings["settings"].get("separators", ["\n\n"])
         else:
             chroma_embedding_model = settings.get("chroma_embedding_model")
-            chroma_host = settings.get("chroma_host")
-            chroma_port = settings.get("chroma_port")
-            chroma_collection_id = settings.get("chroma_collection_id")
+            chroma_host = settings.get("chroma_host", "localhost")
+            chroma_port = settings.get("chroma_port", "7000")
+            chroma_collection_id = settings.get("chroma_collection_id", DEFAULT_COLLECTION_ID)
             user_chunk_size = settings.get("chunk_size", 256)
             user_chunk_overlap = settings.get("chunk_overlap", 10)
             user_separators = settings.get("separators", ["\n\n"])
         if chroma_embedding_model is None or chroma_embedding_model.isspace() or chroma_embedding_model == "":
-            chroma_embedding_model = "/Workspace/Models/BAAI/bge-large-zh-v1.5"
-        if chroma_host is None or chroma_host.isspace() or chroma_host == "":
-            chroma_host = "10.6.82.192"
-        if chroma_port is None or chroma_port.isspace() or chroma_port == "":
-            chroma_port = "8000"
-        if chroma_collection_id is None or chroma_collection_id.isspace() or chroma_collection_id == "":
-            chroma_collection_id = "g2e"
+            chroma_embedding_model = str(self.model_path / "bge-large-zh-v1.5")
         # load client and embedding model from init
-        if re.search(r"10.6.82.192", chroma_host) and re.search(r"8000", chroma_port):
+        if re.search(r"localhost", chroma_host) and re.search(r"7000", chroma_port):
             client = self.client_1
         else:
             client = chromadb.HttpClient(host=chroma_host, port=chroma_port)
         print(f"chroma_embedding_model: {chroma_embedding_model}")
-        if re.search(r"/Workspace/Models/BAAI/bge-large-zh-v1.5", chroma_embedding_model):
+        if re.search(str(self.model_path / "bge-large-zh-v1.5"), chroma_embedding_model):
             embedding_model = self.embedding_model_1
         else:
-            embedding_model = SentenceTransformerEmbeddings(model_name=chroma_embedding_model, device = "cuda:0")
+            embedding_model = SentenceTransformerEmbeddings(model_name=chroma_embedding_model, model_kwargs={"device": "cuda"})
         response_file = ''
         response_string = ''
         if file is not None:
             text_splitter = RecursiveCharacterTextSplitter(chunk_size=user_chunk_size, chunk_overlap=user_chunk_overlap, separators=user_separators)
             file_type = file.split(".")[-1]
             print("file_type: ", file_type)
             if file_type == "pdf":
-                loader = PyPDFLoader(file)
+                loader = UnstructuredPDFLoader(file)
             elif file_type == "txt":
                 loader = TextLoader(file)
             elif file_type == "csv":
@@ -102,9 +109,10 @@ class ChromaUpsert(Blackbox):
                 loader = Docx2txtLoader(file)
             elif file_type == "xlsx":
                 loader = UnstructuredExcelLoader(file)
+            elif file_type == "md":
+                loader = UnstructuredMarkdownLoader(file, mode="single", strategy="fast")
             documents = loader.load()
             text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=0)
             docs = text_splitter.split_documents(documents)
@@ -148,13 +156,13 @@ class ChromaUpsert(Blackbox):
         if user_text is not None and user_text_ids is None:
             return JSONResponse(content={"error": "text_ids is required when text is provided"}, status_code=status.HTTP_400_BAD_REQUEST)
-        if user_file is not None:
+        if user_file is not None and user_file.size != 0:
             pdf_bytes = await user_file.read()
             custom_filename = user_file.filename
             # get the system temp directory path
             safe_filename = os.path.join(tempfile.gettempdir(), os.path.basename(custom_filename))
             print("file_path", safe_filename)
             with open(safe_filename, "wb") as f:
                 f.write(pdf_bytes)
         else:
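
The added user_file.size != 0 guard skips empty multipart uploads (UploadFile.size is available in recent Starlette/FastAPI versions). A minimal client sketch, assuming the service listens on localhost:8000 and the route is /upsert (neither appears in this diff, so both are illustrative):

import requests

with open("doc.md", "rb") as f:
    # "user_file" matches the handler's parameter name; the URL is hypothetical.
    resp = requests.post("http://localhost:8000/upsert", files={"user_file": ("doc.md", f)})
print(resp.status_code, resp.text)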