update chroma and chat

This commit is contained in:
0Xiao0
2024-06-02 15:41:07 +08:00
parent a96e845807
commit 179281f032
9 changed files with 15174 additions and 101 deletions


@@ -70,11 +70,11 @@ def get_all_files(folder_path):
# Load the documents and split them into chunks
loader = TextLoader("/home/administrator/Workspace/jarvis-models/sample/20240529_store.txt")
loader = TextLoader("/home/administrator/Workspace/jarvis-models/sample/RAG_zh.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=50)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=0, length_function=len, is_separator_regex=True,separators=['\n', '\n\n'])
docs = text_splitter.split_documents(documents)
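# NOTE (added): with separators ['\n', '\n\n'] and chunk_size=10, the splitter cuts mainly on newlines;
# lines longer than 10 characters are kept as single chunks (LangChain only warns about oversized chunks),
# so this presumably yields roughly per-line chunks of RAG_zh.txt for fine-grained recall.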
@@ -99,65 +99,66 @@ db = Chroma.from_documents(documents=docs, embedding=embedding_model, ids=ids, c
start_time3 = time.time()
print("insert time ", start_time3 - start_time2)
collection_number = client.get_or_create_collection(id).count()
print("collection_number",collection_number)
# # Chroma retrieval
# from chromadb.utils import embedding_functions
# embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/administrator/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
# client = chromadb.HttpClient(host='172.16.5.8', port=7000)
# collection = client.get_collection("g2e", embedding_function=embedding_model)
# Chroma retrieval
from chromadb.utils import embedding_functions
embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/administrator/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
client = chromadb.HttpClient(host='172.16.5.8', port=7000)
collection = client.get_collection("g2e", embedding_function=embedding_model)
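# Note (added): the retrieval side must attach the same embedding model (bge-large-zh-v1.5) that was
# used at insert time, otherwise query vectors and stored vectors are not comparable; "g2e" is assumed
# to be the collection populated above on the standalone Chroma server.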
# print(collection.count())
# import time
# start_time = time.time()
# query = "如何前往威尼斯人"
# # query it
# results = collection.query(
# query_texts=[query],
# n_results=3,
# )
print(collection.count())
import time
start_time = time.time()
query = "如何前往威尼斯人"
# query it
results = collection.query(
query_texts=[query],
n_results=3,
)
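# collection.query() (added note) embeds the query text with the attached embedding_function and returns
# the n_results nearest chunks as a dict with per-query lists under "documents", "metadatas" and "distances".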
response = results["documents"]
print("response: ", response)
print("time: ", time.time() - start_time)
# response = results["documents"]
# print("response: ", response)
# print("time: ", time.time() - start_time)
# Summarize the retrieved context with the LLM
import requests
# # Summarize the retrieved context with the LLM
# import requests
model_name = "Qwen1.5-14B-Chat"
chat_inputs={
"model": model_name,
"messages": [
{
"role": "user",
"content": f"问题: {query}。- 根据知识库内的检索结果,以清晰简洁的表达方式回答问题。- 只从检索内容中选取与问题密切相关的信息。- 不要编造答案,如果答案不在经核实的资料中或无法从经核实的资料中得出,请回答“我无法回答您的问题。”检索内容:{response}"
}
],
# "temperature": 0,
# "top_p": user_top_p,
# "n": user_n,
# "max_tokens": user_max_tokens,
# "frequency_penalty": user_frequency_penalty,
# "presence_penalty": user_presence_penalty,
# "stop": 100
}
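# Note (added): chat_inputs follows the OpenAI chat-completions request schema; the sampling
# parameters above are left commented out, so the serving backend's defaults apply.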
# model_name = "Qwen1.5-14B-Chat"
# chat_inputs={
# "model": model_name,
# "messages": [
# {
# "role": "user",
# "content": f"问题: {query}。- 根据知识库内的检索结果,以清晰简洁的表达方式回答问题。- 只从检索内容中选取与问题密切相关的信息。- 不要编造答案,如果答案不在经核实的资料中或无法从经核实的资料中得出,请回答“我无法回答您的问题。”检索内容:{response}"
# }
# ],
# # "temperature": 0,
# # "top_p": user_top_p,
# # "n": user_n,
# # "max_tokens": user_max_tokens,
# # "frequency_penalty": user_frequency_penalty,
# # "presence_penalty": user_presence_penalty,
# # "stop": 100
# }
key ="YOUR_API_KEY"
# key ="YOUR_API_KEY"
header = {
'Content-Type': 'application/json',
'Authorization': "Bearer " + key
}
url = "http://172.16.5.8:23333/v1/chat/completions"
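# Assumption (added): 172.16.5.8:23333 exposes an OpenAI-compatible /v1/chat/completions endpoint
# (the variable name suggests a FastChat-style server), so a plain JSON POST with a Bearer key works.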
# header = {
# 'Content-Type': 'application/json',
# 'Authorization': "Bearer " + key
# }
# url = "http://172.16.5.8:23333/v1/chat/completions"
fastchat_response = requests.post(url, json=chat_inputs, headers=header)
# print(fastchat_response.json())
# fastchat_response = requests.post(url, json=chat_inputs, headers=header)
# # print(fastchat_response.json())
print("\n question: ", query)
print("\n ",model_name, fastchat_response.json()["choices"][0]["message"]["content"])
# print("\n question: ", query)
# print("\n ",model_name, fastchat_response.json()["choices"][0]["message"]["content"])