style: strip user home prefixes (/home/gpu, /home/administrator) from paths and point chroma/LLM hosts at 192.168.0.200

This commit is contained in:
0Xiao0
2024-10-28 17:38:40 +08:00
parent f4b971d2fd
commit 8fe010bbbe
12 changed files with 49 additions and 49 deletions

View File

@ -70,7 +70,7 @@ def get_all_files(folder_path):
# 加载文档和拆分文档
loader = TextLoader("/home/gpu/Workspace/jarvis-models/sample/RAG_zh.txt")
loader = TextLoader("/Workspace/jarvis-models/sample/RAG_zh.txt")
documents = loader.load()
@ -84,8 +84,8 @@ ids = ["20240521_store"+str(i) for i in range(len(docs))]
# 加载embedding模型和chroma server
embedding_model = SentenceTransformerEmbeddings(model_name='/home/gpu/Workspace/Models/BAAI/bge-large-zh-v1.5', model_kwargs={"device": "cuda"})
client = chromadb.HttpClient(host='10.6.81.119', port=7000)
embedding_model = SentenceTransformerEmbeddings(model_name='/Workspace/Models/BAAI/bge-large-zh-v1.5', model_kwargs={"device": "cuda"})
client = chromadb.HttpClient(host='192.168.0.200', port=7000)
id = "g2e"
#client.delete_collection(id)
@ -106,8 +106,8 @@ print("collection_number",collection_number)
# # chroma 召回
# from chromadb.utils import embedding_functions
# embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/gpu/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
# client = chromadb.HttpClient(host='10.6.81.119', port=7000)
# embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
# client = chromadb.HttpClient(host='192.168.0.200', port=7000)
# collection = client.get_collection("g2e", embedding_function=embedding_model)
# print(collection.count())
@ -152,7 +152,7 @@ print("collection_number",collection_number)
# 'Content-Type': 'application/json',
# 'Authorization': "Bearer " + key
# }
# url = "http://10.6.81.119:23333/v1/chat/completions"
# url = "http://192.168.0.200:23333/v1/chat/completions"
# fastchat_response = requests.post(url, json=chat_inputs, headers=header)
# # print(fastchat_response.json())

View File

@ -70,7 +70,7 @@ def get_all_files(folder_path):
# 加载文档和拆分文档
loader = TextLoader("/home/gpu/Workspace/jarvis-models/sample/RAG_en.txt")
loader = TextLoader("/Workspace/jarvis-models/sample/RAG_en.txt")
documents = loader.load()
@ -84,8 +84,8 @@ ids = ["20240521_store"+str(i) for i in range(len(docs))]
# 加载embedding模型和chroma server
embedding_model = SentenceTransformerEmbeddings(model_name='/home/gpu/Workspace/Models/BAAI/bge-small-en-v1.5', model_kwargs={"device": "cuda"})
client = chromadb.HttpClient(host='10.6.81.119', port=7000)
embedding_model = SentenceTransformerEmbeddings(model_name='/Workspace/Models/BAAI/bge-small-en-v1.5', model_kwargs={"device": "cuda"})
client = chromadb.HttpClient(host='192.168.0.200', port=7000)
id = "g2e_english"
client.delete_collection(id)
@ -106,8 +106,8 @@ print("collection_number",collection_number)
# # chroma 召回
# from chromadb.utils import embedding_functions
# embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/gpu/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
# client = chromadb.HttpClient(host='10.6.81.119', port=7000)
# embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
# client = chromadb.HttpClient(host='192.168.0.200', port=7000)
# collection = client.get_collection("g2e", embedding_function=embedding_model)
# print(collection.count())
@ -152,7 +152,7 @@ print("collection_number",collection_number)
# 'Content-Type': 'application/json',
# 'Authorization': "Bearer " + key
# }
# url = "http://10.6.81.119:23333/v1/chat/completions"
# url = "http://192.168.0.200:23333/v1/chat/completions"
# fastchat_response = requests.post(url, json=chat_inputs, headers=header)
# # print(fastchat_response.json())

View File

@ -66,7 +66,7 @@ def get_all_files(folder_path):
# 加载文档和拆分文档
# loader = TextLoader("/home/gpu/Workspace/jarvis-models/sample/RAG_zh.txt")
# loader = TextLoader("/Workspace/jarvis-models/sample/RAG_zh.txt")
# documents = loader.load()
@ -80,8 +80,8 @@ def get_all_files(folder_path):
# # 加载embedding模型和chroma server
# embedding_model = SentenceTransformerEmbeddings(model_name='/home/gpu/Workspace/Models/BAAI/bge-large-zh-v1.5', model_kwargs={"device": "cuda"})
# client = chromadb.HttpClient(host='10.6.81.119', port=7000)
# embedding_model = SentenceTransformerEmbeddings(model_name='/Workspace/Models/BAAI/bge-large-zh-v1.5', model_kwargs={"device": "cuda"})
# client = chromadb.HttpClient(host='192.168.0.200', port=7000)
# id = "g2e"
# client.delete_collection(id)
@ -102,8 +102,8 @@ def get_all_files(folder_path):
# chroma 召回
from chromadb.utils import embedding_functions
embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/gpu/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
client = chromadb.HttpClient(host='10.6.81.119', port=7000)
embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/Workspace/Models/BAAI/bge-large-zh-v1.5", device = "cuda")
client = chromadb.HttpClient(host='192.168.0.200', port=7000)
collection = client.get_collection("g2e", embedding_function=embedding_model)
print(collection.count())
@ -148,7 +148,7 @@ print("time: ", time.time() - start_time)
# 'Content-Type': 'application/json',
# 'Authorization': "Bearer " + key
# }
# url = "http://10.6.81.119:23333/v1/chat/completions"
# url = "http://192.168.0.200:23333/v1/chat/completions"
# fastchat_response = requests.post(url, json=chat_inputs, headers=header)
# # print(fastchat_response.json())

View File

@ -9,16 +9,16 @@ from langchain_community.embeddings.sentence_transformer import SentenceTransfor
import time
# chroma run --path chroma_db/ --port 8000 --host 0.0.0.0
# loader = TextLoader("/home/administrator/Workspace/chroma_data/粤语语料.txt",encoding="utf-8")
loader = TextLoader("/home/administrator/Workspace/jarvis-models/sample/RAG_boss.txt")
# loader = TextLoader("/Workspace/chroma_data/粤语语料.txt",encoding="utf-8")
loader = TextLoader("/Workspace/jarvis-models/sample/RAG_boss.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10, chunk_overlap=0, length_function=len, is_separator_regex=True,separators=['\n', '\n\n'])
docs = text_splitter.split_documents(documents)
print("len(docs)", len(docs))
ids = ["粤语语料"+str(i) for i in range(len(docs))]
embedding_model = SentenceTransformerEmbeddings(model_name='/home/administrator/Workspace/Models/BAAI/bge-m3', model_kwargs={"device": "cuda:1"})
client = chromadb.HttpClient(host='172.16.4.7', port=7000)
embedding_model = SentenceTransformerEmbeddings(model_name='/Workspace/Models/BAAI/bge-m3', model_kwargs={"device": "cuda:1"})
client = chromadb.HttpClient(host='192.168.0.200', port=7000)
id = "boss"
client.delete_collection(id)
@ -28,13 +28,13 @@ db = Chroma.from_documents(documents=docs, embedding=embedding_model, ids=ids, c
embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/home/administrator/Workspace/Models/BAAI/bge-m3", device = "cuda:1")
embedding_model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="/Workspace/Models/BAAI/bge-m3", device = "cuda:1")
client = chromadb.HttpClient(host='172.16.4.7', port=7000)
client = chromadb.HttpClient(host='192.168.0.200', port=7000)
collection = client.get_collection(id, embedding_function=embedding_model)
reranker_model = CrossEncoder("/home/administrator/Workspace/Models/BAAI/bge-reranker-v2-m3", max_length=512, device = "cuda:1")
reranker_model = CrossEncoder("/Workspace/Models/BAAI/bge-reranker-v2-m3", max_length=512, device = "cuda:1")
while True:
usr_question = input("\n 请输入问题: ")