
Using Hugging Face text embedding models
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
import time
os.environ["DASHSCOPE_API_KEY"] = "your-dashscope-api-key"  # replace with your own key
llm = Tongyi()

from langchain_community.document_loaders import UnstructuredURLLoader

embeddings = HuggingFaceEmbeddings()  # defaults to sentence-transformers/all-mpnet-base-v2

# Record the start time
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the execution time
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# # First run: build the index and save it locally
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index2")


# Record the start time
start_time = time.time()

# Load the saved index from local disk (newer langchain-community versions
# require allow_dangerous_deserialization=True for pickle-backed FAISS indexes)
vectorstore = FAISS.load_local("faiss_index2", embeddings, allow_dangerous_deserialization=True)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided, answer
with "I don't know".

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
# Retrieve documents for {context} and pass the raw question through as {question}
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the execution time
end_time = time.time()
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")



The example above uses the default model; the following specifies all-MiniLM-L6-v2 instead:

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
import time
os.environ["DASHSCOPE_API_KEY"] = "your-dashscope-api-key"  # replace with your own key
llm = Tongyi()

from langchain_community.document_loaders import UnstructuredURLLoader
model_name = "all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(
    model_name=model_name,
)

# Record the start time
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the execution time
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# Record the start time
start_time = time.time()

# First run: build the index and save it locally
vectorstore = FAISS.from_documents(documents, embeddings)
vectorstore.save_local("faiss_index2")


# # On later runs, load the saved index from local disk instead:
# vectorstore = FAISS.load_local("faiss_index2", embeddings, allow_dangerous_deserialization=True)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided, answer
with "I don't know".

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
# Retrieve documents for {context} and pass the raw question through as {question}
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the execution time
end_time = time.time()
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")




For the models that can be used, see here.
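
Besides the generic HuggingFaceEmbeddings wrapper, LangChain also provides HuggingFaceBgeEmbeddings for the BAAI BGE model family. Below is a minimal sketch of using it; the model name BAAI/bge-small-en-v1.5 and the kwargs are illustrative choices, not part of the original example:

from langchain_community.embeddings import HuggingFaceBgeEmbeddings

# Illustrative model choice; any BGE checkpoint on the Hugging Face Hub
# can be passed the same way
model_name = "BAAI/bge-small-en-v1.5"
embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},  # BGE vectors are usually normalized for cosine similarity
)

query_result = embeddings.embed_query("This is a test document.")
print(query_result[:3])

The resulting embeddings object is a drop-in replacement for HuggingFaceEmbeddings in the FAISS code above.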
