赞
踩
中文向量化模型候选:
向量模型的各任务效果对比:
1、sentence-transformers/all-MiniLM-L6-v2 处理的token数量,向量维度为384维,支持多种语言。语义表达较差
2、Text-embedding-ada-002 处理token数量<=6000,向量维度1536
3、多语言模型:BAAI/bge-m3 支持的输入长度<=8192
向量索引构建:向量近似查询方法比较(HNSW 与 OPQ);图像检索场景下 OPQ 索引与 HNSW 索引的对比
# Build the embedding model used by the indexing and retrieval snippets below:
# CPU inference, with output vectors L2-normalised so cosine similarity
# reduces to a dot product.
# NOTE(review): HuggingFaceBgeEmbeddings is the BGE-specific wrapper, but the
# model is all-MiniLM-L6-v2 — the wrapper prepends a BGE query instruction;
# confirm that is intended for a non-BGE model.
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
model_name = "sentence-transformers/all-MiniLM-L6-v2"
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": True}
embeddings = HuggingFaceBgeEmbeddings(
model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)
1、存储源为 elasticsearch
# --- Imports, Elasticsearch client, and shared index layout -------------------
from typing import Any, Dict, Iterable

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from langchain.embeddings import DeterministicFakeEmbedding
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_elasticsearch import ElasticsearchRetriever

# Connection URL with basic-auth credentials embedded in the URL.
# NOTE(review): move real credentials out of source (env var / config file).
es_url = "http://user:password@localhost:9200"
es_client = Elasticsearch(hosts=[es_url])
es_client.info()  # fail fast if the cluster is unreachable

# Field names shared by the indexing snippet and the retrieval snippet below.
index_name = "test-langchain-retriever"
text_field = "text"
dense_vector_field = "fake_embedding"
num_characters_field = "num_characters"

# Toy corpus for the demo.
texts = [
    "foo",
    "bar",
    "world",
    "hello world",
    "hello",
    "foo bar",
    "bla bla foo",
]


def create_index(
    es_client: Elasticsearch,
    index_name: str,
    text_field: str,
    dense_vector_field: str,
    num_characters_field: str,
) -> None:
    """Create the demo index with a text, a dense-vector, and an integer field.

    Raises the client's "resource_already_exists" error if the index exists;
    callers are expected to run this once against a fresh cluster.
    """
    es_client.indices.create(
        index=index_name,
        mappings={
            "properties": {
                text_field: {"type": "text"},
                dense_vector_field: {"type": "dense_vector"},
                num_characters_field: {"type": "integer"},
            }
        },
    )
def index_data(
    es_client: Elasticsearch,
    index_name: str,
    text_field: str,
    dense_vector_field: str,
    embeddings: Embeddings,
    texts: Iterable[str],
    refresh: bool = True,
    num_characters_field: str = "num_characters",
) -> None:
    """Create the index, embed *texts*, and bulk-index one document per text.

    Each document stores the raw text, its embedding vector, and the text's
    character count.  *num_characters_field* was previously read from a
    module-level global inside the body; it is now an explicit keyword
    parameter with the same default, which is backward-compatible.
    """
    create_index(
        es_client, index_name, text_field, dense_vector_field, num_characters_field
    )
    # Materialise once: *texts* may be a one-shot iterable, and we need it
    # both for embedding and for building the bulk actions below.
    texts = list(texts)
    vectors = embeddings.embed_documents(texts)
    requests = [
        {
            "_op_type": "index",
            "_index": index_name,
            "_id": i,  # positional id — re-running overwrites, never duplicates
            text_field: text,
            dense_vector_field: vector,
            num_characters_field: len(text),
        }
        for i, (text, vector) in enumerate(zip(texts, vectors))
    ]
    bulk(es_client, requests)
    if refresh:
        # Make the new documents searchable immediately (demo convenience;
        # in production let the refresh interval do this).
        es_client.indices.refresh(index=index_name)


index_data(es_client, index_name, text_field, dense_vector_field, embeddings, texts)
2、elasticsearch 向量检索:
# Retrieval-side configuration — must match the values used at indexing time.
es_url = "http://user:password@localhost:9200"
index_name = "test-langchain-retriever"
text_field = "text"
dense_vector_field = "fake_embedding"
num_characters_field = "num_characters"


def gen_dsl(search_query: str) -> Dict:
    """Build the kNN query DSL body for *search_query*.

    The query text is embedded with the same model used at indexing time;
    Elasticsearch then returns the 5 nearest of 10 candidate vectors.
    """
    vector = embeddings.embed_query(search_query)  # same embeddings as for indexing
    return {
        "knn": {
            "field": dense_vector_field,
            "query_vector": vector,
            "k": 5,
            "num_candidates": 10,
        }
    }


# BUG FIX: the original passed the undefined name ``vector_query`` as
# body_func (NameError); the query builder defined above is ``gen_dsl``.
vector_retriever = ElasticsearchRetriever.from_es_params(
    index_name=index_name,
    body_func=gen_dsl,
    content_field=text_field,
    url=es_url,
)
vector_retriever.invoke("foo")
说明:简单的向量检索,耗时比较长。
原因:1、直接对全局使用了余弦相似度计算。(cos),未做任何优化
2、返回数据将向量内容全部返回
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。