Skip to main content

自托管嵌入

让我们加载SelfHostedEmbeddings、SelfHostedHuggingFaceEmbeddings和SelfHostedHuggingFaceInstructEmbeddings类。

from langchain.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
import runhouse as rh
# For an on-demand A100 using GCP, Azure, or Lambda
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)

# For an on-demand A10G using AWS (there are no single A100s on AWS)
# gpu = rh.cluster(name='rh-a10x', instance_type='g5.2xlarge', provider='aws')

# For an existing cluster
# gpu = rh.cluster(ips=['<ip of the cluster>'],
# ssh_creds={'ssh_user': '...', 'ssh_private_key':'<path to key>'},
# name='my-cluster')
# Run the default HuggingFace embedding model on the remote GPU cluster.
embeddings = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
text = "这是一个测试文档。"
# embed_query ships the text to the cluster and returns its embedding vector.
query_result = embeddings.embed_query(text)

对于SelfHostedHuggingFaceInstructEmbeddings也是类似的:

# The Instruct variant is wired up the same way: point it at the cluster hardware.
embeddings = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)

现在让我们使用自定义的加载函数加载一个嵌入模型:

def get_pipeline(model_id="facebook/bart-base"):
    """Build a Hugging Face feature-extraction pipeline for remote execution.

    Args:
        model_id: Hub id of the model to load. Defaults to
            ``"facebook/bart-base"`` (the value the original hard-coded),
            so existing zero-argument calls behave identically.

    Returns:
        A ``transformers`` pipeline mapping text to hidden states.
    """
    # Imports must live inside the function when run from a notebook so they
    # are resolved where the function actually executes (the remote cluster).
    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        pipeline,
    )

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return pipeline("feature-extraction", model=model, tokenizer=tokenizer)


def inference_fn(pipeline, prompt):
# 返回模型的最后一个隐藏状态
if isinstance(prompt, list):
return [emb[0][-1] for emb in pipeline(prompt)]
return pipeline(prompt)[0][-1]
# Wire the custom loader and inference function into SelfHostedEmbeddings.
embeddings = SelfHostedEmbeddings(
model_load_fn=get_pipeline,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],  # requirements installed on the cluster
inference_fn=inference_fn,
)
# Runs get_pipeline/inference_fn remotely and returns the embedding vector.
query_result = embeddings.embed_query(text)