文心一言的key申请,参考:
LangChain 三行代码实现 文心一言 Prompt+LLM 大模型问答
https://www.jianshu.com/p/8ec9ea907446
上干货:
"""Retrieval-augmented QA over a local text file using Baidu Qianfan (Wenxin).

Pipeline: load document -> chunk -> embed into a Chroma vector store ->
ConversationalRetrievalChain with chat-history memory -> ask one question.
"""
from langchain_community.llms import QianfanLLMEndpoint
# Use langchain_community paths directly — the langchain.* equivalents are
# deprecated re-export shims (the file already imports QianfanLLMEndpoint
# from langchain_community, so this keeps import style consistent).
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredFileLoader

# NOTE(review): prefer loading credentials from environment variables
# (QIANFAN_AK / QIANFAN_SK) instead of hard-coding them in source.
WENXIN_APP_Key = "your key"
WENXIN_APP_SECRET = "your secret"

# 1. Build the retrieval vector store.
# 1.1 Load the long document.
loader = UnstructuredFileLoader("test_data/test.txt")
document = loader.load()

# 1.2 Chunk it. Separators are tried in order; the empty string "" must be
# LAST — it matches between every character, so any separator listed after
# it (the Chinese "。" and "," in the original) would never be reached.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=384,
    chunk_overlap=0,
    separators=["\n\n", "\n", "。", ",", " ", ""],
)
documents = text_splitter.split_documents(document)

# 1.3 Embedding model + vector store, exposed as a retriever.
embeddings = QianfanEmbeddingsEndpoint(
    qianfan_ak=WENXIN_APP_Key,
    qianfan_sk=WENXIN_APP_SECRET,
    model="Embedding-V1",
)
retriever = Chroma.from_documents(documents, embeddings).as_retriever()

# 2. QA LLM and chat-history buffer.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = QianfanLLMEndpoint(model="ERNIE-Bot", qianfan_ak=WENXIN_APP_Key, qianfan_sk=WENXIN_APP_SECRET)
qa = ConversationalRetrievalChain.from_llm(llm, retriever, memory=memory)

# Calling the chain directly (qa({...})) is deprecated; use .invoke().
result = qa.invoke({"question": "三级冻伤是什么意思"})
print(result)