import torch
from langchain import FAISS
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.document_loaders import TextLoader
"""
本地LLM加载,使用HuggingFacePipeline连接到langchain
"""
localmodels = [
                r'E:\llama\text-generation-webui\models\Baichuan2-7B-Base',
                r'E:\llama\text-generation-webui\models\Baichuan2-7B-Chat',
                r'E:\llama\text-generation-webui\models\mistralai_Mistral-7B-v0.1',
                r'E:\llama\text-generation-webui\models\Qwen_Qwen-7B',
                r'E:\llama\text-generation-webui\models\Qwen_Qwen-14B'
                ]
modeid = localmodels[1]
print(modeid)
tokenizer = AutoTokenizer.from_pretrained(
    modeid, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    modeid, trust_remote_code=True, device_map='cuda:0',torch_dtype=torch.bfloat16)

taskid = "text2text-generation"

pipe = pipeline(
    task=taskid,
    model=model,
    tokenizer=tokenizer,
    max_length=4*1024
    # device=0
)

llm = HuggingFacePipeline(pipeline=pipe)

# Load the source novel as a single LangChain document.
loader = TextLoader('meici.txt', encoding='utf-8')
doc = loader.load()
print(f"You have {len(doc)} document")
print(f"You have {len(doc[0].page_content)} characters in that document")

# Split the novel into overlapping chunks so each piece fits the
# retriever/LLM context window; the 400-char overlap preserves
# continuity across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=400)
docs = text_splitter.split_documents(doc)

# Total character count across all chunks, used to report the average
# chunk size (generator expression — no intermediate list needed).
num_total_characters = sum(len(x.page_content) for x in docs)

print(f"Now you have {len(docs)} documents that have an average of {num_total_characters / len(docs):,.0f} characters (smaller pieces)")

# Embedding engine: a local Chinese sentence-embedding model.
embedding_model_name = 'shibing624/text2vec-base-chinese'
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)

# Embed every chunk and index the vectors together with the original text
# in an in-memory FAISS store. No OpenAI API calls are made here — the
# embedding model runs locally (it may be downloaded from the HF hub on
# first use).
docsearch = FAISS.from_documents(docs, embeddings)

# Build the retrieval QA chain. The "stuff" chain type simply concatenates
# all retrieved chunks into a single prompt for the LLM.
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.as_retriever())

# Sample questions about the story (kept in Chinese to match the corpus).
questions = [
        "故事的主角是谁",
        "故事中出现多少个男性角色",
        "故事中出现多少个女性角色",
        "故事中出现多少个人物",
        "故事中最大的官职是什么",
    ]

# Ask each question and print the retrieved-augmented answer.
for question in questions:
    print("Query:", question)
    answer = qa.run(question)
    print("Answer:", answer)