from fastapi import FastAPI, HTTPException
from langchain_core.output_parsers import StrOutputParser
from pydantic import BaseModel
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.chains import RetrievalQA
from langchain_core.prompts.chat import ChatPromptTemplate
import torch
from typing import List, Dict, Any
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate

# Load the source document (GBK-encoded company FAQ) and split it into
# overlapping chunks sized for the sentence-embedding model.
raw_docs = TextLoader("/data/ai/langchain/doc/剪刀手公司.txt", encoding='gbk').load()
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = splitter.split_documents(raw_docs)

# Embed every chunk with a local MiniLM model and index the vectors in an
# in-memory FAISS store; expose it through the standard retriever interface.
embedder = HuggingFaceEmbeddings(
    model_name="/data/ai/langchain/sentence_embedding_model/AI-ModelScope/all-MiniLM-L6-v2"
)
vectorstore = FAISS.from_documents(docs, embedding=embedder)
retriever = vectorstore.as_retriever()

# Configure the generator: load the local Qwen2.5-7B-Instruct checkpoint and
# wrap it in a transformers text-generation pipeline that LangChain can drive.
model_name = "/data/ai/LLaMA-Factory/model/Qwen2.5-7B-Instruct/qwen/Qwen2___5-7B-Instruct/"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # halve memory vs fp32; bf16 keeps fp32-like range
    device_map='auto'            # shard across available GPUs, CPU fallback
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # Bound only the generated continuation. The original max_length=4096
    # counted prompt + completion, so a long prompt silently shrank (or
    # zeroed) the generation budget; max_new_tokens is the recommended knob.
    max_new_tokens=1024,
    do_sample=True,
    top_k=10,
    truncation=True,
    # Return only the completion. The pipeline default (True) prepends the
    # full prompt to the output, which this script would otherwise print.
    return_full_text=False,
)
llm = HuggingFacePipeline(pipeline=gen_pipeline)

# Prompt template for the customer-service bot answering questions about the
# 云剪 (cloud-clipping) service; {question} is filled with the user's query.
system_template = """任务:"用户来询问你云剪相关的服务。"
例如:
   - 输入: 云剪服务怎么收费
   - 输出: 您好，我们云剪收费是需要看您是在哪个地区，如果四川地区的话，1条是10元，超过100条的话，单价就是5元，超过1000条的话，优惠更多哦。
现在用户提问```{question}```"""

prompt = PromptTemplate(template=system_template, input_variables=["question"])

# LLMChain and Chain.run() are deprecated since langchain 0.1; the equivalent
# LCEL composition (prompt -> llm -> string parser) replaces all three of the
# original LLMChain / .run() / separate StrOutputParser steps.
# NOTE(review): the FAISS `retriever` built above is never wired into this
# chain, so answers do not use the indexed document at all — confirm whether a
# retrieval chain (e.g. RetrievalQA, already imported) was intended.
chain = prompt | llm | StrOutputParser()

# Sample user question to exercise the chain.
question = "请问云剪服务怎么收费呢"

print(chain.invoke({"question": question}))