import os
from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# Load environment variables (DASHSCOPE_API_KEY, MODEL_NAME, EMBEDDING_MODEL)
# from a local .env file at import time, so os.getenv() calls below can see them.
load_dotenv()

def create_qa_chain(file_path: str):
    """Build a RetrievalQA chain over a single ``.txt`` or ``.pdf`` document.

    Pipeline: load the document, split it into overlapping chunks, embed the
    chunks into a FAISS vector store, and wire a retriever plus a Tongyi LLM
    into a RetrievalQA chain with a grounded-answer prompt.

    Args:
        file_path: Path to the document to index (extension decides the loader).

    Returns:
        A ``RetrievalQA`` chain ready to answer questions about the document.

    Raises:
        ValueError: If the file extension is unsupported or the
            ``DASHSCOPE_API_KEY`` environment variable is not set.
    """
    # Fail fast with a clear message instead of an opaque auth error deep
    # inside the embedding / LLM calls. Read once, reuse below.
    api_key = os.getenv("DASHSCOPE_API_KEY")
    if not api_key:
        raise ValueError("环境变量 DASHSCOPE_API_KEY 未设置")

    # 1. Load the document. Compare the lowered suffix so "report.PDF" and
    #    "notes.TXT" are accepted too (the original check was case-sensitive).
    lowered = file_path.lower()
    if lowered.endswith(".pdf"):
        loader = PyPDFLoader(file_path)
    elif lowered.endswith(".txt"):
        loader = TextLoader(file_path, encoding="utf-8")
    else:
        raise ValueError("仅支持 .txt 或 .pdf 文件")

    docs = loader.load()

    # 2. Split into overlapping chunks; the 50-char overlap preserves context
    #    across chunk boundaries for retrieval.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    splits = text_splitter.split_documents(docs)

    # 3. Embed the chunks and build an in-memory FAISS vector store.
    embeddings = DashScopeEmbeddings(
        model=os.getenv("EMBEDDING_MODEL"),
        dashscope_api_key=api_key,
    )
    vectorstore = FAISS.from_documents(splits, embeddings)

    # 4. LLM used to synthesize the final answer.
    llm = Tongyi(
        model_name=os.getenv("MODEL_NAME"),
        dashscope_api_key=api_key,
    )

    # 5. Assemble the QA chain. The prompt instructs the model to answer only
    #    from the retrieved context and admit when it does not know.
    prompt_template = """使用以下上下文回答问题。如果你不知道答案，就说不知道。
    上下文: {context}
    问题: {question}
    回答:"""
    PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        chain_type_kwargs={"prompt": PROMPT},
    )
    return qa_chain
