import os
from typing import Dict, List, Optional, Tuple

# NOTE: the bare name `Document` imported from docx is shadowed by
# langchain.schema.Document below; use the DocxDocument alias to reach
# the python-docx reader class.
from docx import Document
from docx import Document as DocxDocument
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma


class DocxQA:
    """Question answering over local DOCX files via a RetrievalQA chain.

    Workflow: read .docx files -> wrap contents as LangChain ``Document``
    objects -> split, embed and index them in Chroma -> answer questions
    through a RetrievalQA chain, optionally prefixing conversation history.
    """

    def __init__(self, openai_api_key: str, chunk_size: int = 1000, chunk_overlap: int = 200):
        """Initialize the document QA system.

        Args:
            openai_api_key: OpenAI API key.
            chunk_size: Maximum characters per text chunk.
            chunk_overlap: Character overlap between adjacent chunks.
        """
        self.openai_api_key = openai_api_key
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.qa_chain = None  # built lazily by create_qa_chain()
        self.conversation_history: List[Tuple[str, str]] = []  # (question, answer) pairs

    def read_docx(self, file_path: str) -> str:
        """Read the paragraph text of a DOCX file.

        Args:
            file_path: Path to the .docx file.

        Returns:
            All paragraphs joined with newlines, or "" on any failure.
        """
        try:
            # Must use the python-docx class via the DocxDocument alias:
            # the plain name `Document` is shadowed at module level by
            # langchain.schema.Document, which cannot open files.
            doc = DocxDocument(file_path)
            return "\n".join(para.text for para in doc.paragraphs)
        except Exception as e:
            print(f"读取文件失败: {str(e)}")
            return ""

    def process_documents(self, file_paths: List[str]) -> List["Document"]:
        """Convert a list of files into LangChain Document objects.

        Args:
            file_paths: Paths of the files to process; missing files and
                unsupported extensions are skipped with a message.

        Returns:
            One Document per successfully read .docx file, with the source
            path recorded in its metadata.
        """
        documents = []

        for file_path in file_paths:
            if not os.path.exists(file_path):
                print(f"文件不存在: {file_path}")
                continue

            file_ext = os.path.splitext(file_path)[1].lower()

            if file_ext == '.docx':
                content = self.read_docx(file_path)
                # Empty content means the read failed; don't index it.
                if content:
                    documents.append(Document(page_content=content, metadata={"source": file_path}))
            else:
                print(f"不支持的文件类型: {file_ext}")

        return documents

    def create_qa_chain(self, documents: List["Document"]) -> None:
        """Build the RetrievalQA chain from the given documents.

        Splits the documents into chunks, embeds them into a Chroma vector
        store, and wires a "stuff"-type RetrievalQA chain onto its retriever.

        Args:
            documents: Documents to index; a no-op (with a message) if empty.
        """
        if not documents:
            print("没有文档可供处理")
            return

        # Split into overlapping chunks sized by the constructor settings.
        text_splitter = CharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)
        texts = text_splitter.split_documents(documents)

        # Embed the chunks and index them in an in-memory Chroma store.
        embeddings = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
        vectorstore = Chroma.from_documents(texts, embeddings)

        # Assemble the retrieval-augmented QA chain.
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=OpenAI(openai_api_key=self.openai_api_key),
            chain_type="stuff",
            retriever=vectorstore.as_retriever()
        )

    def ask_question(self, question: str) -> Optional[str]:
        """Ask the documents a question, with follow-up support.

        Prior (question, answer) pairs are prepended as context so the
        model can resolve references in follow-up questions.

        Args:
            question: The question text.

        Returns:
            The answer text, or None if the chain is not initialized.
        """
        if not self.qa_chain:
            print("问答链未初始化，请先处理文档")
            return None

        # Prefix the conversation history, if any, as context.
        if self.conversation_history:
            context = "\n".join([f"Q: {q}\nA: {a}" for q, a in self.conversation_history])
            full_question = f"基于以下对话历史进行回答：\n{context}\n\n当前问题：{question}"
        else:
            full_question = question

        # Run first, record after: appending a placeholder before the call
        # would leave a half-filled (question, "") entry in the history if
        # the chain raises.
        answer = self.qa_chain.run(full_question)
        self.conversation_history.append((question, answer))

        return answer

    def get_conversation_history(self) -> List[Tuple[str, str]]:
        """Return the conversation history.

        Returns:
            The list of (question, answer) tuples recorded so far.
        """
        return self.conversation_history

    def clear_conversation_history(self) -> None:
        """Clear the conversation history."""
        self.conversation_history = []

    def interactive_qa(self, max_rounds: int = 10) -> None:
        """Run an interactive QA session capped at ``max_rounds`` rounds.

        Special inputs: 'q'/'quit' ends the session, 'clear' wipes the
        history, an empty line is re-prompted without consuming a round.

        Args:
            max_rounds: Maximum number of question/answer rounds.
        """
        if not self.qa_chain:
            print("问答链未初始化，请先处理文档")
            return

        print("=== 文档问答系统已就绪 ===")
        print(f"你可以进行最多 {max_rounds} 轮问答")
        print("输入问题进行提问，输入'q'或'quit'退出，输入'clear'清除对话历史")

        round_count = 0
        while round_count < max_rounds:
            round_count += 1
            print(f"\n轮次 {round_count}/{max_rounds}")

            question = input("问题: ").strip()

            if question.lower() in ('q', 'quit'):
                # User quit early: don't claim the round limit was reached.
                print("\n=== 会话结束 ===")
                return

            if question.lower() == 'clear':
                self.clear_conversation_history()
                print("对话历史已清除")
                continue

            if not question:
                print("问题不能为空，请重新输入")
                round_count -= 1  # empty input does not consume a round
                continue

            answer = self.ask_question(question)
            if answer:
                print(f"回答: {answer}")

        print(f"\n=== 已达到最大轮数 {max_rounds}，会话结束 ===")


def main():
    """Entry point: index a desktop DOCX file and start interactive QA."""
    # Prefer the OPENAI_API_KEY environment variable over a hard-coded
    # literal; the original placeholder remains as the fallback so the
    # behavior is unchanged when the variable is unset.
    openai_api_key = os.environ.get("OPENAI_API_KEY", "你的API密钥")

    # Create the QA system with default chunking settings.
    docx_qa = DocxQA(openai_api_key=openai_api_key)

    # Files to index.
    desktop_path = os.path.expanduser("~/Desktop")
    file_paths = [
        os.path.join(desktop_path, "第3章 数据获取.docx"),
        # Add more file paths here as needed.
    ]

    # Read and wrap the documents.
    documents = docx_qa.process_documents(file_paths)

    # Build the retrieval chain over them.
    docx_qa.create_qa_chain(documents)

    # Start the interactive session, capped at 10 rounds.
    docx_qa.interactive_qa(max_rounds=10)


if __name__ == "__main__":
    main()