import os
from typing import List, Optional

import openai
from langchain.chains import RetrievalQA
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader  # 保留文本加载器（如有需要）
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings


class DocxQA:
    """Retrieval-augmented question answering over .docx documents.

    Pipeline: read .docx paragraphs -> wrap as LangChain ``Document`` ->
    split into overlapping chunks -> embed into a Chroma vector store ->
    answer questions with a "stuff"-type RetrievalQA chain.
    """

    def __init__(self, openai_api_key: str, openai_base_url: str, chunk_size: int = 1200, chunk_overlap: int = 200):
        """Store credentials and chunking parameters.

        Args:
            openai_api_key: API key forwarded to the embedding model and LLM.
            openai_base_url: Base URL of the OpenAI-compatible endpoint.
            chunk_size: Maximum characters per text chunk.
            chunk_overlap: Characters shared between adjacent chunks.
        """
        self.openai_api_key = openai_api_key
        self.openai_base_url = openai_base_url
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        # Populated by create_qa_chain(); ask_question() refuses to run before that.
        self.qa_chain = None
        # Legacy module-level configuration (openai<1.0 style). Redundant here --
        # every LangChain component below receives the key/base explicitly --
        # but kept for backward compatibility with any other openai usage.
        openai.api_key = openai_api_key
        openai.api_base = openai_base_url

    def read_docx(self, file_path: str) -> str:
        """Return all paragraph text of a .docx file joined with newlines.

        Returns an empty string on any failure (missing file, bad format,
        python-docx not installed) -- callers treat "" as "skip this file".
        """
        try:
            # Local import avoids a name clash with langchain.schema.Document.
            from docx import Document
            doc = Document(file_path)
            return "\n".join(para.text for para in doc.paragraphs)
        except Exception as e:
            # Best-effort reader: report and continue rather than abort the batch.
            print(f"读取文件失败: {str(e)}")
            return ""

    def process_documents(self, file_paths: List[str]) -> List[Document]:
        """Convert the given .docx paths into LangChain ``Document`` objects.

        Missing files and unsupported extensions are reported and skipped;
        the returned list may therefore be shorter than ``file_paths`` (or empty).
        """
        documents = []
        for file_path in file_paths:
            if not os.path.exists(file_path):
                print(f"文件不存在: {file_path}")
                continue
            file_ext = os.path.splitext(file_path)[1].lower()
            if file_ext == '.docx':
                content = self.read_docx(file_path)
                if content:
                    documents.append(Document(page_content=content, metadata={"source": file_path}))
            else:
                print(f"不支持的文件类型: {file_ext}")
        return documents

    def create_qa_chain(self, documents: List[Document]) -> None:
        """Build the retrieval QA chain from the given documents.

        No-op (with a message) when ``documents`` is empty.
        """
        if not documents:
            print("没有文档可供处理")
            return
        # BUGFIX: CharacterTextSplitter takes a single `separator` keyword;
        # passing `separators=[...]` raised TypeError. The multi-separator
        # fallback behavior intended here is RecursiveCharacterTextSplitter's.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            separators=["\n\n", "\n", " ", ""]
        )
        texts = text_splitter.split_documents(documents)
        embeddings = OpenAIEmbeddings(
            openai_api_key=self.openai_api_key,
            openai_api_base=self.openai_base_url
        )
        vectorstore = Chroma.from_documents(texts, embeddings)
        # BUGFIX: "gpt-3.5-turbo" is a chat model; the legacy completions
        # wrapper (langchain_community.llms.OpenAI) targets /v1/completions
        # and cannot serve it. Use the chat-model wrapper instead.
        self.qa_chain = RetrievalQA.from_chain_type(
            llm=ChatOpenAI(
                openai_api_key=self.openai_api_key,
                openai_api_base=self.openai_base_url,
                model_name="gpt-3.5-turbo",
                temperature=0.1
            ),
            chain_type="stuff",
            retriever=vectorstore.as_retriever()
        )

    def ask_question(self, question: str) -> Optional[str]:
        """Answer ``question`` via the QA chain; ``None`` if the chain is not built yet."""
        if not self.qa_chain:
            print("问答链未初始化，请先处理文档")
            return None
        return self.qa_chain.run(question)


def main():
    """Interactive console loop: load a .docx, build the QA chain, answer questions.

    Type a question at the prompt; enter 'q' to exit.
    """
    # SECURITY FIX: the API key was hard-coded in source (a leaked secret).
    # Read credentials from the environment instead; the previously embedded
    # key should be considered compromised and rotated.
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    if not openai_api_key:
        print("未设置环境变量 OPENAI_API_KEY，程序退出")
        return
    # Endpoint can be overridden; defaults to the original proxy URL.
    openai_base_url = os.environ.get("OPENAI_BASE_URL", "https://yibuapi.com/v1")
    docx_qa = DocxQA(openai_api_key, openai_base_url)

    filename = "第3章 数据获取.docx"
    # NOTE: Windows paths need double backslashes or a raw string (r"path").
    desktop_path = r"D:\桌面\竞赛\挑战杯+人工智能"
    file_paths = [os.path.join(desktop_path, filename)]

    documents = docx_qa.process_documents(file_paths)
    if not documents:
        print("未找到有效文档，程序退出")
        return

    docx_qa.create_qa_chain(documents)
    print("\n=== 文档问答系统启动 ===")
    print("输入问题进行提问，输入'q'退出")
    while True:
        user_question = input("\n问题: ").strip()
        if user_question.lower() == 'q':
            break
        if not user_question:
            continue
        answer = docx_qa.ask_question(user_question)
        print(f"回答: {answer}")


# Run the interactive QA loop only when executed as a script (not on import).
if __name__ == "__main__":
    main()