from ..domain import QuestionGenerateRequest
from pathlib import Path
import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from  langchain_ollama import ChatOllama
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import pandas as pd
from fastapi.responses import FileResponse,JSONResponse
import asyncio
import os
from fastapi import FastAPI, UploadFile, File, Form, HTTPException, status

from pathlib import Path
from aiofiles import open as aio_open
import fitz  # PyMuPDF
from docx import Document
import zipfile
from ..config import config


# Module-level splitter shared by all QuestionGanerateUtils instances:
# breaks document text into overlapping chunks sized for LLM prompts.
spliter = RecursiveCharacterTextSplitter(
    chunk_size=1000,           # maximum characters per chunk
    chunk_overlap=200,         # characters shared between adjacent chunks
    length_function=len,       # measure length as plain character count
    is_separator_regex=False,  # treat separators literally, not as regex
)

class QuestionGanerateUtils:
    """Generate question/answer pairs from a session's uploaded documents.

    Walks the per-session data directory, splits each supported file
    (.txt / .docx / .pdf) into chunks, asks the configured Ollama model to
    produce questions and answers for every chunk, and writes the result
    to an Excel file in the per-session output directory.
    """

    def __init__(self, request: QuestionGenerateRequest):
        # Base working directories; narrowed to the session below.
        self.data_path = Path('./app/data')
        self.output_path = Path('./app/output')

        # Fall back to the configured default model when the request does
        # not specify one (was the `not not request.model` anti-idiom).
        model = request.model if request.model else config['llm']['model']

        self.llm = ChatOllama(
            model=model,
            base_url=config['llm']['base_url'],
            temperature=request.m_config.temperature,
            top_p=request.m_config.top_p,
            max_tokens=request.m_config.max_tokens,
        )

        self.qa = []
        # Columnar accumulator turned into a DataFrame by create_excel().
        self.excel_qa = {
            "question": [],
            "answer": [],
        }

        session_id = request.session_id
        self.data_path = self.data_path.joinpath(session_id)
        self.output_path = self.output_path.joinpath(session_id)
        os.makedirs(self.output_path, exist_ok=True)
        os.makedirs(self.data_path, exist_ok=True)

    def getQuestionPrompt(self):
        """Return the chat prompt that asks the model to generate questions
        from a chunk of article text (``{input}``)."""
        return ChatPromptTemplate.from_messages(
            [
                (
                    "system", "你是一个根据文本内容生成对应问题的生成器"),
                (
                    "human", """
                    请根据以下文章内容生成3-5个可以直接用文章信息回答的问题，要求：
                    1. 问题清晰明确，涵盖文章关键信息
                    2. 避免重复和笼统的问题
                    3. 使用中文提问
                    4. 确保每个问题都能直接从文章中找到答案
                    5. 每个问题都要有文章主语
                    6. 问题格式为：
                        --1.海平面上升是从哪一年开始被记录的，并且到目前为止上升了多高？
                        --2.由于气候变化导致冰川融化，哪个地区的冰川退缩已经对亚洲数亿人的供水产生了威胁？

                    文章内容：
                    {input}
                    """)
            ]
        )

    def getAnserPrompt(self):
        """Return the chat prompt that asks the model to answer the given
        questions (``{question}``) from a chunk of article text (``{input}``)."""
        return ChatPromptTemplate.from_messages(
            [
                (
                    "system", "你是一个根据文章内容回答问题的工具"),
                (
                    "human", """
                    请根据以下文章内容生成回答问题，要求：
                    按顺序输出回答即可不需要再输出问题,回答要完整包括主语
                    回答的格式：
                    --根据IPCC第六次评估报告，2011-2020年全球地表温度比1850-1900年升高了1.1°C。
                    --由于气候变化导致冰川融化，喜马拉雅地区的冰川退缩已经对亚洲数亿人的供水产生了威胁。
                    文章内容：
                    {input}
                    问题：
                    {question}
                    """)
            ]
        )

    async def question(self):
        """Process every supported file in the session's data directory and
        return the generated Excel file as a FastAPI FileResponse.

        Raises:
            HTTPException: 400 when the session contains no uploaded files.
        """
        # Bug fix: the original raised 404/400 based only on the FIRST
        # os.walk() entry, and both create_excel() and the return statement
        # sat INSIDE the walk loop, so subdirectories were never processed.
        found_any_file = False

        for root, dirs, files in os.walk(self.data_path):
            if files:
                found_any_file = True

            for file_name in files:
                file_path = os.path.join(root, file_name)
                if file_name.endswith(".txt"):
                    # .txt files were produced by upload-time parsing.
                    # (Renamed the handle: the original shadowed the loop
                    # variable `file` with the file object.)
                    async with aio_open(file_path, "r", encoding="utf-8",
                                        errors="ignore") as fh:
                        content = await fh.read()
                elif file_name.endswith((".docx", ".pdf")):
                    content, _ = await self.read_file(file_path)
                else:
                    continue  # unsupported extension

                # split_text is CPU-bound; run it off the event loop.
                chunks = await asyncio.to_thread(spliter.split_text, content)

                for chunk in chunks:
                    chain = self.getQuestionPrompt() | self.llm
                    # chain.invoke blocks on the LLM call; keep it in a thread.
                    question = await asyncio.to_thread(chain.invoke, {"input": chunk})
                    ans = await self.anser(question, chunk)
                    questions = question.content.split('--')
                    answers = ans.split('--')
                    # zip() guards against the model emitting unequal counts
                    # of questions and answers (the old index-based loop
                    # could raise IndexError on `anss[i]`).
                    for q, a in zip(questions, answers):
                        if q.strip():
                            self.excel_qa["question"].append(q)
                            self.excel_qa["answer"].append(a)

        if not found_any_file:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="未上传任何文件"
            )

        await self.create_excel()
        # Return the spreadsheet to the caller.
        return FileResponse(
            self.output_path.joinpath("example.xlsx"),
            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            filename="example.xlsx",
        )

    async def anser(self, question, chunk):
        """Ask the LLM to answer *question* using *chunk* as context and
        return the raw answer text."""
        chain = self.getAnserPrompt() | self.llm
        ans = await asyncio.to_thread(
            chain.invoke, {"input": chunk, "question": question})
        return ans.content

    async def create_excel(self):
        """Write the accumulated question/answer columns to
        ``<output_path>/example.xlsx`` without blocking the event loop."""
        df = pd.DataFrame(self.excel_qa)
        output_file = self.output_path.joinpath("example.xlsx")
        # to_excel does synchronous disk I/O; run it in a worker thread.
        await asyncio.to_thread(df.to_excel, output_file, index=False)

    async def read_file(self, file_path):
        """Dispatch to the PDF or DOCX reader based on the file extension.

        Returns:
            tuple[str, list[bytes]]: extracted text and raw image bytes.

        Raises:
            ValueError: for any extension other than .pdf / .docx.
        """
        if file_path.endswith(".pdf"):
            return await self.read_pdf(file_path)
        elif file_path.endswith(".docx"):
            return await self.read_docx(file_path)
        else:
            raise ValueError("Unsupported file format")

    async def read_pdf(self, file_path):
        """Async wrapper: parse a PDF in the default thread-pool executor."""
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self._read_pdf_sync, file_path)

    def _read_pdf_sync(self, file_path):
        """Extract text (page by page) and embedded images from a PDF.

        Returns:
            tuple[str, list[bytes]]: newline-joined page text and raw image bytes.
        """
        content = []
        images_content = []
        # Context manager guarantees the document is closed even if a page
        # fails to parse (the original leaked it on exception).
        with fitz.open(file_path) as document:
            for page_num in range(len(document)):
                page = document.load_page(page_num)
                text = page.get_text()
                if text:
                    content.append(text)

                # Extract embedded images regardless of whether the page
                # had any text.
                for img in page.get_images(full=True):
                    xref = img[0]  # cross-reference id of the image object
                    base_image = document.extract_image(xref)
                    images_content.append(base_image["image"])
        return "\n".join(content), images_content

    async def read_docx(self, file_path):
        """Async wrapper: parse a DOCX in the default thread-pool executor."""
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self._read_docx_sync, file_path)

    def _read_docx_sync(self, file_path):
        """Extract paragraph text and embedded media from a DOCX file.

        Returns:
            tuple[str, list[bytes]]: newline-joined paragraph text and raw
            image bytes pulled from the archive's ``word/media/`` entries.
        """
        doc = Document(file_path)
        content = [para.text for para in doc.paragraphs if para.text]

        # A .docx is a zip archive; embedded media lives under word/media/.
        images_content = []
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            for name in zip_ref.namelist():
                if name.startswith('word/media/'):
                    images_content.append(zip_ref.read(name))

        return "\n".join(content), images_content




if __name__ == '__main__':
    # Manual smoke test only: requires a running Ollama server and files
    # under ./app/data/<session_id>.
    # TODO(review): passing None crashes in __init__ (request.model is
    # dereferenced) — supply a real QuestionGenerateRequest here.
    qgu = QuestionGanerateUtils(None)
    # Bug fix: question() takes no arguments and is a coroutine — the old
    # code called `qgu.question(None)` and never awaited it.
    asyncio.run(qgu.question())

