"""工具函数模块"""

import logging
import re
import time
from typing import Any, Dict, List, Tuple

import fitz  # PyMuPDF
import pdfplumber
import PyPDF2
import streamlit as st
from zhipuai import ZhipuAI
from zhipuai.core._errors import APIRequestFailedError

class PDFProcessor:
    """PDF document processor.

    Extracts text plus per-line layout metadata ("blocks") from a PDF using
    one of three backends (PyMuPDF, pdfplumber, PyPDF2) and splits the result
    into titled sections via font-size heuristics.  Each block is a dict with
    keys ``text``, ``font_size``, ``bold``, ``x0`` and ``page_width``.
    """

    @staticmethod
    def extract_text_pypdf2(pdf_path: str) -> Dict[str, Any]:
        """Extract PDF text using PyPDF2 (no layout information available).

        Returns ``{"text": str, "sections": list}``; on failure the error is
        reported via Streamlit and an empty result is returned.
        """
        try:
            with open(pdf_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                blocks = []
                for page in pdf_reader.pages:
                    text = page.extract_text()
                    if text:
                        for line in text.splitlines():
                            blocks.append({
                                "text": line,
                                "font_size": 12,  # PyPDF2 cannot report size; assumed
                                "bold": False,    # PyPDF2 cannot report weight; assumed
                                "x0": 50,         # position unknown; placeholder
                                "page_width": 600
                            })
                full_text = "\n".join([b["text"] for b in blocks])
                return {
                    "text": full_text,
                    "sections": PDFProcessor._split_into_sections(blocks)
                }
        except Exception as e:
            st.error(f"PyPDF2提取失败: {str(e)}")
            return {"text": "", "sections": []}

    @staticmethod
    def extract_text_pdfplumber(pdf_path: str) -> Dict[str, Any]:
        """Extract PDF text using pdfplumber (better for complex layouts).

        Each extracted word becomes one block.  Note: ``extract_words()``
        does not emit a ``size`` key by default, so the font size usually
        falls back to 12.
        """
        try:
            blocks = []
            with pdfplumber.open(pdf_path) as pdf:
                for page in pdf.pages:
                    page_width = page.width
                    for line in page.extract_words():
                        blocks.append({
                            "text": line['text'],
                            "font_size": line.get('size', 12),
                            "bold": False,  # not provided directly; could be inferred from fontname
                            "x0": line['x0'],
                            "page_width": page_width
                        })
            full_text = "\n".join([b["text"] for b in blocks])
            return {
                "text": full_text,
                "sections": PDFProcessor._split_into_sections(blocks)
            }
        except Exception as e:
            st.error(f"pdfplumber提取失败: {str(e)}")
            return {"text": "", "sections": []}

    @staticmethod
    def auto_mark_formulas(text: str) -> str:
        """Wrap formula-looking spans in ``$...$`` so they render as math.

        Regex patterns propose candidate spans; ``looks_like_formula`` then
        filters out trivial assignments.  The filter is deliberately
        permissive: anything containing ``=`` that is not a short numeric
        parameter assignment is treated as a formula.
        """
        inline_patterns = [
            r'([A-Za-z_]+\s*\([^)]*\)\s*=\s*[^,\n.]+)',   # function equation, e.g. f(x) = ...
            r'([A-Za-z_]+\s*=\s*[^,\n.]+)',               # variable equation
            r'([∑∏∫][^,\n.]{5,})',                        # sum / product / integral
            r'([A-Za-z]+\([^)]*\)\s*→\s*[A-Za-z]+\([^)]*\))', # function mapping
            r'(P\([^)]*\)\s*=\s*[^,\n.]+)',               # probability formula
            r'(E\[[^]]+\]\s*=\s*[^,\n.]+)',               # expectation formula
        ]

        def looks_like_formula(s: str) -> bool:
            s = s.strip()
            if len(s) < 5:  # too short to be a meaningful formula
                return False

            if '=' not in s:
                return False

            # Exclude simple parameter assignments such as "stride = 16".
            if re.fullmatch(r'[A-Za-z_ ]{1,20}=\s*\d+(\.\d+)?', s):
                return False

            # Everything else is accepted.  (The original also scanned for
            # math symbols here but returned True either way, so the scan
            # was dead code and has been removed.)
            return True

        def wrap_inline(m):
            s = m.group(0)
            if s.startswith('$') and s.endswith('$'):
                return s  # already marked
            if looks_like_formula(s):
                return f'${s}$'
            return s

        for pattern in inline_patterns:
            text = re.sub(pattern, wrap_inline, text)
        return text

    @staticmethod
    def extract_text_fitz(pdf_path: str) -> Dict[str, Any]:
        """Extract text and style info with PyMuPDF, removing headers/footers.

        Pass 1 collects candidate header strings (blocks within 70pt of the
        page top); any string repeated on 3+ pages is treated as boilerplate.
        Pass 2 extracts body lines, skipping footers (within 50pt of the page
        bottom) and the repeated headers, then auto-marks formulas.
        """
        try:
            doc = fitz.open(pdf_path)
            try:
                blocks = []
                header_candidates = {}  # header text -> number of pages it appears on

                # Pass 1: count header-area text occurrences across pages.
                for page in doc:
                    dict_blocks = page.get_text("dict")["blocks"]

                    for b in dict_blocks:
                        if "lines" not in b:
                            continue  # image/non-text block

                        block_y0 = b.get("bbox", [0, 0, 0, 0])[1]

                        # Candidate header: block starts near the page top.
                        if block_y0 < 70:
                            header_text = " ".join(
                                span.get("text", "").strip()
                                for line in b.get("lines", [])
                                for span in line.get("spans", [])
                                if span.get("text", "").strip()
                            )
                            if header_text:
                                header_candidates[header_text] = header_candidates.get(header_text, 0) + 1

                # Text repeated on 3+ pages is considered a running header.
                repeated_headers = {text for text, count in header_candidates.items() if count >= 3}

                # Pass 2: extract body blocks, filtering headers/footers.
                for page in doc:
                    page_width = page.rect.width
                    page_height = page.rect.height
                    dict_blocks = page.get_text("dict")["blocks"]

                    for b in dict_blocks:
                        if "lines" not in b:
                            continue

                        block_y0 = b.get("bbox", [0, 0, 0, 0])[1]

                        # Skip footer blocks near the page bottom.
                        if page_height - block_y0 < 50:
                            continue

                        for line in b["lines"]:
                            line_text = ""
                            max_font_size = 0
                            is_bold = False
                            for span in line["spans"]:
                                text = span.get("text", "").strip()
                                if not text:
                                    continue
                                line_text += text + " "
                                max_font_size = max(max_font_size, span.get("size", 10))
                                # Heuristic: bold fonts usually embed "Bold" in the name.
                                if "Bold" in span.get("font", ""):
                                    is_bold = True

                            final_text = line_text.strip()
                            # Drop repeated headers that survived into the body area.
                            if final_text and final_text not in repeated_headers:
                                blocks.append({
                                    "text": final_text,
                                    "font_size": max_font_size,
                                    "bold": is_bold,
                                    "x0": line["bbox"][0],
                                    "page_width": page_width
                                })

                full_text = "\n".join([b["text"] for b in blocks])
                full_text_marked = PDFProcessor.auto_mark_formulas(full_text)

                sections = PDFProcessor._split_into_sections(blocks)
                for sec in sections:
                    sec['content'] = PDFProcessor.auto_mark_formulas(sec['content'])

                return {
                    "text": full_text_marked,
                    "sections": sections
                }
            finally:
                # The original implementation leaked the document handle.
                doc.close()

        except Exception as e:
            st.error(f"PyMuPDF提取失败: {str(e)}")
            return {"text": "", "sections": []}

    @staticmethod
    def extract_text_hybrid(pdf_path: str) -> Dict[str, Any]:
        """Try PyMuPDF first, then fall back to pdfplumber, then PyPDF2."""
        result = PDFProcessor.extract_text_fitz(pdf_path)
        if not result["text"].strip():
            result = PDFProcessor.extract_text_pdfplumber(pdf_path)
        if not result["text"].strip():
            result = PDFProcessor.extract_text_pypdf2(pdf_path)
        return result

    @staticmethod
    def _merge_blocks(blocks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Merge per-line blocks into sentence-level blocks.

        Two merge rules:
        1. A line ending in ``-`` is joined with the following line
           (hyphenated line break).
        2. Otherwise lines are concatenated until one ends with a sentence
           terminator, stopping at likely headings ("Abstract"/"摘要", blank
           lines, or a font-size jump of 0.5pt or more).
        """

        def clean_chinese_spacing(s: str) -> str:
            # Remove half/full-width whitespace between adjacent CJK
            # characters (extraction artifacts).  Hoisted out of the loop;
            # the original redefined this closure on every iteration.
            return re.sub(r'(?<=[\u4e00-\u9fa5])[\s\u3000]+(?=[\u4e00-\u9fa5])', '', s)

        merged_blocks = []
        i = 0
        while i < len(blocks):
            block = blocks[i]
            text = block.get("text", "").strip()
            font_size = block.get("font_size", 10)

            # Rule 1: hyphenated line break — join with the following block.
            if text.endswith('-') and i + 1 < len(blocks):
                next_block = blocks[i + 1]
                next_text = next_block.get("text", "").lstrip()
                merged_text = text[:-1] + next_text
                merged_block = dict(block)
                merged_block["text"] = merged_text
                merged_blocks.append(merged_block)
                i += 2
                continue

            # Rule 2: merge until sentence-ending punctuation, avoiding headings.
            while (not text.endswith(('.', '。', '?', '？', '!', '！'))) and i + 1 < len(blocks):
                next_block = blocks[i + 1]
                next_text = next_block.get("text", "").strip()
                next_font_size = next_block.get("font_size", 10)

                # Never merge into an "Abstract" heading.
                if (next_text.lower() == "abstract") or (next_text.lower() == "摘要"):
                    break
                # Stop at empty lines or font-size jumps (likely a heading).
                if (len(next_text) == 0) or (abs(next_font_size - font_size) >= 0.5):
                    break

                # Stop when two or more consecutive blank blocks follow.
                # NOTE(review): unreachable in practice — the empty-text
                # break above already fires when blocks[i + 1] is blank.
                empty_line_count = 0
                j = i + 1
                while j < len(blocks) and blocks[j].get("text", "").strip() == "":
                    empty_line_count += 1
                    j += 1
                if empty_line_count >= 2:
                    break

                text = text + " " + next_text
                i += 1

            text = clean_chinese_spacing(text)

            merged_block = dict(block)
            merged_block["text"] = text
            merged_blocks.append(merged_block)
            i += 1

        return merged_blocks

    @staticmethod
    def _split_into_sections(blocks: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Split merged blocks into ``{"title", "content"}`` sections.

        The heading font size is estimated from blocks containing known
        section keywords or, failing that, from the second-largest font among
        the first 100 blocks (the largest is usually the paper title).  Lines
        matching that size (±0.5pt) plus a few shape heuristics become titles.
        """
        blocks = PDFProcessor._merge_blocks(blocks)

        # Guard: the original raised IndexError on an empty block list
        # (``font_sizes[0]`` on an empty list).
        if not blocks:
            return [{"title": "Full Document", "content": ""}]

        sections = []
        current_section_lines = []
        current_title = None

        # Find the reference font size for section headings.
        candidates = []
        for idx, block in enumerate(blocks):
            text = block.get("text", "").lower()
            # "绪论" fixes the original's typo "续论" (not a chapter word).
            if any(kw in text for kw in ["abstract", "摘要", "引言", "绪论"]):
                candidates.append((idx, block.get("font_size", 12)))
        if candidates:
            start_index, section_title_font_size = max(candidates, key=lambda x: x[1])
        else:
            font_sizes = sorted(set(b.get("font_size", 12) for b in blocks[:100]), reverse=True)
            # Second-largest size; the largest is usually the document title.
            section_title_font_size = font_sizes[1] if len(font_sizes) >= 2 else font_sizes[0]
            start_index = 0

        def is_section_title(text: str, font_size: float) -> bool:
            # Must match the heading reference size.
            if abs(font_size - section_title_font_size) > 0.5:
                return False
            if len(text.strip()) > 100:
                return False
            # Headings do not end with sentence punctuation.
            if text.strip().endswith("."):
                return False
            if text.strip().endswith("。"):
                return False
            if text.count(".") > 1:
                return False
            if "\n" in text:
                return False

            # Strip allowed characters (CJK, latin, digits, common
            # punctuation); whatever remains counts as "symbols".
            filtered = re.sub(r'[a-zA-Z0-9\u4e00-\u9fa5\s\-–:：、，,\.]', '', text)

            # Mostly-symbol text (over half) is not a heading.
            if len(filtered) > len(text) / 2:
                return False

            # Reject bracket/whitespace-only text.
            if re.fullmatch(r'[\(\)\[\]\{\}\s]*', text):
                return False
            return True

        i = start_index
        n = len(blocks)

        while i < n:
            block = blocks[i]
            text = block.get("text", "").strip()
            font_size = block.get("font_size", 10)

            # Headings may wrap: try merging with the next line first.
            if i + 1 < n:
                next_block = blocks[i + 1]
                next_text = next_block.get("text", "").strip()
                next_font_size = next_block.get("font_size", 10)

                # Both lines short and at heading size: test the merged text.
                if (abs(font_size - section_title_font_size) <= 0.5 and
                    abs(font_size - next_font_size) <= 0.5 and
                    len(text) < 40 and len(next_text) < 40):

                    merged_text = text + next_text
                    if is_section_title(merged_text, font_size):
                        # Flush the previous section before starting a new one.
                        if current_section_lines and current_title is not None:
                            sections.append({
                                "title": current_title,
                                "content": "\n".join(current_section_lines).strip()
                            })
                        current_title = merged_text
                        current_section_lines = []
                        i += 2
                        continue

            # Single-line heading test.
            if is_section_title(text, font_size):
                if current_section_lines and current_title is not None:
                    sections.append({
                        "title": current_title,
                        "content": "\n".join(current_section_lines).strip()
                    })
                current_title = text
                current_section_lines = []
            else:
                # Body text before the first detected heading is discarded.
                if current_title is not None:
                    current_section_lines.append(text)
            i += 1

        # Flush the trailing section.
        if current_section_lines and current_title is not None:
            sections.append({
                "title": current_title,
                "content": "\n".join(current_section_lines).strip()
            })

        # Fallback: no headings found — whole document as one section.
        if not sections:
            sections.append({
                "title": "Full Document",
                "content": "\n".join([b.get("text", "") for b in blocks])
            })

        return sections

class TextProcessor:
    """Plain-text utilities: cleaning, sectioning, formula extraction, chunking."""

    def clean_text(self, text: str) -> str:
        """Normalize whitespace and drop characters that are neither word
        characters, common punctuation, nor math symbols."""
        # Collapse any whitespace run into a single space.
        text = re.sub(r'\s+', ' ', text)
        # Replace disallowed characters with spaces, keeping math symbols.
        text = re.sub(r'[^\w\s\-=+*/(){}[\]<>.,;:!?$\\∑∏∫αβγδεζηθικλμνξοπρστυφχψω]', ' ', text)
        return text.strip()

    def split_into_sections(self, text: str) -> List[Dict[str, str]]:
        """Split plain text into sections.

        Returns a list of ``{"title", "content"}`` dicts.  A line is treated
        as a section title when it is short and either ALL-CAPS, contains a
        known section keyword, or starts with a numeric label such as "1." or
        "2.1".
        """
        sections = []
        lines = text.split('\n')
        current_section_lines = []
        current_title = "Introduction"  # default title for leading content

        section_keywords = ['abstract', 'introduction', 'method', 'methodology', 'result', 'results',
                            'conclusion', 'discussion', 'reference', 'acknowledgment', 'related work']

        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Heading detection heuristics.
            is_section_title = False
            if len(line) < 100:
                # Short ALL-CAPS line, e.g. "METHODS".
                if line.isupper() and len(line.split()) <= 5:
                    is_section_title = True
                # Contains a section keyword and is short enough for a heading.
                elif any(keyword in line.lower() for keyword in section_keywords):
                    if len(line.split()) <= 8:
                        is_section_title = True
                # Numbered heading, e.g. "1. Introduction", "2.1 Method".
                elif line.split()[0].replace('.', '').isdigit() and len(line.split()) <= 6:
                    is_section_title = True

            if is_section_title:
                # Flush the previous section.
                if current_section_lines:
                    sections.append({
                        "title": current_title,
                        "content": '\n'.join(current_section_lines).strip()
                    })
                    current_section_lines = []
                current_title = line
            else:
                current_section_lines.append(line)

        # Flush the trailing section.
        if current_section_lines:
            sections.append({
                "title": current_title,
                "content": '\n'.join(current_section_lines).strip()
            })

        # Fallback: whole text as one section when no headings were found.
        if not sections:
            sections.append({
                "title": "Full Document",
                "content": text.strip()
            })

        return sections

    def extract_formulas_from_text(self, text: str) -> List[Dict]:
        """Extract formula candidates from text.

        Detects LaTeX-delimited formulas first, then plain-text math
        expressions, deduplicating on a normalized (lowercased, space-free)
        form.  Each item has keys ``formula``, ``type`` ("latex" or "text")
        and ``context`` (±150 characters around the match).
        """
        formulas = []

        # LaTeX-style delimiters.
        latex_patterns = [
            r'\$\$([^$]+)\$\$',  # display math $$...$$
            r'\$([^$\n]+)\$',    # inline math $...$
            r'\\begin\{equation\}(.*?)\\end\{equation\}',  # equation environment
            r'\\begin\{align\}(.*?)\\end\{align\}',        # align environment
        ]

        for pattern in latex_patterns:
            matches = re.findall(pattern, text, re.DOTALL)
            for match in matches:
                if match.strip():
                    formulas.append({
                        'formula': match.strip(),
                        'type': 'latex',
                        'context': self._get_context(text, match)
                    })

        # Common plain-text math expression shapes.
        math_patterns = [
            r'([A-Za-z_]+\([^)]+\)\s*=\s*[^,\n.]{5,})',  # function equation
            r'([A-Za-z_]+\s*=\s*[^,\n.]{10,})',          # variable equation (longer minimum)
            r'(∑[^,\n.]{5,})',                            # summation
            r'(∏[^,\n.]{5,})',                            # product
            r'(∫[^,\n.]{5,})',                            # integral
            r'([A-Za-z]+\([^)]*\)\s*→\s*[A-Za-z]+\([^)]*\))', # function mapping
            r'(P\([^)]+\)\s*=\s*[^,\n.]+)',              # probability formula
            r'(E\[[^]]+\]\s*=\s*[^,\n.]+)',              # expectation formula
        ]

        for pattern in math_patterns:
            matches = re.findall(pattern, text)
            for match in matches:
                if len(match) > 8:  # drop very short matches
                    formulas.append({
                        'formula': match,
                        'type': 'text',
                        'context': self._get_context(text, match)
                    })

        # Deduplicate on normalized formula text.
        seen_formulas = set()
        unique_formulas = []
        for formula in formulas:
            formula_key = formula['formula'].lower().replace(' ', '')
            if formula_key not in seen_formulas:
                seen_formulas.add(formula_key)
                unique_formulas.append(formula)

        return unique_formulas

    def _get_context(self, text: str, formula: str) -> str:
        """Return up to ±150 characters of context around *formula* in *text*."""
        try:
            index = text.find(formula)
            if index != -1:
                start = max(0, index - 150)
                end = min(len(text), index + len(formula) + 150)
                context = text[start:end].strip()
                # Collapse whitespace runs in the context snippet.
                context = re.sub(r'\s+', ' ', context)
                return context
        except Exception:
            # Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
        return "无法获取上下文"

    def chunk_text(self, text: str, max_tokens: int = 8000) -> List[str]:
        """Split *text* into chunks of at most roughly *max_tokens* tokens.

        Token count is estimated as 1 token ≈ 4 characters.  Splitting is
        paragraph-aligned where possible; a single paragraph longer than the
        budget is hard-split.  (The original only split an overlong paragraph
        once, so chunks could still exceed the budget.)
        """
        max_chars = max_tokens * 4
        chunks = []

        if len(text) <= max_chars:
            return [text]

        # Split on paragraph boundaries.
        paragraphs = text.split('\n\n')
        current_chunk = []
        current_length = 0  # character count, excluding the '\n\n' joiners

        for paragraph in paragraphs:
            if current_length + len(paragraph) > max_chars:
                # Flush what has accumulated so far.
                if current_chunk:
                    chunks.append('\n\n'.join(current_chunk))
                    current_chunk = []
                    current_length = 0
                # Hard-split a paragraph that alone exceeds the budget.
                while len(paragraph) > max_chars:
                    chunks.append(paragraph[:max_chars])
                    paragraph = paragraph[max_chars:]
                if paragraph:
                    current_chunk = [paragraph]
                    current_length = len(paragraph)
            else:
                current_chunk.append(paragraph)
                current_length += len(paragraph)

        if current_chunk:
            chunks.append('\n\n'.join(current_chunk))

        return chunks

class ValidationUtils:
    """Validation helpers for uploaded files and API credentials."""

    @staticmethod
    def validate_pdf_file(file) -> bool:
        """Validate an uploaded file: present, ``.pdf`` extension, ≤ 10MB.

        Failures are reported to the user via Streamlit.
        """
        if file is None:
            return False

        # Extension check (case-insensitive).
        if not file.name.lower().endswith('.pdf'):
            st.error("请上传PDF格式的文件")
            return False

        # Size limit: 10MB.
        if file.size > 10 * 1024 * 1024:
            st.error("文件大小不能超过10MB")
            return False

        return True

    @staticmethod
    def validate_api_key(api_key: str) -> bool:
        """Cheap format check: reject empty keys and the config placeholder."""
        if not api_key or api_key == "your_api_key_here":
            return False
        return True

    @staticmethod
    def validate_api_key_with_request(api_key: str) -> Tuple[bool, str]:
        """Verify an API key by sending a minimal chat request.

        Returns ``(ok, message)`` where *message* explains success (with the
        response time) or the failure reason.  (The original annotated the
        return type as ``(bool, str)``, which is not a valid type hint.)
        """
        if not ValidationUtils.validate_api_key(api_key):
            return False, "API Key格式不正确"

        try:
            # Temporary client used only for this probe request.
            client = ZhipuAI(api_key=api_key)

            # Minimal test request, timed for the success message.
            start_time = time.time()
            response = client.chat.completions.create(
                model="glm-4-flash",
                messages=[{"role": "user", "content": "你好，请回复'验证成功'"}],
                # The original used max_tokens=5, which could truncate the
                # expected reply and mis-report a valid key.
                max_tokens=20,
                temperature=0.1
            )

            response_time = time.time() - start_time

            # Accept only if the model echoed the expected phrase.
            if (response.choices and
                response.choices[0].message.content and
                "验证成功" in response.choices[0].message.content):
                return True, f"验证成功！响应时间: {response_time:.2f}秒"

            # Guard the empty-choices case: the original indexed choices[0]
            # unconditionally here and could raise IndexError.
            unexpected = response.choices[0].message.content if response.choices else "<empty>"
            return False, f"API返回了意外响应: {unexpected}"

        except APIRequestFailedError as e:
            # Surface as much detail from the API error as is available.
            error_details = f"API请求失败: 错误代码: {e.code}"
            if hasattr(e, 'message'):
                error_details += f", 消息: {e.message}"
            if hasattr(e, 'body'):
                error_details += f", 响应体: {e.body}"

            return False, error_details

        except Exception as e:
            # Unknown failure: log the full traceback, return a summary.
            error_details = f"验证过程中发生未知错误: {str(e)}"
            logging.exception("API Key验证异常")
            return False, error_details