"""
科研文献处理工具集
专门用于处理PDF格式的学术论文，提供结构识别、内容提取、元数据解析等功能
"""

import os
import re
import json
import fitz  # PyMuPDF
from typing import List, Dict, Any, Optional
from datetime import datetime

# Import the base tool system (the @tool decorator and global registry)
from local_tools import GLOBAL_TOOL_SYSTEM, tool

class PDFStructure:
    """Analyzes the section structure of academic paper text extracted from PDFs."""

    # Heading keyword variants (English and Chinese) for common paper sections.
    SECTION_KEYWORDS = {
        'abstract': ['abstract', '摘要', '概要'],
        'introduction': ['introduction', 'intro', '引言', '介绍'],
        'related_work': ['related work', 'literature review', '相关工作', '文献综述'],
        'methodology': ['methodology', 'methods', 'method', '方法', '方法论'],
        'experiments': ['experiments', 'experimental', '实验', '试验'],
        'results': ['results', 'findings', '结果', '发现'],
        'discussion': ['discussion', '讨论'],
        'conclusion': ['conclusion', 'conclusions', '结论', '总结'],
        'references': ['references', 'reference', '参考文献', '引用']
    }

    def __init__(self):
        # One compiled heading pattern per section key.
        self.section_patterns = self._build_section_patterns()

    def _build_section_patterns(self):
        """Build one compiled regex per section.

        Each pattern matches an entire heading line: an optional numeric
        prefix (e.g. "1" or "2.3") followed by one of the keywords.

        Anchored with ``^...$`` so it can be tested against individual
        lines. (The previous pattern required a trailing ``\\n``, which a
        line produced by ``text.split('\\n')`` never contains, so no
        heading ever matched and ``identify_sections`` always came back
        empty.)
        """
        patterns = {}
        for section, keywords in self.SECTION_KEYWORDS.items():
            pattern = r'^\s*(\d*\.?\d*)\s*(' + '|'.join(keywords) + r')\s*$'
            patterns[section] = re.compile(pattern, re.IGNORECASE)
        return patterns

    def identify_sections(self, text: str) -> Dict[str, List[int]]:
        """Locate section headings in *text*.

        Args:
            text: Full document text (newline-separated).

        Returns:
            Mapping of section key -> list of 0-based line indices where
            a matching heading was found; sections with no match are omitted.
        """
        sections = {}
        lines = text.split('\n')

        for section, pattern in self.section_patterns.items():
            matches = [i for i, line in enumerate(lines) if pattern.search(line)]
            if matches:
                sections[section] = matches

        return sections

    def extract_section_content(self, text: str, start_line: int, end_line: Optional[int] = None) -> str:
        """Return the text of lines ``[start_line, end_line)``.

        When *end_line* is None, everything from *start_line* to the end
        of the document is returned.
        """
        lines = text.split('\n')
        if end_line is None:
            return '\n'.join(lines[start_line:])
        return '\n'.join(lines[start_line:end_line])

class ResearchPaperAnalyzer:
    """Extracts text, structure, and bibliographic metadata from academic PDFs."""

    def __init__(self):
        # Section-structure helper shared by the tool functions below.
        self.structure = PDFStructure()

    def extract_text_from_pdf(self, pdf_path: str, max_pages: Optional[int] = None) -> str:
        """Extract plain text from a PDF file.

        Args:
            pdf_path: Path to the PDF file.
            max_pages: Optional cap on the number of pages to read;
                None means read every page.

        Returns:
            The newline-joined text of the extracted pages.

        Raises:
            RuntimeError: If the PDF cannot be opened or read.
        """
        try:
            doc = fitz.open(pdf_path)
            try:
                page_limit = min(len(doc), max_pages) if max_pages else len(doc)
                text_parts = [doc.load_page(i).get_text() for i in range(page_limit)]
            finally:
                # Always release the document, even if a page load raises
                # (the original only closed it on the success path).
                doc.close()
            return '\n'.join(text_parts)

        except Exception as e:
            raise RuntimeError(f"PDF文本提取失败: {str(e)}") from e

    def extract_metadata_from_text(self, text: str) -> Dict[str, Any]:
        """Extract all supported metadata fields from *text*.

        Returns a dict with keys ``title``, ``authors``, ``year``,
        ``keywords``, ``doi``, ``journal``; missing fields are None or [].
        """
        return {
            'title': self._extract_title(text),
            'authors': self._extract_authors(text),
            'year': self._extract_year(text),
            'keywords': self._extract_keywords(text),
            'doi': self._extract_doi(text),
            'journal': self._extract_journal(text)
        }

    def _extract_title(self, text: str) -> Optional[str]:
        """Heuristically pick the title line near the top of the document."""
        lines = text.strip().split('\n')

        # Primary strategy: a line of moderate length with a high ratio of
        # uppercase characters (titles are often capitalized/all-caps).
        for line in lines[:20]:  # only the first 20 lines
            line = line.strip()
            if 20 < len(line) < 200:
                upper_ratio = sum(1 for c in line if c.isupper()) / len(line)
                if upper_ratio > 0.3:
                    return line

        # Fallback: first non-trivial line near the top.
        for line in lines[:10]:
            line = line.strip()
            if line and len(line) > 10:
                return line

        return None

    def _extract_authors(self, text: str) -> List[str]:
        """Extract the author list from the head of the paper.

        Looks for comma-separated "First Last" name runs in the 30 lines
        after the title; requires at least two names to reduce false hits.
        """
        lines = text.split('\n')
        name_pattern = r'([A-Z][a-z]+\s+[A-Z][a-z]+(?:\s*,\s*[A-Z][a-z]+\s+[A-Z][a-z]+)*)'

        for line in lines[1:30]:
            matches = re.findall(name_pattern, line)
            if matches and len(matches[0].split(',')) >= 2:  # multiple authors
                return [name.strip() for name in matches[0].split(',')]

        return []

    def _extract_year(self, text: str) -> Optional[int]:
        """Extract the most plausible publication year.

        Returns the earliest year in the 2000-2030 range, or None.
        """
        # Non-capturing group so re.findall returns the full 4-digit year.
        # (The original pattern captured only the "19"/"20" prefix, so every
        # candidate became 19 or 20, never passed the 2000-2030 filter, and
        # the method unconditionally returned None.)
        matches = re.findall(r'\b(?:19|20)\d{2}\b', text)
        valid_years = [int(m) for m in matches if 2000 <= int(m) <= 2030]
        return min(valid_years) if valid_years else None

    def _extract_keywords(self, text: str) -> List[str]:
        """Extract up to 10 keywords from a "Keywords:" style block."""
        keyword_patterns = [
            r'Keywords?[:\-]\s*(.+?)(?:\n\s*\n|\n\s*[A-Z])',
            r'Key words?[:\-]\s*(.+?)(?:\n\s*\n|\n\s*[A-Z])',
            r'关键词[:\-]\s*(.+?)(?:\n\s*\n|\n\s*[A-Z])'
        ]

        for pattern in keyword_patterns:
            match = re.search(pattern, text, re.IGNORECASE | re.DOTALL)
            if match:
                keywords_text = match.group(1)
                # Keywords are separated by commas or semicolons.
                keywords = [k.strip() for k in re.split(r'[,;]', keywords_text) if k.strip()]
                return keywords[:10]  # cap at 10 keywords

        return []

    def _extract_doi(self, text: str) -> Optional[str]:
        """Extract the first DOI (``10.<registrant>/<suffix>``) if present."""
        match = re.search(r'10\.\d{4,}/[^\s]+', text)
        return match.group(0) if match else None

    def _extract_journal(self, text: str) -> Optional[str]:
        """Extract a journal/venue name using common citation phrasings."""
        journal_patterns = [
            r'(?:published\s+in|appears\s+in|in)\s+([A-Z][^.]+?)(?:\s*,\s*vol|\s*\d+\s*\(|\n)',
            r'([A-Z][A-Za-z\s]+?(?:Journal|Proceedings|Transactions|Letters))[^a-zA-Z]',
        ]

        for pattern in journal_patterns:
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                return match.group(1).strip()

        return None

# Research tool definitions (exposed via the @tool decorator)

@tool("scan_research_papers", "扫描目录中的科研文献PDF", ["directory"], {"recursive": True, "max_size_mb": 50})
def scan_research_papers(directory: str, recursive: bool = True, max_size_mb: int = 50) -> Dict[str, Any]:
    """Scan a directory for PDF files and heuristically flag research papers.

    Args:
        directory: Directory to scan.
        recursive: Whether to descend into subdirectories.
        max_size_mb: Files larger than this (in MB) are skipped.

    Returns:
        Dict with "total_files", a confidence-sorted "research_papers" list,
        and a "summary" of confidence buckets. On failure, a dict with an
        "error" message and an empty "research_papers" list.
        (The original annotation said ``List[...]`` but a dict was returned.)
    """
    try:
        max_size_bytes = max_size_mb * 1024 * 1024

        # Collect candidate PDF paths (case-insensitive extension match).
        if recursive:
            pdf_files = []
            for root, _dirs, files in os.walk(directory):
                pdf_files.extend(
                    os.path.join(root, name)
                    for name in files
                    if name.lower().endswith('.pdf')
                )
        else:
            pdf_files = [
                os.path.join(directory, name)
                for name in os.listdir(directory)
                if name.lower().endswith('.pdf')
            ]

        research_papers = []

        for pdf_path in pdf_files:
            try:
                file_stat = os.stat(pdf_path)

                # Respect the caller's size cap.
                if file_stat.st_size > max_size_bytes:
                    continue

                paper_info = {
                    'file_path': pdf_path,
                    'file_name': os.path.basename(pdf_path),
                    'file_size': file_stat.st_size,
                    'modified_time': datetime.fromtimestamp(file_stat.st_mtime),
                    'is_research_paper': False,  # pessimistic default
                    'confidence': 0.0
                }

                # Heuristic 1: research-related words in the file name.
                file_name = os.path.basename(pdf_path).lower()
                research_indicators = [
                    'paper', 'article', 'conference', 'journal',
                    'proceedings', 'arxiv', 'research', 'study'
                ]
                name_score = sum(1 for indicator in research_indicators if indicator in file_name)

                # Heuristic 2: typical paper size (100KB-50MB; deliberately a
                # fixed band, independent of the max_size_mb filter above).
                size_score = 1 if 100 * 1024 < file_stat.st_size < 50 * 1024 * 1024 else 0

                # Heuristic 3: academic keywords / author-name patterns on
                # the first page of the PDF.
                try:
                    doc = fitz.open(pdf_path)
                    try:
                        if len(doc) > 0:
                            first_page_text = doc.load_page(0).get_text()[:500]  # first 500 chars

                            academic_keywords = ['abstract', 'introduction', 'keywords', '摘要', '关键词']
                            content_score = sum(
                                1 for keyword in academic_keywords
                                if keyword.lower() in first_page_text.lower()
                            )

                            author_patterns = [r'[A-Z][a-z]+ [A-Z][a-z]+', r'[A-Z][a-z]+, [A-Z][a-z]+']
                            author_score = 0
                            for pattern in author_patterns:
                                if re.search(pattern, first_page_text):
                                    author_score = 1  # at most one point, as before
                                    break

                            # Normalize the combined score to 0-1.
                            total_score = name_score + size_score + content_score + author_score
                            paper_info['confidence'] = min(total_score / 6, 1.0)
                            paper_info['is_research_paper'] = paper_info['confidence'] > 0.5
                    finally:
                        # Always release the document (the original leaked it
                        # for zero-page PDFs and on mid-analysis exceptions).
                        doc.close()

                except Exception:
                    # Unreadable PDF: keep the zero-confidence default entry.
                    pass

                research_papers.append(paper_info)

            except Exception:
                # File vanished or cannot be stat'ed; skip it entirely.
                continue

        # Most promising papers first.
        research_papers.sort(key=lambda p: p['confidence'], reverse=True)

        return {
            "total_files": len(pdf_files),
            "research_papers": research_papers,
            "summary": {
                "total_papers": len([p for p in research_papers if p['is_research_paper']]),
                "high_confidence": len([p for p in research_papers if p['confidence'] > 0.7]),
                "medium_confidence": len([p for p in research_papers if 0.5 < p['confidence'] <= 0.7]),
                "low_confidence": len([p for p in research_papers if p['confidence'] <= 0.5])
            }
        }

    except Exception as e:
        return {"error": f"扫描目录失败: {str(e)}", "research_papers": []}

@tool("parse_research_paper", "解析科研论文结构", ["pdf_path"], {"max_pages": 10})
def parse_research_paper(pdf_path: str, max_pages: int = 10) -> Dict[str, Any]:
    """Parse the section structure of a research paper.

    Args:
        pdf_path: Path to the PDF file.
        max_pages: Maximum number of pages to analyze.

    Returns:
        Dict with the detected section positions, per-section content
        excerpts, and aggregate quality metrics; on failure, a dict with
        an "error" message and an empty "structure".
    """
    try:
        if not os.path.exists(pdf_path):
            return {"error": "PDF文件不存在", "structure": {}}

        analyzer = ResearchPaperAnalyzer()

        text = analyzer.extract_text_from_pdf(pdf_path, max_pages)
        if not text:
            return {"error": "无法提取PDF文本", "structure": {}}

        # Page count: open once and always close. (The original re-opened
        # the PDF inside the return expression and never closed that handle.)
        doc = fitz.open(pdf_path)
        try:
            total_pages = min(max_pages, len(doc))
        finally:
            doc.close()

        structure = analyzer.structure.identify_sections(text)

        sections_content = {}
        lines = text.split('\n')

        for section, positions in structure.items():
            if not positions:
                continue
            start_pos = positions[0]

            # A section ends where the next *different* section's heading
            # begins (repeated headings of the same section are ignored).
            next_section_pos = len(lines)
            for other_section, other_positions in structure.items():
                if other_section == section:
                    continue
                for pos in other_positions:
                    if start_pos < pos < next_section_pos:
                        next_section_pos = pos
                        break  # positions ascend, so the first hit is minimal

            content = analyzer.structure.extract_section_content(
                text, start_pos, next_section_pos
            )

            sections_content[section] = {
                'line_number': start_pos,
                'content': content[:2000],  # cap stored excerpt length
                'content_length': len(content),
                # Multiple matching headings make the location ambiguous.
                'confidence': 1.0 if len(positions) == 1 else 0.8
            }

        return {
            "pdf_path": pdf_path,
            "total_pages": total_pages,
            "structure_detected": structure,
            "sections_content": sections_content,
            "analysis_quality": {
                "sections_found": len([s for s in sections_content.values() if s['content']]),
                "total_sections": len(structure),
                "confidence": sum(s['confidence'] for s in sections_content.values()) / len(sections_content) if sections_content else 0
            }
        }

    except Exception as e:
        return {"error": f"解析论文结构失败: {str(e)}", "structure": {}}

@tool("extract_paper_metadata", "提取论文元数据", ["pdf_path"])
def extract_paper_metadata(pdf_path: str) -> Dict[str, Any]:
    """Extract bibliographic metadata from a research paper.

    Args:
        pdf_path: Path to the PDF file.

    Returns:
        Dict with a "metadata" payload (title, authors, year, keywords,
        doi, journal, plus bookkeeping fields) and an "extraction_quality"
        summary; on failure, a dict with an "error" message.
    """
    try:
        if not os.path.exists(pdf_path):
            return {"error": "PDF文件不存在", "metadata": {}}

        analyzer = ResearchPaperAnalyzer()

        # Bibliographic metadata almost always lives in the first pages.
        text = analyzer.extract_text_from_pdf(pdf_path, 3)
        if not text:
            return {"error": "无法提取PDF文本", "metadata": {}}

        metadata = analyzer.extract_metadata_from_text(text)

        # Count extracted fields BEFORE augmenting the dict below; the
        # original counted the bookkeeping keys too (extraction_time,
        # pdf_path, file_size, confidence_scores), inflating the quality
        # score relative to "total_fields": 6.
        core_fields = ('title', 'authors', 'year', 'keywords', 'doi', 'journal')
        fields_extracted = sum(1 for field in core_fields if metadata[field])

        # Per-field weights used for the overall confidence score.
        field_weights = {
            'title': 0.8, 'authors': 0.7, 'year': 0.9,
            'keywords': 0.6, 'doi': 0.5, 'journal': 0.4
        }
        weighted_score = sum(w for field, w in field_weights.items() if metadata[field])

        # Augment the payload with bookkeeping information.
        metadata.update({
            "extraction_time": datetime.now(),
            "pdf_path": pdf_path,
            "file_size": os.path.getsize(pdf_path),
            "confidence_scores": {
                "title": 0.8 if metadata['title'] else 0.0,
                "authors": 0.7 if metadata['authors'] else 0.0,
                "year": 0.9 if metadata['year'] else 0.0,
                "keywords": 0.6 if metadata['keywords'] else 0.0
            }
        })

        return {
            "metadata": metadata,
            "extraction_quality": {
                "fields_extracted": fields_extracted,
                "total_fields": len(core_fields),
                # Normalize by the actual total weight (3.9). The original
                # divided by 2.9, so "confidence" could exceed 1.0.
                "overall_confidence": weighted_score / sum(field_weights.values())
            }
        }

    except Exception as e:
        return {"error": f"提取元数据失败: {str(e)}", "metadata": {}}

@tool("extract_abstract_content", "提取论文摘要内容", ["pdf_path"], {"max_length": 2000})
def extract_abstract_content(pdf_path: str, max_length: int = 2000) -> Dict[str, Any]:
    """Extract the abstract of a research paper.

    Args:
        pdf_path: Path to the PDF file.
        max_length: Maximum number of characters returned in "abstract".

    Returns:
        Dict with the abstract text, its length, the extraction confidence
        and method, and simple quality indicators; on failure, a dict with
        an "error" message and an empty "abstract".
    """
    try:
        if not os.path.exists(pdf_path):
            return {"error": "PDF文件不存在", "abstract": ""}

        analyzer = ResearchPaperAnalyzer()

        # The abstract is almost always within the first two pages.
        text = analyzer.extract_text_from_pdf(pdf_path, 2)
        if not text:
            return {"error": "无法提取PDF文本", "abstract": ""}

        # Capture everything between the "Abstract" heading and the next
        # blank line / keywords block / introduction heading.
        abstract_patterns = [
            r'abstract[:\-\s]*\n(.*?)(?:\n\s*\n|\n\s*keywords|\n\s*key\s*words|\n\s*1\s+introduction)',
            r'摘要[:\-\s]*\n(.*?)(?:\n\s*\n|\n\s*关键词|\n\s*1\s+引言)'
        ]

        abstract_text = ""
        confidence = 0.0

        for pattern in abstract_patterns:
            match = re.search(pattern, text, re.IGNORECASE | re.DOTALL)
            if match:
                abstract_text = match.group(1).strip()
                confidence = 0.9 if 'abstract' in pattern else 0.7
                break

        # Fallback: treat the first few paragraphs after the header block
        # as the abstract.
        if not abstract_text:
            lines = text.split('\n')
            paragraphs = []
            current_para = ""

            for line in lines[10:50]:  # skip the title/author header
                line = line.strip()
                if line:
                    current_para += line + " "
                elif current_para:
                    paragraphs.append(current_para.strip())
                    current_para = ""
                    if len(paragraphs) >= 3:  # at most the first 3 paragraphs
                        break

            # Flush the trailing paragraph: the original dropped it whenever
            # the scanned window ended without a blank separator line.
            if current_para and len(paragraphs) < 3:
                paragraphs.append(current_para.strip())

            if paragraphs and len(''.join(paragraphs)) < 1500:  # plausible abstract length
                abstract_text = '\n'.join(paragraphs)
                confidence = 0.5

        return {
            "abstract": abstract_text[:max_length],
            "abstract_length": len(abstract_text),
            "extraction_confidence": confidence,
            "extraction_method": "pattern_matching" if confidence > 0.5 else "heuristic",
            "quality_indicators": {
                "has_structure": bool(re.search(r'\n\s*\d+\s+', abstract_text)),  # numbered sub-items
                "avg_sentence_length": len(abstract_text) / max(abstract_text.count('.'), 1),
                "technical_terms": len(re.findall(r'\b[A-Z][a-z]+\b', abstract_text))  # capitalized terms
            }
        }

    except Exception as e:
        return {"error": f"提取摘要失败: {str(e)}", "abstract": ""}

# Register all tools with the global tool system
def register_research_tools():
    """Announce the research-literature tools available in this module.

    Registration itself happens via the @tool decorator at import time;
    this function only prints a summary of what became available.
    """
    tool_descriptions = (
        "  - scan_research_papers: 扫描目录中的科研文献PDF",
        "  - parse_research_paper: 解析科研论文结构",
        "  - extract_paper_metadata: 提取论文元数据",
        "  - extract_abstract_content: 提取论文摘要内容",
    )
    print("📚 科研文献处理工具已注册")
    print("可用工具:")
    for description in tool_descriptions:
        print(description)

# On import, announce the registered tools; when run directly, perform a
# lightweight smoke test instead.
if __name__ == "__main__":
    print("🔬 测试科研文献处理工具")

    # Path to a sample PDF; point this at a real file to exercise the tools.
    sample_pdf = "example.pdf"

    if os.path.exists(sample_pdf):
        print(f"\n测试文件: {sample_pdf}")

        # Each step below names a tool that can be invoked manually against
        # the sample file (scan, metadata, abstract, structure).
        print("\n1. 扫描科研文献...")
        print("\n2. 提取元数据...")
        print("\n3. 提取摘要...")
        print("\n4. 解析论文结构...")
    else:
        print(f"测试文件 {sample_pdf} 不存在，请提供有效的PDF文件路径")
        print("工具已定义，可以在实际PDF文件上测试功能")
else:
    register_research_tools()