"""
Paper structure analysis functionality using Qwen3
"""

import logging
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

from .core import Qwen3Client, Qwen3Config


class PaperStructureAnalyzer:
    """Analyze paper structure using Qwen3 model"""
    
    # Standard academic paper sections
    STANDARD_SECTIONS = [
        "abstract", "introduction", "related_work", "methodology", 
        "algorithm", "implementation", "experiments", "results", 
        "discussion", "conclusion", "references", "appendix"
    ]
    
    def __init__(self, qwen_client: Qwen3Client, logger: Optional[logging.Logger] = None):
        self.qwen_client = qwen_client
        self.logger = logger or logging.getLogger(__name__)
    
    def analyze_paper_file(self, file_path: Path) -> Dict[str, any]:
        """
        Analyze a paper file to extract structure
        
        Args:
            file_path: Path to paper file (LaTeX, Markdown, or text)
        
        Returns:
            Dictionary containing analyzed structure
        """
        if not file_path.exists():
            raise FileNotFoundError(f"Paper file not found: {file_path}")
        
        # Read file content
        content = self._read_paper_file(file_path)
        
        # Extract sections based on file type
        sections = self._extract_sections(content, file_path.suffix)
        
        # Classify sections using Qwen3
        classified_sections = self._classify_sections_with_qwen(sections)
        
        # Analyze content of each section
        analyzed_content = self._analyze_section_content(content, classified_sections)
        
        result = {
            "file_path": str(file_path),
            "file_type": file_path.suffix,
            "total_sections": len(sections),
            "raw_sections": sections,
            "classified_sections": classified_sections,
            "content_analysis": analyzed_content,
            "structure_summary": self._generate_structure_summary(classified_sections)
        }
        
        self.logger.info(f"Analyzed paper structure: {len(sections)} sections found")
        return result
    
    def extract_section_content(self, file_path: Path, section_types: List[str]) -> Dict[str, str]:
        """
        Extract content for specific section types
        
        Args:
            file_path: Path to paper file
            section_types: List of section types to extract (e.g., ['introduction', 'methodology'])
        
        Returns:
            Dictionary mapping section types to their content
        """
        analysis = self.analyze_paper_file(file_path)
        classified_sections = analysis["classified_sections"]
        
        content = self._read_paper_file(file_path)
        extracted_content = {}
        
        for section_type in section_types:
            matching_sections = [s for s in classified_sections if s["classification"] == section_type]
            
            if matching_sections:
                # Combine content from all matching sections
                combined_content = []
                for section in matching_sections:
                    section_content = self._extract_section_text(content, section)
                    if section_content:
                        combined_content.append(f"## {section['title']}\n\n{section_content}")
                
                extracted_content[section_type] = "\n\n".join(combined_content)
            else:
                extracted_content[section_type] = ""
        
        return extracted_content
    
    def classify_custom_sections(self, section_titles: List[str], domain: str = "academic") -> List[Dict[str, str]]:
        """
        Classify custom section titles using Qwen3
        
        Args:
            section_titles: List of section titles to classify
            domain: Domain context for classification
        
        Returns:
            List of classification results
        """
        classification_prompt = f"""请将以下{domain}文档的章节标题分类到标准学术论文结构中。

标准分类包括：
- abstract: 摘要
- introduction: 引言/介绍
- related_work: 相关工作/文献综述
- methodology: 方法论/理论基础
- algorithm: 算法/具体方法
- implementation: 实现/系统设计
- experiments: 实验/验证
- results: 结果/分析
- discussion: 讨论
- conclusion: 结论
- references: 参考文献
- appendix: 附录
- other: 其他

请为每个章节标题返回最合适的分类，格式：
标题 -> 分类

章节标题列表：
{chr(10).join(f'{i+1}. {title}' for i, title in enumerate(section_titles))}"""

        try:
            response = self.qwen_client.ask(classification_prompt)
            return self._parse_classification_response(response, section_titles)
        except Exception as e:
            self.logger.error(f"Failed to classify sections: {e}")
            # Fallback to simple classification
            return [{"title": title, "classification": "other", "confidence": 0.5} for title in section_titles]
    
    def _read_paper_file(self, file_path: Path) -> str:
        """Read paper file content"""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        except UnicodeDecodeError:
            # Try different encodings
            for encoding in ['latin1', 'cp1252', 'iso-8859-1']:
                try:
                    with open(file_path, 'r', encoding=encoding) as f:
                        return f.read()
                except UnicodeDecodeError:
                    continue
            raise UnicodeDecodeError(f"Could not read file {file_path} with any encoding")
    
    def _extract_sections(self, content: str, file_type: str) -> List[Dict[str, any]]:
        """Extract sections based on file type"""
        sections = []
        
        if file_type == '.tex':
            # LaTeX sections: \section{title}, \subsection{title}, etc.
            patterns = [
                r'\\section\*?\{([^}]+)\}',
                r'\\subsection\*?\{([^}]+)\}',
                r'\\subsubsection\*?\{([^}]+)\}'
            ]
            
            for pattern in patterns:
                matches = re.finditer(pattern, content, re.IGNORECASE)
                for match in matches:
                    sections.append({
                        "title": match.group(1).strip(),
                        "level": pattern.count('sub') + 1,
                        "start_pos": match.start(),
                        "end_pos": match.end(),
                        "type": "latex_section"
                    })
        
        elif file_type in ['.md', '.markdown']:
            # Markdown headers: # Title, ## Title, etc.
            pattern = r'^(#{1,6})\s+(.+)$'
            matches = re.finditer(pattern, content, re.MULTILINE)
            
            for match in matches:
                level = len(match.group(1))
                title = match.group(2).strip()
                sections.append({
                    "title": title,
                    "level": level,
                    "start_pos": match.start(),
                    "end_pos": match.end(),
                    "type": "markdown_header"
                })
        
        else:
            # Generic text - look for numbered sections or capitalized lines
            patterns = [
                r'^(\d+\.?\d*\.?)\s+([A-Z][^\\n]*?)$',  # "1. INTRODUCTION"
                r'^([IVX]+\.)\s+([A-Z][^\\n]*?)$',       # "I. INTRODUCTION"
                r'^([A-Z][A-Z\s]{3,}[A-Z])$'            # "INTRODUCTION"
            ]
            
            for pattern in patterns:
                matches = re.finditer(pattern, content, re.MULTILINE)
                for match in matches:
                    if len(match.groups()) == 2:
                        sections.append({
                            "title": match.group(2).strip(),
                            "level": 1,
                            "start_pos": match.start(),
                            "end_pos": match.end(),
                            "type": "text_section",
                            "number": match.group(1)
                        })
                    else:
                        sections.append({
                            "title": match.group(1).strip(),
                            "level": 1,
                            "start_pos": match.start(),
                            "end_pos": match.end(),
                            "type": "text_section"
                        })
        
        # Sort by position in document
        sections.sort(key=lambda x: x["start_pos"])
        return sections
    
    def _classify_sections_with_qwen(self, sections: List[Dict]) -> List[Dict]:
        """Classify sections using Qwen3"""
        if not sections:
            return []
        
        section_titles = [s["title"] for s in sections]
        classifications = self.classify_custom_sections(section_titles)
        
        # Merge classification results with section data
        classified_sections = []
        for i, section in enumerate(sections):
            classification = classifications[i] if i < len(classifications) else {"classification": "other", "confidence": 0.5}
            
            classified_section = section.copy()
            classified_section.update(classification)
            classified_sections.append(classified_section)
        
        return classified_sections
    
    def _analyze_section_content(self, content: str, classified_sections: List[Dict]) -> Dict[str, any]:
        """Analyze content of each section"""
        analysis = {
            "section_lengths": {},
            "keywords_by_section": {},
            "content_types": {}
        }
        
        for i, section in enumerate(classified_sections):
            section_text = self._extract_section_text(content, section, classified_sections, i)
            
            analysis["section_lengths"][section["title"]] = len(section_text)
            analysis["keywords_by_section"][section["title"]] = self._extract_keywords(section_text)
            analysis["content_types"][section["title"]] = self._analyze_content_type(section_text)
        
        return analysis
    
    def _extract_section_text(self, content: str, section: Dict, 
                            all_sections: Optional[List[Dict]] = None, 
                            section_index: Optional[int] = None) -> str:
        """Extract text content for a specific section"""
        start_pos = section["end_pos"]
        
        # Find end position (start of next section or end of document)
        if all_sections and section_index is not None and section_index < len(all_sections) - 1:
            end_pos = all_sections[section_index + 1]["start_pos"]
        else:
            end_pos = len(content)
        
        section_text = content[start_pos:end_pos].strip()
        return section_text
    
    def _extract_keywords(self, text: str) -> List[str]:
        """Extract keywords from text"""
        # Simple keyword extraction - can be enhanced
        words = re.findall(r'\b[A-Za-z]{4,}\b', text.lower())
        # Filter common words and get unique ones
        common_words = {'that', 'this', 'with', 'from', 'they', 'have', 'will', 'been', 'were', 'said', 'each', 'which', 'their', 'time', 'would', 'there', 'could', 'other'}
        keywords = list(set(word for word in words if word not in common_words))
        return keywords[:10]  # Return top 10 keywords
    
    def _analyze_content_type(self, text: str) -> Dict[str, any]:
        """Analyze content type characteristics"""
        analysis = {
            "has_equations": bool(re.search(r'\$.*?\$|\\begin\{equation\}', text)),
            "has_figures": bool(re.search(r'\\begin\{figure\}|!\[.*?\]|\[.*?\]\(.*?\)', text)),
            "has_tables": bool(re.search(r'\\begin\{table\}|\|.*?\|', text)),
            "has_code": bool(re.search(r'```|\\begin\{lstlisting\}|\\begin\{verbatim\}', text)),
            "has_citations": bool(re.search(r'\\cite\{|\\ref\{|\[[0-9,\s]+\]', text)),
            "word_count": len(text.split()),
            "avg_sentence_length": self._calculate_avg_sentence_length(text)
        }
        return analysis
    
    def _calculate_avg_sentence_length(self, text: str) -> float:
        """Calculate average sentence length"""
        sentences = re.split(r'[.!?]+', text)
        sentences = [s.strip() for s in sentences if s.strip()]
        if not sentences:
            return 0.0
        
        total_words = sum(len(sentence.split()) for sentence in sentences)
        return total_words / len(sentences)
    
    def _parse_classification_response(self, response: str, section_titles: List[str]) -> List[Dict[str, str]]:
        """Parse Qwen3 classification response"""
        classifications = []
        
        # Try to parse the response
        lines = response.strip().split('\n')
        
        for i, title in enumerate(section_titles):
            classification = "other"
            confidence = 0.5
            
            # Look for classification in response
            for line in lines:
                if title.lower() in line.lower():
                    # Try to extract classification
                    for std_section in self.STANDARD_SECTIONS:
                        if std_section in line.lower():
                            classification = std_section
                            confidence = 0.8
                            break
                    break
            
            classifications.append({
                "title": title,
                "classification": classification,
                "confidence": confidence
            })
        
        return classifications
    
    def _generate_structure_summary(self, classified_sections: List[Dict]) -> Dict[str, any]:
        """Generate summary of paper structure"""
        section_counts = {}
        for section in classified_sections:
            classification = section["classification"]
            section_counts[classification] = section_counts.get(classification, 0) + 1
        
        # Check for standard academic structure
        has_intro = any(s["classification"] == "introduction" for s in classified_sections)
        has_method = any(s["classification"] in ["methodology", "algorithm"] for s in classified_sections)
        has_results = any(s["classification"] in ["results", "experiments"] for s in classified_sections)
        has_conclusion = any(s["classification"] == "conclusion" for s in classified_sections)
        
        structure_score = sum([has_intro, has_method, has_results, has_conclusion]) / 4.0
        
        return {
            "section_counts": section_counts,
            "has_standard_structure": structure_score >= 0.75,
            "structure_completeness_score": structure_score,
            "missing_standard_sections": [
                section for section in ["introduction", "methodology", "results", "conclusion"]
                if section not in section_counts
            ]
        }