#!/usr/bin/env python3
"""
Example script demonstrating Qwen3 Client for paper structure analysis
"""

import os
import re
import logging
from pathlib import Path
from typing import List, Dict

from qwen3_client import Qwen3Client, Qwen3Config, PaperStructureAnalyzer, setup_logging


def create_sample_paper_content() -> str:
    """Return a hard-coded sample paper in Markdown for the demo.

    The document is a small but structurally complete data-assimilation
    paper (abstract, numbered sections with dotted sub-sections, and
    references) so the structure analyzer has realistic headings to parse.
    """
    sample_markdown = """
# An Investigation into Data Assimilation Methods for Numerical Weather Prediction

## Abstract

This paper presents a comprehensive study of data assimilation techniques used in numerical weather prediction systems. We focus on variational methods and ensemble-based approaches, particularly 3D-Var, 4D-Var, and Ensemble Kalman Filter (EnKF).

## 1. Introduction

Data assimilation is a crucial component of modern numerical weather prediction (NWP) systems. It combines observational data with a background forecast to produce an optimal initial state for the model.

The main challenges in data assimilation include:
- Handling different observation types
- Managing computational complexity
- Dealing with model errors

## 2. Methodology

### 2.1 Variational Methods

The variational approach formulates data assimilation as an optimization problem. The cost function J(x) is defined as:

J(x) = (x - x_b)^T B^(-1) (x - x_b) + (H(x) - y)^T R^(-1) (H(x) - y)

where:
- x is the analysis state
- x_b is the background state
- B is the background error covariance matrix
- H is the observation operator
- y are the observations
- R is the observation error covariance matrix

### 2.2 Ensemble Methods

Ensemble Kalman Filter (EnKF) uses an ensemble of model states to estimate the background error covariance matrix dynamically.

## 3. Implementation

The implementation was done using the GSI (Gridpoint Statistical Interpolation) system for variational methods and the EnKF system for ensemble-based data assimilation.

Key components include:
- Background error covariance modeling
- Observation operator implementation
- Quality control procedures
- Parallel processing optimization

## 4. Experiments

We conducted experiments using:
- NCEP GFS model
- Conventional observations (radiosondes, surface stations)
- Satellite observations (AMSU-A, IASI)

The experimental setup included:
1. Control run with 3D-Var
2. Experiment with 4D-Var
3. Experiment with EnKF

## 5. Results

The results show that:
- 4D-Var provides better analysis quality than 3D-Var
- EnKF performs well for short-range forecasts
- Hybrid methods combine advantages of both approaches

Statistical verification shows RMS error reduction of 15-20% for temperature and wind fields.

## 6. Discussion

The choice of data assimilation method depends on:
- Computational resources available
- Forecast requirements
- Observation network density

EnKF excels in capturing flow-dependent error structures, while variational methods provide better constraint with observations.

## 7. Conclusion

This study demonstrates the effectiveness of modern data assimilation techniques for numerical weather prediction. Future work will focus on hybrid variational-ensemble methods and machine learning applications.

## References

1. Kalnay, E. (2003). Atmospheric Modeling, Data Assimilation and Predictability.
2. Evensen, G. (2009). Data Assimilation: The Ensemble Kalman Filter.
3. Bannister, R. N. (2017). A review of operational methods of variational and ensemble-variational data assimilation.
"""
    return sample_markdown


def analyze_paper_sections(paper_content: str, client: "Qwen3Client") -> Dict[str, str]:
    """
    Analyze paper sections using Qwen3.

    Extracts every Markdown heading title, strips any leading section
    numbering (including dotted sub-numbers such as "2.1"), and asks the
    client for a Chinese technical analysis of each section title.

    Args:
        paper_content: The paper content (Markdown text)
        client: Qwen3 client instance

    Returns:
        Dictionary mapping cleaned section titles to their analysis text,
        or a failure message for titles whose API call raised
    """
    # Capture the title of every Markdown heading line (#, ##, ###, ...)
    section_pattern = r'^#+\s+(.+)$'
    sections = re.findall(section_pattern, paper_content, re.MULTILINE)

    analyses = {}

    for section in sections:
        # Strip leading numbering, including dotted sub-section numbers.
        # The previous pattern (^\d+\.?\s*) only consumed one number group,
        # turning "2.1 Variational Methods" into "1 Variational Methods".
        clean_section = re.sub(r'^\d+(\.\d+)*\.?\s*', '', section).strip()

        # Ask Qwen3 to classify and analyze the section title
        analysis_prompt = f"""请分析以下学术论文章节标题，并提供：
1. 章节类型分类（如：引言、方法论、实验、结果、结论等）
2. 该章节在论文中的作用
3. 与数据同化领域的相关性

章节标题：{clean_section}

请用中文回答，提供详细的技术分析。"""

        try:
            analysis = client.ask_chinese_technical(
                analysis_prompt,
                domain="学术研究"
            )
            analyses[clean_section] = analysis
            print(f"✅ 分析完成：{clean_section}")
        except Exception as e:
            # Record the failure instead of aborting the whole paper
            print(f"❌ 分析失败：{clean_section} - {e}")
            analyses[clean_section] = f"分析失败：{str(e)}"

    return analyses


def extract_keywords_by_section(paper_content: str, client: "Qwen3Client") -> Dict[str, List[str]]:
    """
    Extract keywords for each section using Qwen3.

    Splits the Markdown paper at heading lines, then asks the client for
    keywords on every section body that is at least 50 characters long.

    Args:
        paper_content: The paper content (Markdown text)
        client: Qwen3 client instance

    Returns:
        Dictionary mapping section titles to up to 10 extracted keywords
        (an empty list when the API call for that section fails)
    """
    # Split paper into sections keyed by their (number-stripped) heading
    sections = {}
    current_section = "Introduction"  # fallback name for text before the first heading
    current_content = []

    lines = paper_content.strip().split('\n')

    for line in lines:
        if re.match(r'^#+\s+', line):
            # Save previous section before starting a new one
            if current_content:
                sections[current_section] = '\n'.join(current_content)

            # Strip heading markers plus any leading numbering, including
            # dotted sub-numbers ("2.1"). The previous pattern
            # (^#+\s+(\d+\.?\s*)?) left "1 Variational Methods" behind.
            current_section = re.sub(r'^#+\s+(\d+(\.\d+)*\.?\s*)?', '', line).strip()
            current_content = []
        else:
            current_content.append(line)

    # Save last section
    if current_content:
        sections[current_section] = '\n'.join(current_content)

    # Extract keywords for each section
    section_keywords = {}

    for section_name, section_text in sections.items():
        if len(section_text.strip()) < 50:  # Skip very short sections
            continue

        # Only mark the excerpt as truncated when it actually is; the old
        # prompt appended "..." unconditionally, even for short sections.
        excerpt = section_text[:1000] + ("..." if len(section_text) > 1000 else "")

        keyword_prompt = f"""请从以下学术论文章节中提取5-10个最重要的关键词：

章节：{section_name}
内容：
{excerpt}

请返回关键词列表，每行一个关键词，包括中英文对照（如适用）。"""

        try:
            keywords_response = client.ask(keyword_prompt)
            # Parse keywords from response; renamed loop variable so it does
            # not shadow the outer `line` used for the section split above
            keywords = []
            for response_line in keywords_response.split('\n'):
                response_line = response_line.strip()
                if response_line and not response_line.startswith('#') and len(response_line) > 2:
                    # Drop list bullets / numbering in front of the keyword
                    keyword = re.sub(r'^[-*•]\s*', '', response_line)
                    keyword = re.sub(r'^\d+\.\s*', '', keyword)
                    if keyword:
                        keywords.append(keyword)

            section_keywords[section_name] = keywords[:10]  # Limit to 10 keywords
            print(f"✅ 关键词提取完成：{section_name} ({len(keywords)} 个关键词)")

        except Exception as e:
            # Record an empty keyword list instead of aborting the paper
            print(f"❌ 关键词提取失败：{section_name} - {e}")
            section_keywords[section_name] = []

    return section_keywords


def main():
    """Run the full demo: connect, analyze a sample paper, save results.

    Steps:
      1. Validate DASHSCOPE_API_KEY and the Qwen3 API connection.
      2. Write a sample Markdown paper to sample_paper.md.
      3. Run structure analysis, per-section analysis and keyword extraction.
      4. Persist all results as JSON under ./analysis_results.
    """
    import json  # hoisted from mid-function: used below for persisting results

    # Setup logging
    logger = setup_logging("INFO")

    print("🚀 Qwen3 Client Paper Analysis Demo")
    print("=" * 50)

    # Check for API key
    api_key = os.getenv('DASHSCOPE_API_KEY')
    if not api_key:
        print("❌ Error: DASHSCOPE_API_KEY environment variable not set")
        print("Please set your Dashscope API key:")
        print("export DASHSCOPE_API_KEY='your-api-key-here'")
        return

    try:
        # Create Qwen3 client
        config = Qwen3Config.from_env()
        client = Qwen3Client(config, logger)

        # Test connection
        print("🔗 Testing API connection...")
        if not client.validate_connection():
            print("❌ Failed to connect to Qwen3 API")
            return
        print("✅ Connection successful")

        # Create sample paper
        print("📄 Creating sample paper content...")
        paper_content = create_sample_paper_content()

        # Save sample paper to file
        sample_paper_path = Path("sample_paper.md")
        with open(sample_paper_path, 'w', encoding='utf-8') as f:
            f.write(paper_content)
        print(f"✅ Sample paper saved to: {sample_paper_path}")

        # Analyze paper structure using the specialized analyzer.
        # Renamed from `analysis`: the original reused that name as a loop
        # variable below, clobbering this dict before it was saved to JSON.
        print("🔍 Analyzing paper structure...")
        analyzer = PaperStructureAnalyzer(client, logger)
        structure_analysis = analyzer.analyze_paper_file(sample_paper_path)

        print("📊 Structure Analysis Results:")
        print(f"  - Total sections: {structure_analysis['total_sections']}")
        print(f"  - File type: {structure_analysis['file_type']}")
        print(f"  - Structure completeness: {structure_analysis['structure_summary']['structure_completeness_score']:.2f}")

        # Show classified sections.
        # NOTE: the original source had "\\n" in these strings, printing a
        # literal backslash-n instead of a blank line; fixed to real newlines.
        print("\n📋 Section Classifications:")
        for section in structure_analysis['classified_sections']:
            print(f"  - {section['title']} → {section['classification']} (confidence: {section.get('confidence', 'N/A')})")

        # Extract specific sections
        print("\n📖 Extracting key sections...")
        extracted_sections = analyzer.extract_section_content(
            sample_paper_path,
            ["introduction", "methodology", "results", "conclusion"]
        )

        for section_type, content in extracted_sections.items():
            if content:
                print(f"\n=== {section_type.upper()} ===")
                print(content[:200] + "..." if len(content) > 200 else content)

        # Demonstrate section analysis
        print("\n🧠 Analyzing sections with Qwen3...")
        section_analyses = analyze_paper_sections(paper_content, client)

        # Show first few analyses (loop variable deliberately NOT `analysis`)
        for i, (section, section_analysis) in enumerate(section_analyses.items()):
            if i >= 2:  # Limit output for demo
                break
            print(f"\n=== {section} ===")
            print(section_analysis[:300] + "..." if len(section_analysis) > 300 else section_analysis)

        # Extract keywords
        print("\n🔤 Extracting keywords by section...")
        keywords_by_section = extract_keywords_by_section(paper_content, client)

        for section, keywords in keywords_by_section.items():
            if keywords:  # Only show sections with keywords
                print(f"\n{section}: {', '.join(keywords[:5])}")  # Show first 5 keywords

        # Save results
        results_dir = Path("analysis_results")
        results_dir.mkdir(exist_ok=True)

        # Save structure analysis
        with open(results_dir / "structure_analysis.json", 'w', encoding='utf-8') as f:
            json.dump(structure_analysis, f, indent=2, ensure_ascii=False)

        # Save section analyses
        with open(results_dir / "section_analyses.json", 'w', encoding='utf-8') as f:
            json.dump(section_analyses, f, indent=2, ensure_ascii=False)

        # Save keywords
        with open(results_dir / "keywords_by_section.json", 'w', encoding='utf-8') as f:
            json.dump(keywords_by_section, f, indent=2, ensure_ascii=False)

        print(f"\n💾 Results saved to: {results_dir}")
        print("\n🎉 Demo completed successfully!")

        # Show CLI usage examples
        print("\n" + "=" * 50)
        print("📝 CLI Usage Examples:")
        print("=" * 50)
        print("# Ask a question:")
        print("qwen3-client ask '什么是数据同化?' --chinese")
        print()
        print("# Analyze paper structure:")
        print(f"qwen3-analyze-paper {sample_paper_path} --output analysis.json")
        print()
        print("# Extract specific sections:")
        print(f"qwen3-analyze-paper {sample_paper_path} --extract-sections introduction methodology --output-dir sections/")

    except Exception as e:
        logger.error(f"Demo failed: {e}")
        print(f"❌ Demo failed: {e}")


# Script entry point: run the demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()