#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
export.py - 数据导出模块
负责从数据库读取结果并导出到CSV文件
"""

import sqlite3
import csv
import json
import logging
from typing import Dict, List, Optional, Any
import pandas as pd
from datetime import datetime

# Module-level logger; handlers/level are configured by main() or the host app.
logger = logging.getLogger(__name__)

class DataExporter:
    """Export processed citation results from the SQLite database to CSV or
    Excel files, together with a statistics summary (text report or extra
    worksheet).
    """

    def __init__(self, db_path: str = 'articles.db'):
        """
        Initialize the data exporter.

        Args:
            db_path: Path to the SQLite database file.
        """
        self.db_path = db_path

    def get_export_data(self) -> List[Dict[str, Any]]:
        """
        Load all processed citing articles from the database.

        Returns:
            A list of dicts, one per citing article, containing the source
            article title, citing-article metadata and the parsed
            corresponding-author list.
        """
        conn = sqlite3.connect(self.db_path)
        cursor = conn.cursor()

        try:
            # Map source-article id -> title so each citing row can be labelled.
            cursor.execute('SELECT id, title FROM source_articles')
            source_articles = {row[0]: row[1] for row in cursor.fetchall()}

            # Only rows already processed (processed = 1) are exported,
            # grouped per source article with newest publications first.
            query = '''
                SELECT 
                    source_article_id,
                    title as citing_title,
                    url as citing_url,
                    doi as citing_doi,
                    publication_year,
                    authors_info,
                    processed,
                    error_message
                FROM citing_articles
                WHERE processed = 1
                ORDER BY source_article_id, publication_year DESC
            '''

            cursor.execute(query)
            results = cursor.fetchall()

            export_data = []
            for row in results:
                (source_article_id, citing_title, citing_url, citing_doi,
                 pub_year, authors_info_str, processed, error_msg) = row

                # Fall back to a placeholder title for unknown source articles.
                source_title = source_articles.get(source_article_id, '未知文章')

                # authors_info is stored as JSON text; tolerate a bad payload
                # instead of aborting the whole export.
                authors_info = []
                if authors_info_str:
                    try:
                        authors_info = json.loads(authors_info_str)
                    except json.JSONDecodeError:
                        logger.warning(f"无法解析作者信息: {authors_info_str}")

                export_data.append({
                    'source_title': source_title or '',
                    'citing_title': citing_title or '',
                    'citing_url': citing_url or '',
                    'citing_doi': citing_doi or '',
                    'publication_year': pub_year or '',
                    'authors_info': authors_info,
                    'processed': processed,
                    'error_message': error_msg or ''
                })

            logger.info(f"获取到 {len(export_data)} 条导出数据")
            return export_data

        finally:
            conn.close()

    def prepare_csv_data(self, export_data: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """
        Flatten the export data into CSV-ready rows.

        Rows are grouped by (source title, citing title); the corresponding
        authors of duplicate entries are merged and spread over a fixed set
        of name/email column pairs sized by the largest author list.

        Args:
            export_data: Raw rows as produced by get_export_data().

        Returns:
            A list of flat dicts that all share the same column keys.
        """
        csv_data = []

        # Group by citing article and merge corresponding authors of
        # duplicate entries, skipping authors that are already present.
        grouped_data = {}
        for item in export_data:
            key = (item['source_title'], item['citing_title'])
            if key not in grouped_data:
                grouped_data[key] = {
                    'source_title': item['source_title'],
                    'citing_title': item['citing_title'],
                    'authors_info': []
                }

            if item['authors_info']:
                for author in item['authors_info']:
                    if author not in grouped_data[key]['authors_info']:
                        grouped_data[key]['authors_info'].append(author)

        # The widest author list determines how many author columns to emit.
        max_authors = 0
        for item in grouped_data.values():
            if item['authors_info']:
                max_authors = max(max_authors, len(item['authors_info']))

        logger.info(f"最大通讯作者数量: {max_authors}")

        for item in grouped_data.values():
            row = {
                'eScience发表文章题目': item['source_title'],
                '引用文章': item['citing_title']
            }

            # Pad every row to max_authors name/email pairs so all rows share
            # one header.
            authors = item['authors_info'] if item['authors_info'] else []
            for i in range(max_authors):
                author_num = i + 1
                if i < len(authors):
                    row[f'通讯作者{author_num}姓名'] = authors[i].get('name', '')
                    row[f'通讯作者{author_num}邮箱'] = authors[i].get('email', '')
                else:
                    row[f'通讯作者{author_num}姓名'] = ''
                    row[f'通讯作者{author_num}邮箱'] = ''

            csv_data.append(row)

        return csv_data

    def export_to_csv(self, output_file: Optional[str] = None) -> str:
        """
        Export the data to a CSV file plus a text statistics report.

        Args:
            output_file: Output file path; a timestamped name is generated
                when omitted.

        Returns:
            The actual output file path.

        Raises:
            Exception: Re-raises any error hit while writing the CSV file.
        """
        if not output_file:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_file = f"eScience_citations_{timestamp}.csv"

        export_data = self.get_export_data()

        if not export_data:
            logger.warning("没有数据可导出")
            return output_file

        csv_data = self.prepare_csv_data(export_data)

        try:
            # utf-8-sig adds a BOM so Excel opens the CSV with correct CJK text.
            with open(output_file, 'w', newline='', encoding='utf-8-sig') as csvfile:
                if csv_data:
                    fieldnames = list(csv_data[0].keys())
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                    writer.writeheader()
                    writer.writerows(csv_data)

            logger.info(f"成功导出 {len(csv_data)} 条记录到 {output_file}")

            self._generate_statistics(export_data, output_file)

            return output_file

        except Exception as e:
            logger.error(f"导出CSV文件失败: {e}")
            raise

    def _generate_statistics(self, export_data: List[Dict[str, Any]], csv_file: str):
        """
        Write a plain-text statistics report next to the CSV file.

        Args:
            export_data: Raw rows as produced by get_export_data().
            csv_file: Path of the CSV file the report belongs to.
        """
        try:
            total_articles = len(export_data)
            processed_articles = sum(1 for item in export_data if item['processed'])
            articles_with_authors = sum(1 for item in export_data if item['authors_info'])
            total_authors = sum(len(item['authors_info']) for item in export_data if item['authors_info'])
            # Guard against an empty data set (callers currently never pass one).
            success_rate = processed_articles / total_articles * 100 if total_articles else 0.0

            year_stats = self._count_by_year(export_data)

            # Citation count per source article.
            source_stats = {}
            for item in export_data:
                source = item['source_title']
                if source:
                    source_stats[source] = source_stats.get(source, 0) + 1

            # Derive the report name from the CSV path only at its suffix;
            # the previous str.replace('.csv', ...) replaced every occurrence
            # and, for a path without '.csv', clobbered the CSV file itself.
            if csv_file.endswith('.csv'):
                stats_file = csv_file[:-len('.csv')] + '_statistics.txt'
            else:
                stats_file = csv_file + '_statistics.txt'

            with open(stats_file, 'w', encoding='utf-8') as f:
                f.write("eScience期刊引用文献统计报告\n")
                f.write("=" * 50 + "\n\n")

                f.write("总体统计:\n")
                f.write(f"  总引用文献数: {total_articles}\n")
                f.write(f"  已处理文献数: {processed_articles}\n")
                f.write(f"  有通讯作者信息的文献数: {articles_with_authors}\n")
                f.write(f"  通讯作者总数: {total_authors}\n")
                f.write(f"  处理成功率: {success_rate:.1f}%\n\n")

                f.write("按发表年份统计:\n")
                for year in sorted(year_stats.keys(), reverse=True):
                    f.write(f"  {year}年: {year_stats[year]}篇\n")
                f.write("\n")

                f.write("按源文章统计:\n")
                for source, count in sorted(source_stats.items(), key=lambda x: x[1], reverse=True):
                    f.write(f"  {source}: {count}篇引用\n")

            logger.info(f"统计报告已生成: {stats_file}")

        except Exception as e:
            # Statistics are best-effort; a failure must not kill the export.
            logger.error(f"生成统计信息失败: {e}")

    def export_to_excel(self, output_file: Optional[str] = None) -> str:
        """
        Export the data to an Excel workbook (data sheet + statistics sheet).

        Args:
            output_file: Output file path; a timestamped name is generated
                when omitted.

        Returns:
            The actual output file path.

        Raises:
            Exception: Re-raises any error hit while writing the workbook.
        """
        if not output_file:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_file = f"eScience_citations_{timestamp}.xlsx"

        export_data = self.get_export_data()

        if not export_data:
            logger.warning("没有数据可导出")
            return output_file

        # Excel rows reuse the flattened CSV layout.
        csv_data = self.prepare_csv_data(export_data)

        try:
            df = pd.DataFrame(csv_data)

            with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
                df.to_excel(writer, sheet_name='引用文献数据', index=False)

                stats_df = self._create_statistics_dataframe(export_data)
                stats_df.to_excel(writer, sheet_name='统计信息', index=False)

            logger.info(f"成功导出 {len(csv_data)} 条记录到 {output_file}")
            return output_file

        except Exception as e:
            logger.error(f"导出Excel文件失败: {e}")
            raise

    @staticmethod
    def _count_by_year(export_data: List[Dict[str, Any]]) -> Dict[Any, int]:
        """Count exported articles per publication year, skipping empty years.

        Shared by the text report and the Excel statistics sheet so the two
        summaries cannot drift apart.
        """
        year_stats: Dict[Any, int] = {}
        for item in export_data:
            year = item['publication_year']
            if year:
                year_stats[year] = year_stats.get(year, 0) + 1
        return year_stats

    def _create_statistics_dataframe(self, export_data: List[Dict[str, Any]]) -> pd.DataFrame:
        """
        Build the per-year statistics table for the Excel export.

        Args:
            export_data: Raw rows as produced by get_export_data().

        Returns:
            DataFrame with one row per publication year, newest first.
        """
        year_stats = self._count_by_year(export_data)

        stats_data = [
            {'发表年份': year, '文献数量': year_stats[year]}
            for year in sorted(year_stats.keys(), reverse=True)
        ]

        return pd.DataFrame(stats_data)

    def run_export_step(self, output_format: str = 'csv', output_file: Optional[str] = None) -> str:
        """
        Run the export step in the requested format.

        Args:
            output_format: 'excel' (case-insensitive) for an .xlsx workbook;
                anything else (default 'csv') produces a CSV file.
            output_file: Optional explicit output path.

        Returns:
            The path of the file that was written.

        Raises:
            Exception: Re-raises any error from the underlying export.
        """
        logger.info("开始执行第三步：导出数据")

        try:
            if output_format.lower() == 'excel':
                result_file = self.export_to_excel(output_file)
            else:
                result_file = self.export_to_csv(output_file)

            logger.info(f"第三步完成，数据已导出到: {result_file}")
            return result_file

        except Exception as e:
            logger.error(f"导出失败: {e}")
            raise

def main():
    """Command-line entry point.

    Configures logging, takes an optional database path from the first
    positional argument, then runs both the CSV and the Excel export.
    """
    import sys

    # Basic console logging for standalone runs.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )

    # First positional argument overrides the default database path.
    db_path = sys.argv[1] if len(sys.argv) > 1 else 'articles.db'

    logger.info(f"使用数据库: {db_path}")

    exporter = DataExporter(db_path)

    # Produce both output formats, CSV first.
    for output_format in ('csv', 'excel'):
        exporter.run_export_step(output_format)

if __name__ == "__main__":
    main()