# Standard library
import argparse
import logging
import sys
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from typing import Dict, List, Optional

# Third-party
import arxiv
import matplotlib.pyplot as plt
import pandas as pd
from textblob import TextBlob

# Local
from report_generator import ReportGenerator

class DataFetcher(ABC):
    """Abstract base class for paper-data sources.

    Concrete subclasses implement :meth:`fetch` and return their results as
    a pandas DataFrame so that several sources can be merged downstream.
    """

    @abstractmethod
    def fetch(self, query: str, max_results: int, **kwargs) -> pd.DataFrame:
        """Fetch up to *max_results* papers matching *query*."""

class ArxivFetcher(DataFetcher):
    """DataFetcher implementation backed by the arXiv API."""

    def fetch(self, query: str, max_results: int = 10,
              days: Optional[int] = None) -> pd.DataFrame:
        """Fetch recent arXiv papers matching *query*.

        Args:
            query: arXiv search query string.
            max_results: maximum number of results requested from arXiv.
            days: if given (and truthy), drop papers published more than
                *days* days ago.

        Returns:
            DataFrame with columns: title, authors, summary, published,
            categories, source.
        """
        search = arxiv.Search(
            query=query,
            max_results=max_results,
            sort_by=arxiv.SortCriterion.SubmittedDate
        )

        client = arxiv.Client()
        papers = []
        for result in client.results(search):
            # BUG FIX: result.published is timezone-aware (arXiv timestamps),
            # so the original naive datetime.now() made this subtraction raise
            # TypeError. Build "now" with the paper's own tzinfo so the
            # arithmetic is valid whether the timestamp is aware or naive.
            if days and (datetime.now(result.published.tzinfo) - result.published).days > days:
                continue
            papers.append({
                'title': result.title,
                'authors': [a.name for a in result.authors],
                'summary': result.summary,
                'published': result.published,
                'categories': result.categories,
                'source': 'arXiv'
            })
        return pd.DataFrame(papers)

# Import-time logging setup.
# NOTE(review): because basicConfig() runs here at import, any later
# basicConfig() call (e.g. configure_logging() below) is a silent no-op
# unless it passes force=True — confirm the intended configuration order.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TechTrendAnalyzer:
    """Aggregate papers from one or more DataFetcher sources and analyze
    publication trends (categories, keywords, sentiment, time series)."""

    def __init__(self, fetchers: Optional[List[DataFetcher]] = None):
        """Initialize the analyzer.

        Args:
            fetchers: list of data fetchers; defaults to [ArxivFetcher()].
        """
        # FIX: annotation was `List[DataFetcher] = None`; the parameter is
        # genuinely optional, so mark it Optional.
        self.data = pd.DataFrame()
        self.fetchers = fetchers or [ArxivFetcher()]

    def fetch_data(self, query: str = "artificial intelligence",
                   max_results: int = 10, **kwargs) -> pd.DataFrame:
        """Fetch paper data from every configured source.

        Args:
            query: search query.
            max_results: maximum results per source.
            **kwargs: extra arguments forwarded to each fetcher.

        Returns:
            Concatenated DataFrame of all papers (empty if every source fails).
        """
        all_data = []
        for fetcher in self.fetchers:
            try:
                data = fetcher.fetch(query, max_results, **kwargs)
                all_data.append(data)
                # Lazy %-style args: formatting is skipped when INFO is off.
                logger.info("Fetched %s papers from %s",
                            len(data), fetcher.__class__.__name__)
            except Exception as e:
                # Best effort: one failing source must not abort the others.
                logger.error("Error fetching from %s: %s",
                             fetcher.__class__.__name__, e)

        self.data = pd.concat(all_data, ignore_index=True) if all_data else pd.DataFrame()
        logger.info("Total fetched papers: %s", len(self.data))
        return self.data

    def fetch_arxiv_papers(self, query="artificial intelligence", max_results=10):
        """Deprecated compatibility wrapper: fetch from arXiv only.

        Note: this permanently replaces ``self.fetchers`` with a single
        ArxivFetcher, matching the original behavior.
        """
        logger.warning("This method is deprecated. Use fetch_data() instead.")
        self.fetchers = [ArxivFetcher()]
        return self.fetch_data(query, max_results)

    def analyze_trends(self, time_window: str = 'month') -> Dict:
        """Analyze technology trends over the fetched papers.

        Args:
            time_window: grouping window ('day', 'week', 'month', 'year').

        Returns:
            Dict with keys 'category_counts', 'trend_analysis',
            'top_keywords' and 'time_series'.

        Raises:
            ValueError: if no data has been fetched yet.
        """
        if self.data.empty:
            raise ValueError("No data to analyze. Please fetch data first.")

        # Time-series prep: first letter of the window maps to a pandas
        # period alias ('day'->'D', 'week'->'W', 'month'->'M', 'year'->'Y').
        self.data['published'] = pd.to_datetime(self.data['published'])
        self.data['time_group'] = self.data['published'].dt.to_period(time_window[0].upper())

        # Paper count per primary (first-listed) category.
        self.data['primary_category'] = self.data['categories'].apply(
            lambda x: x[0] if len(x) > 0 else 'other'
        )
        trend_counts = self.data['primary_category'].value_counts()

        # Publication trend: latest period vs. the previous one.
        # BUG FIX: with a single period, pct_change() yields NaN and the
        # original `NaN > 0` comparison silently reported a downtrend;
        # report "持平" (flat) when there is nothing to compare.
        time_series = self.data.groupby('time_group').size()
        if len(time_series) > 1:
            time_trend = "上升" if time_series.pct_change().iloc[-1] > 0 else "下降"
        else:
            time_trend = "持平"

        # Keyword / sentiment analysis over titles + abstracts via TextBlob.
        all_text = " ".join(self.data['title'] + " " + self.data['summary'])
        blob = TextBlob(all_text)

        # Noun phrases serve as candidate keywords; keep the top five.
        keywords = blob.noun_phrases
        top_keywords = pd.Series(keywords).value_counts().head(5)

        # Overall sentiment polarity in [-1, 1].
        sentiment = blob.sentiment.polarity
        sentiment_label = "positive" if sentiment > 0 else "negative" if sentiment < 0 else "neutral"

        # Human-readable report (user-facing text intentionally unchanged).
        trend_analysis = f"""
        科技趋势分析报告 (时间窗口: {time_window}):
        - 主要研究方向: {', '.join(top_keywords.index)}
        - 研究情感倾向: {sentiment_label} (得分: {sentiment:.2f})
        - 热门领域分布: {trend_counts.idxmax()} (共 {trend_counts.max()} 篇)
        - 发表趋势: {time_trend} (最新{time_window}: {time_series.iloc[-1]}篇)
        """

        # Side effect: writes tech_trends.png.
        self._generate_visualization(trend_counts, time_series)

        return {
            'category_counts': trend_counts,
            'trend_analysis': trend_analysis,
            'top_keywords': top_keywords,
            'time_series': time_series
        }

    def _generate_visualization(self, category_counts, time_series):
        """Render category/time charts and save them to tech_trends.png."""
        plt.figure(figsize=(15, 8))

        # Category distribution (top 10).
        plt.subplot(2, 1, 1)
        category_counts.head(10).plot(kind='bar', color='skyblue')
        plt.title('Top Research Categories')
        plt.ylabel('Paper Count')

        # Publication count over time.
        plt.subplot(2, 1, 2)
        time_series.plot(kind='line', marker='o', color='green')
        plt.title('Publication Trend Over Time')
        plt.ylabel('Papers Published')
        plt.grid(True)

        plt.tight_layout()
        plt.savefig('tech_trends.png')
        # FIX: release the figure; the original leaked one figure per call.
        plt.close()
        logger.info("Saved visualization to tech_trends.png")

    def visualize_trends(self, analysis_result):
        """Visualize an analyze_trends() result and return the figure.

        NOTE: writes to the same 'tech_trends.png' file as
        _generate_visualization and will overwrite it.
        """
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))

        # Left panel: paper distribution by category.
        analysis_result['category_counts'].plot(
            kind='bar',
            ax=ax1,
            title='Paper Distribution by Category'
        )

        # Right panel: the textual trend report.
        ax2.text(
            0.1, 0.5,
            analysis_result['trend_analysis'],
            fontsize=10,
            wrap=True
        )
        ax2.axis('off')
        ax2.set_title('Trend Analysis')

        plt.tight_layout()
        plt.savefig('tech_trends.png')
        logger.info("Saved visualization to tech_trends.png")
        return fig

from .config import AppConfig

def configure_logging(config: AppConfig):
    """(Re)configure the root logger from application settings.

    Sends records both to stderr and to ``config.log_file`` at level
    ``config.log_level``.

    BUG FIX: this module already calls logging.basicConfig() at import time,
    which installs a root handler; without ``force=True`` (Python 3.8+) this
    second basicConfig() call was a silent no-op, so the configured level,
    format and file handler never took effect.
    """
    logging.basicConfig(
        level=config.log_level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(config.log_file)
        ],
        force=True
    )

def run_cli_mode(args, config: AppConfig):
    """Run one-shot command-line mode: fetch, analyze, visualize, report.

    Args:
        args: parsed argparse namespace (query, max_results, time_window, report).
        config: validated application configuration.

    Raises:
        Re-raises any exception after logging it.
    """
    try:
        analyzer = TechTrendAnalyzer()

        # Step 1: data acquisition from the configured sources.
        analyzer.fetch_data(query=args.query, max_results=args.max_results)

        # Step 2: trend analysis over the requested window.
        analysis = analyzer.analyze_trends(time_window=args.time_window)
        print("\nTrend Analysis Results:")
        print(analysis['trend_analysis'])

        # Step 3: chart generation.
        analyzer.visualize_trends(analysis)
        print("\nVisualization saved to tech_trends.png")

        # Step 4: optional PDF report.
        if args.report:
            generated = ReportGenerator().generate_report(analysis)
            outcome = ("\nPDF report generated: tech_trend_report.pdf"
                       if generated else "\nFailed to generate PDF report")
            print(outcome)

    except Exception as exc:
        # Log, then propagate so main() can set the exit status.
        logger.error(f"An error occurred: {str(exc)}")
        raise

def main():
    """Program entry point.

    Validates configuration, configures logging, parses CLI arguments and
    dispatches to CLI mode (run_cli_mode) or API service mode (run_api).
    Exits with status 1 on configuration failure or unhandled error, and
    status 0 on keyboard interrupt.
    """
    try:
        # Initialize and validate configuration before anything else.
        # NOTE: the sys.exit() calls below rely on `sys` being imported
        # at module level.
        config = AppConfig()
        if not config.validate():
            logger.error("配置验证失败，请检查.env文件")
            sys.exit(1)
            
        configure_logging(config)
        
        parser = argparse.ArgumentParser(description='科技趋势分析工具')
        subparsers = parser.add_subparsers(dest='mode', required=True)
        
        # CLI (one-shot analysis) mode; defaults come from the config.
        cli_parser = subparsers.add_parser('cli', help='命令行模式')
        cli_parser.add_argument('--query', type=str, default=config.default_query,
                              help=f'搜索查询词 (默认: {config.default_query})')
        cli_parser.add_argument('--max-results', type=int, default=config.max_results,
                              help=f'最大结果数 (默认: {config.max_results})')
        cli_parser.add_argument('--time-window', type=str, default=config.time_window,
                              choices=['day', 'week', 'month', 'year'],
                              help=f'时间分析窗口 (默认: {config.time_window})')
        cli_parser.add_argument('--report', action='store_true',
                              help='生成PDF报告')
        
        # API (long-running service) mode.
        api_parser = subparsers.add_parser('api', help='API服务模式')
        api_parser.add_argument('--host', type=str, default=config.api_host,
                               help=f'API服务主机 (默认: {config.api_host})')
        api_parser.add_argument('--port', type=int, default=config.api_port,
                               help=f'API服务端口 (默认: {config.api_port})')
        
        args = parser.parse_args()
        
        if args.mode == 'cli':
            run_cli_mode(args, config)
        else:
            # Deferred import: CLI use should not require the API stack.
            from .api_service import run_api
            logger.info(f"启动API服务: {args.host}:{args.port}")
            run_api(host=args.host, port=args.port)
            
    except KeyboardInterrupt:
        logger.info("程序被用户中断")
        sys.exit(0)
    except Exception as e:
        logger.error(f"程序异常终止: {str(e)}", exc_info=True)
        sys.exit(1)

if __name__ == "__main__":
    main()