import asyncio
import hashlib
import json
import os
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from dateutil.parser import parse

from config.settings import GROQ_API_KEY
from data_sources.academic import fetch_arxiv_papers
from data_sources.ai_news import fetch_ai_research_news, fetch_anthropic_blog, fetch_openai_blog
from data_sources.huggingface import fetch_huggingface_models, fetch_trending_datasets
from data_sources.papers_with_code import fetch_trending_papers
from mcp_integration.mcp_controller import MCPController
from processors.content_parser import parse_content
from processors.summarizer import summarize_text

PROCESSED_CACHE_DIR = "./cache/processed_articles"
PROCESSED_CACHE_EXPIRATION_HOURS = 1

class DeepTrend:
    """Multi-source AI literature/trend aggregation agent.

    Fetches articles from several configurable data sources, generates an
    English and a Chinese summary for each article, caches the processed
    results on disk, and renders a combined plain-text report.
    """

    def __init__(self):
        # Components the agent needs (models, DB connections, ...) can be
        # initialized here; currently only the MCP controller is required.
        self.mcp_controller = MCPController()

    @staticmethod
    def _stable_cache_key(query: str) -> str:
        """Return a deterministic short hex digest for *query*.

        The built-in ``hash()`` is randomized per interpreter process
        (PYTHONHASHSEED), so using it in the cache filename made the name
        change on every run and the on-disk cache could never hit across
        restarts. A hashlib digest is stable.
        """
        return hashlib.sha256(query.encode("utf-8")).hexdigest()[:16]

    @staticmethod
    def _load_fresh_cache(cache_filepath: str, source_name: str) -> Optional[List[Dict[str, Any]]]:
        """Return cached processed articles if present and fresh, else None.

        A corrupt or unreadable cache file is ignored (it will simply be
        overwritten after reprocessing) instead of aborting the whole run.
        """
        if not os.path.exists(cache_filepath):
            return None
        try:
            with open(cache_filepath, "r", encoding="utf-8") as f:
                cache_data = json.load(f)
            cached_time = datetime.fromisoformat(cache_data["timestamp"])
            if datetime.now() - cached_time < timedelta(hours=PROCESSED_CACHE_EXPIRATION_HOURS):
                print(f"[DEBUG] Returning cached processed articles for {source_name}.")
                return cache_data["articles"]
        except (json.JSONDecodeError, KeyError, ValueError, OSError) as e:
            print(f"[DEBUG] Ignoring unusable cache file {cache_filepath}: {e}")
        return None

    async def _fetch_raw_articles(self, source_name: str, query: str, max_results: int) -> Optional[List[Dict[str, Any]]]:
        """Fetch raw articles for *source_name*; None for an unknown source.

        All fetchers are synchronous, so the chosen one runs in a worker
        thread to keep the event loop responsive.
        """
        # Dispatch table: source name -> zero-arg callable performing the fetch.
        fetchers = {
            "arxiv": lambda: fetch_arxiv_papers(query, max_results),
            "papers_with_code": lambda: fetch_trending_papers(max_results),
            "trending_papers": lambda: fetch_trending_papers(7),  # last 7 days
            "huggingface": lambda: fetch_huggingface_models(max_results),
            "hf_datasets": lambda: fetch_trending_datasets(max_results),
            "ai_news": lambda: fetch_ai_research_news(max_results),
            "openai_blog": fetch_openai_blog,
            "anthropic_news": fetch_anthropic_blog,
        }
        fetcher = fetchers.get(source_name)
        if fetcher is None:
            print(f"错误: 未知的数据源 '{source_name}'")
            return None
        return await asyncio.to_thread(fetcher)

    async def _process_article(self, article: Dict[str, Any], source_name: str) -> Dict[str, Any]:
        """Summarize one raw article and normalize it into the report schema."""
        print(f"正在处理文章: {article.get('title', 'Unknown Title')}")

        # Prefer inline content; otherwise try to parse the article URL.
        content = article.get('content', '') or article.get('abstract', '') or article.get('summary', '')
        if not content and article.get('url'):
            try:
                content = await asyncio.to_thread(parse_content, article['url'])
            except Exception as e:
                print(f"解析内容失败: {e}")
                content = article.get('title', '')

        summary_en = ""
        summary_zh = ""
        if content:
            # summarize_text is blocking (remote LLM call); run it in a worker
            # thread so the event loop can interleave the other sources.
            try:
                summary_en = await asyncio.to_thread(summarize_text, content, "English")
            except Exception as e:
                summary_en = f"Failed to generate English summary: {e}"
            try:
                summary_zh = await asyncio.to_thread(summarize_text, content, "Chinese")
            except Exception as e:
                summary_zh = f"Failed to generate Chinese summary: {e}"

        processed_article = {
            "title": article.get('title', ''),
            "url": article.get('url', ''),
            "published": article.get('published', ''),
            "authors": article.get('authors', ''),
            "summary_en": summary_en,
            "summary_zh": summary_zh,
            "source": article.get('source', source_name),
            "source_name": article.get('source_name', source_name),
            "github_url": article.get('github_url', ''),
            "downloads": article.get('downloads', 0),
            "likes": article.get('likes', 0),
            "tags": article.get('tags', []),
            "tasks": article.get('tasks', []),
            "images": article.get('images', [])  # keep image info for the report
        }
        print(f"文章 '{article.get('title', 'Unknown')}' 处理完成。")
        return processed_article

    async def process_source(self, source_name: str, query: str = "", max_results: int = 20) -> List[Dict[str, Any]]:
        """Fetch, process and summarize content for the given data source.

        Results are cached on disk for PROCESSED_CACHE_EXPIRATION_HOURS so
        repeated runs with the same query do not re-summarize everything.

        Returns a list of normalized article dicts ([] for unknown sources).
        """
        print(f"--- 开始处理数据源: {source_name} ---")

        # Cache file path for processed articles; the query is folded into the
        # filename via a stable digest (see _stable_cache_key).
        cache_filename = f"processed_{source_name}_{self._stable_cache_key(query)}_{max_results}.json"
        cache_filepath = os.path.join(PROCESSED_CACHE_DIR, cache_filename)
        os.makedirs(PROCESSED_CACHE_DIR, exist_ok=True)

        cached = self._load_fresh_cache(cache_filepath, source_name)
        if cached is not None:
            return cached

        # 1. Fetch the raw data.
        articles = await self._fetch_raw_articles(source_name, query, max_results)
        if articles is None:
            # Unknown source: nothing to process or cache.
            return []

        print(f"成功获取 {len(articles)} 篇文章。")

        # 2. Process and summarize each article.
        print("正在生成摘要...")
        processed_articles = []
        for article in articles:
            processed_articles.append(await self._process_article(article, source_name))

        # Cache the processed results with a timestamp for freshness checks.
        cache_data = {
            "timestamp": datetime.now().isoformat(),
            "articles": processed_articles
        }
        with open(cache_filepath, "w", encoding="utf-8") as f:
            json.dump(cache_data, f, ensure_ascii=False, indent=2)

        print(f"--- 数据源: {source_name} 处理完毕 ---")
        return processed_articles

    @staticmethod
    def _published_sort_key(article: Dict[str, Any]) -> datetime:
        """Best-effort sort key: parsed publish date, epoch on failure.

        Processed articles default 'published' to "", which the old inline
        key fed straight to dateutil and thereby aborted the whole sort.
        """
        try:
            return parse(article.get('published') or '1970-01-01')
        except (ValueError, TypeError, OverflowError):
            return datetime(1970, 1, 1)

    async def run(self, queries: Dict[str, str], max_results_per_source: int = 5):
        """Run the agent over every configured data-source query.

        Sources are processed concurrently; a failure in one source is logged
        and skipped instead of aborting the whole run.

        Args:
            queries: mapping of source name -> query string (may be empty).
            max_results_per_source: per-source article cap.

        Returns:
            The flattened, date-sorted list of processed article dicts.
        """
        tasks = [
            self.process_source(source, query, max_results_per_source)
            for source, query in queries.items()
        ]

        # return_exceptions=True: one failing source must not cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Flatten the per-source result lists, skipping failed sources.
        all_articles: List[Dict[str, Any]] = []
        for source, result in zip(queries, results):
            if isinstance(result, BaseException):
                print(f"数据源 '{source}' 处理失败: {result}")
            else:
                all_articles.extend(result)

        # Sort all articles by publish date, newest first. The per-article key
        # already tolerates bad dates; the outer guard covers pathological
        # cases such as mixing naive and timezone-aware datetimes.
        try:
            all_articles.sort(key=self._published_sort_key, reverse=True)
        except Exception as e:
            print(f"排序时出错: {e}")
            # If sorting fails, keep the original order.

        # Build the report string.
        report_content = []
        report_content.append("\n========== DeepTrend AI文献分析报告 ==========")
        report_content.append(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        report_content.append(f"总计文章数: {len(all_articles)}")
        report_content.append("=" * 50)

        if not all_articles:
            report_content.append("未能获取或处理任何文章。")
        else:
            for i, article in enumerate(all_articles, 1):
                report_content.append(f"\n【文章 {i}】")
                report_content.append(f"标题: {article.get('title', 'N/A')}")
                report_content.append(f"作者: {article.get('authors', 'N/A')}")
                report_content.append(f"发布日期: {article.get('published', 'N/A')}")
                report_content.append(f"数据源: {article.get('source_name', article.get('source', 'N/A'))}")
                report_content.append(f"链接: {article.get('url', 'N/A')}")

                # Image information, if any.
                images = article.get('images', [])
                if images:
                    report_content.append(f"配图 ({len(images)}张):")
                    for j, image in enumerate(images, 1):
                        report_content.append(f"  图片{j}: {image.get('url', 'N/A')}")
                        if image.get('alt'):
                            report_content.append(f"    描述: {image.get('alt')}")
                        if image.get('type'):
                            report_content.append(f"    类型: {image.get('type')}")
                else:
                    report_content.append("配图: 无")

                # Optional extra metadata (only when present).
                if article.get('github_url'):
                    report_content.append(f"GitHub: {article['github_url']}")
                if article.get('downloads'):
                    report_content.append(f"下载量: {article['downloads']}")
                if article.get('likes'):
                    report_content.append(f"点赞数: {article['likes']}")
                if article.get('tags'):
                    report_content.append(f"标签: {', '.join(article['tags'])}")
                if article.get('tasks'):
                    report_content.append(f"任务类型: {', '.join(article['tasks'])}")

                report_content.append(f"英文摘要: {article.get('summary_en', 'N/A')}")
                report_content.append(f"中文摘要: {article.get('summary_zh', 'N/A')}")
                report_content.append("-" * 40)

        report_content.append("\n==========================================")
        final_report = "\n".join(report_content)

        # Print to console.
        print(final_report)

        # Save the report to a timestamped file.
        output_dir = "./output"
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        report_filename = os.path.join(output_dir, f"deeptrend_report_{timestamp}.txt")
        with open(report_filename, "w", encoding="utf-8") as f:
            f.write(final_report)
        print(f"最终报告已保存到: {report_filename}")

        # Let the MCP controller analyze and enhance the results.
        enhanced_report = self.mcp_controller.enhance_agent(all_articles)
        if enhanced_report:
            enhanced_filename = os.path.join(output_dir, f"deeptrend_enhanced_{timestamp}.txt")
            with open(enhanced_filename, "w", encoding="utf-8") as f:
                f.write(enhanced_report)
            print(f"增强报告已保存到: {enhanced_filename}")

        return all_articles


async def main():
    """Entry point for exercising the multi-source agent."""
    agent = DeepTrend()

    # Data source name -> query string. Insertion order defines processing
    # order: arxiv (with a keyword query) first, then the sources that take
    # no query (trending papers, latest HF models, datasets, AI news, and
    # the OpenAI / Anthropic blogs).
    queries_to_run = {
        "arxiv": 'all:"multimodal" OR all:"large language model" OR all:"agent" OR all:"sota"',
    }
    for no_query_source in (
        "trending_papers",
        "huggingface",
        "hf_datasets",
        "ai_news",
        "openai_blog",
        "anthropic_news",
    ):
        queries_to_run[no_query_source] = ""

    await agent.run(queries=queries_to_run, max_results_per_source=10)

if __name__ == "__main__":
    # The agent is async throughout, so bootstrap the event loop with
    # asyncio.run().
    asyncio.run(main())