from fastapi import APIRouter, Depends, HTTPException, Query, Path, Body
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import logging
import yaml
import os
from pydantic import BaseModel, Field

# Import application modules
from app.core.crawlers.crawler_manager import CrawlerManager
from app.core.processors.processor_manager import ProcessorManager
from app.database.db_manager import DatabaseManager
from app.core.analyzers.trend_analyzer import TrendAnalyzer
from app.core.recommenders.user_recommender import UserRecommender

# Router object registered by the application factory
router = APIRouter()

# Global configuration and lazily-initialized manager singletons.
# They start empty/None and are populated by load_config(), which the
# dependency getters call on first use.
config: Dict[str, Any] = {}
crawler_manager: Optional[CrawlerManager] = None
processor_manager: Optional[ProcessorManager] = None
db_manager: Optional[DatabaseManager] = None
trend_analyzer: Optional[TrendAnalyzer] = None
user_recommender: Optional[UserRecommender] = None

# Load configuration and initialize shared managers
def load_config():
    """Load the YAML config file and initialize all shared manager singletons.

    The config path comes from the CONFIG_PATH environment variable
    (default: 'config.yml'). Any failure is logged and re-raised so the
    caller sees the startup error.
    """
    global config, crawler_manager, processor_manager, db_manager, trend_analyzer, user_recommender

    try:
        config_path = os.environ.get('CONFIG_PATH', 'config.yml')
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)

        # Wire up the singletons. NOTE(review): CrawlerManager receives the
        # config *path* while the others receive the parsed dict — presumably
        # it re-reads the file itself; confirm against its constructor.
        db_manager = DatabaseManager(config)
        crawler_manager = CrawlerManager(config_path)
        processor_manager = ProcessorManager(config)
        trend_analyzer = TrendAnalyzer(db_manager)
        user_recommender = UserRecommender(db_manager)

        logging.info("API managers initialized")
    except Exception as e:
        logging.error(f"Failed to load config: {str(e)}")
        raise

# Request/response data models
class PaginationParams(BaseModel):
    """Reusable pagination parameters.

    NOTE(review): currently unreferenced by the routes below, which declare
    page/page_size as individual Query parameters instead.
    """
    page: int = Field(1, ge=1, description="页码")  # 1-based page index
    page_size: int = Field(20, ge=1, le=100, description="每页数量")  # items per page, capped at 100

class DateRangeParams(BaseModel):
    """Optional date-range filter (None means unbounded on that side).

    NOTE(review): not referenced by any visible route — verify it is used
    elsewhere before relying on its semantics.
    """
    start_date: Optional[datetime] = Field(None, description="开始日期")  # range start
    end_date: Optional[datetime] = Field(None, description="结束日期")  # range end

class SearchParams(BaseModel):
    query: str = Field(..., min_length=1, description="搜索关键词")
    filters: Optional[Dict[str, Any]] = Field(None, description="过滤条件")
    
class UserCreate(BaseModel):
    """Request body for POST /users describing a new user profile."""
    username: str = Field(..., min_length=3, description="用户名")
    # NOTE(review): email is only length-checked — consider EmailStr or a
    # pattern if stricter validation is wanted (would reject more inputs).
    email: str = Field(..., min_length=5, description="邮箱")
    password: str = Field(..., min_length=6, description="密码(加密后)")  # per the description, expected pre-hashed
    display_name: Optional[str] = Field(None, description="显示名称")
    interests: Optional[List[str]] = Field(None, description="兴趣标签")

# FastAPI dependency providers (lazily initialize managers on first use)
def get_db_manager():
    """Dependency: return the shared DatabaseManager, bootstrapping config on first use."""
    # Read-only access; load_config() performs the module-global assignment.
    if db_manager is None:
        load_config()
    return db_manager

def get_crawler_manager():
    """Dependency: return the shared CrawlerManager, bootstrapping config on first use."""
    # Read-only access; load_config() performs the module-global assignment.
    if crawler_manager is None:
        load_config()
    return crawler_manager

def get_processor_manager():
    """Dependency: return the shared ProcessorManager, bootstrapping config on first use."""
    # Read-only access; load_config() performs the module-global assignment.
    if processor_manager is None:
        load_config()
    return processor_manager

def get_trend_analyzer():
    """Dependency: return the shared TrendAnalyzer, bootstrapping config on first use."""
    # Read-only access; load_config() performs the module-global assignment.
    if trend_analyzer is None:
        load_config()
    return trend_analyzer

def get_user_recommender():
    """Dependency: return the shared UserRecommender, bootstrapping config on first use."""
    # Read-only access; load_config() performs the module-global assignment.
    if user_recommender is None:
        load_config()
    return user_recommender

# API routes
@router.get("/")
async def root():
    """Return basic service metadata for the API root path."""
    info = {
        "name": "AI资讯聚合与分析平台API",
        "version": "0.1.0",
        "status": "online",
    }
    return info

# Crawler endpoints
@router.post("/crawl/all")
async def crawl_all(crawler_mgr: CrawlerManager = Depends(get_crawler_manager),
                    processor_mgr: ProcessorManager = Depends(get_processor_manager),
                    db_mgr: DatabaseManager = Depends(get_db_manager)):
    """Run every configured crawler, post-process the results, and persist them.

    Returns counts of crawled, processed and saved items; failures surface
    as HTTP 500 with the underlying error message.
    """
    try:
        crawled_data = crawler_mgr.crawl_all()
        processed_data = processor_mgr.process_data_by_type(crawled_data)

        # Persist each processed batch. Keys prefixed 'papers.' go to the
        # papers collection; everything else is stored as news.
        total_saved = 0
        for key, items in processed_data.items():
            collection = 'papers' if key.startswith('papers.') else 'news'
            total_saved += db_mgr.save_items(collection, items)

        crawled_total = sum(len(batch) for batch in crawled_data.values())
        processed_total = sum(len(batch) for batch in processed_data.values())
        return {
            "status": "success",
            "data": {
                "crawled_sources": len(crawled_data),
                "total_items": crawled_total,
                "processed_items": processed_total,
                "saved_items": total_saved
            }
        }
    except Exception as e:
        logging.error(f"Crawl all failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"执行爬虫失败: {str(e)}")

@router.post("/crawl/{source_type}/{source_name}")
async def crawl_single(
    source_type: str = Path(..., description="数据源类型"),
    source_name: str = Path(..., description="数据源名称"),
    crawler_mgr: CrawlerManager = Depends(get_crawler_manager),
    processor_mgr: ProcessorManager = Depends(get_processor_manager),
    db_mgr: DatabaseManager = Depends(get_db_manager)
):
    """Run one crawler identified by type/name, then process and persist its output."""
    try:
        crawled_data = crawler_mgr.crawl_single(source_type, source_name)
        processed_data = processor_mgr.process_data(crawled_data)

        # Papers and news are stored in separate collections.
        collection = 'papers' if source_type == 'papers' else 'news'
        saved_count = db_mgr.save_items(collection, processed_data)

        return {
            "status": "success",
            "data": {
                "source": f"{source_type}.{source_name}",
                "crawled_items": len(crawled_data),
                "processed_items": len(processed_data),
                "saved_items": saved_count
            }
        }
    except Exception as e:
        logging.error(f"Crawl single failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"执行爬虫失败: {str(e)}")

# Data query endpoints
@router.get("/news")
async def get_news(
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    # `type` shadows the builtin, but the parameter name is the public query
    # key, so it is kept for API compatibility.
    type: Optional[str] = Query(None, description="新闻类型"),
    source: Optional[str] = Query(None, description="来源"),
    topic: Optional[str] = Query(None, description="主题"),
    days: Optional[int] = Query(30, description="过去几天"),
    db_mgr: DatabaseManager = Depends(get_db_manager)
):
    """Return a paginated news listing, optionally filtered by type, source, topic and recency."""
    try:
        # Keep only the filters the caller actually supplied.
        optional_filters = {
            'type': type,
            'source': source,
            'topic_categories': topic,
        }
        query = {field: value for field, value in optional_filters.items() if value}
        if days:
            query['created_at'] = {'$gte': datetime.now() - timedelta(days=days)}

        skip = (page - 1) * page_size
        # Hottest first, then newest first.
        sort = [('popularity_score', -1), ('created_at', -1)]
        items = db_mgr.get_items('news', query, sort, page_size, skip)

        # Placeholder total; the real implementation should count matching
        # documents in the database.
        total = 1000
        total_pages = (total + page_size - 1) // page_size

        return {
            "status": "success",
            "data": {
                "items": items,
                "pagination": {
                    "page": page,
                    "page_size": page_size,
                    "total": total,
                    "total_pages": total_pages
                }
            }
        }
    except Exception as e:
        logging.error(f"Get news failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取新闻失败: {str(e)}")

@router.get("/papers")
async def get_papers(
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页数量"),
    category: Optional[str] = Query(None, description="类别"),
    sort_by: Optional[str] = Query("date", description="排序方式(date/citations)"),
    days: Optional[int] = Query(90, description="过去几天"),
    db_mgr: DatabaseManager = Depends(get_db_manager)
):
    """Return a paginated paper listing, filterable by category and recency, sorted by date or citations."""
    try:
        query = {'type': 'paper'}
        if category:
            query['categories'] = category
        if days:
            query['created_at'] = {'$gte': datetime.now() - timedelta(days=days)}

        skip = (page - 1) * page_size

        # Citation ordering falls back to recency for ties; the default is
        # plain newest-first.
        sort = (
            [('citation_count', -1), ('created_at', -1)]
            if sort_by == "citations"
            else [('created_at', -1)]
        )

        items = db_mgr.get_items('papers', query, sort, page_size, skip)

        # Placeholder total; the real implementation should count matching
        # documents in the database.
        total = 500
        total_pages = (total + page_size - 1) // page_size

        return {
            "status": "success",
            "data": {
                "items": items,
                "pagination": {
                    "page": page,
                    "page_size": page_size,
                    "total": total,
                    "total_pages": total_pages
                }
            }
        }
    except Exception as e:
        logging.error(f"Get papers failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取论文失败: {str(e)}")

@router.post("/search")
async def search(
    params: SearchParams,
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=50, description="每页数量"),
    db_mgr: DatabaseManager = Depends(get_db_manager)
):
    """Search news first, topping up with paper matches when under one page.

    NOTE(review): `page` is accepted but never applied — the search always
    returns the first page of results.
    """
    try:
        results = db_mgr.search_items(params.query, 'news', params.filters, page_size)

        # Fill the remaining slots with paper matches, if any.
        shortfall = page_size - len(results)
        if shortfall > 0:
            extra = db_mgr.search_items(params.query, 'papers', params.filters, shortfall)
            results.extend(extra)

        return {
            "status": "success",
            "data": {
                "items": results,
                "query": params.query,
                "total": len(results)
            }
        }
    except Exception as e:
        logging.error(f"Search failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"搜索失败: {str(e)}")

# Analytics endpoints
@router.get("/analysis/trends")
async def get_trends(
    days: int = Query(30, ge=7, le=365, description="分析天数"),
    group_by: str = Query("day", description="分组方式(day/week/month)"),
    trend_analyzer: TrendAnalyzer = Depends(get_trend_analyzer)
):
    """Return time-bucketed trend analysis for the last `days` days."""
    try:
        return {
            "status": "success",
            "data": trend_analyzer.analyze_time_trend(days, group_by)
        }
    except Exception as e:
        logging.error(f"Get trends failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取趋势失败: {str(e)}")

@router.get("/analysis/hot-topics")
async def get_hot_topics(
    days: int = Query(7, ge=1, le=30, description="分析天数"),
    limit: int = Query(10, ge=5, le=20, description="返回数量"),
    trend_analyzer: TrendAnalyzer = Depends(get_trend_analyzer)
):
    """Return up to `limit` trending topics from the last `days` days."""
    try:
        return {
            "status": "success",
            "data": trend_analyzer.get_trending_topics(days, limit)
        }
    except Exception as e:
        logging.error(f"Get hot topics failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取热门话题失败: {str(e)}")

@router.get("/analysis/popular-papers")
async def get_popular_papers(
    days: int = Query(30, ge=7, le=90, description="分析天数"),
    limit: int = Query(10, ge=5, le=20, description="返回数量"),
    trend_analyzer: TrendAnalyzer = Depends(get_trend_analyzer)
):
    """Return up to `limit` popular papers from the last `days` days."""
    try:
        return {
            "status": "success",
            "data": trend_analyzer.get_popular_papers(days, limit)
        }
    except Exception as e:
        logging.error(f"Get popular papers failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取热门论文失败: {str(e)}")

@router.get("/analysis/company-activity")
async def get_company_activity(
    days: int = Query(30, ge=7, le=90, description="分析天数"),
    trend_analyzer: TrendAnalyzer = Depends(get_trend_analyzer)
):
    """Return company activity statistics for the last `days` days."""
    try:
        return {
            "status": "success",
            "data": trend_analyzer.get_company_activity(days)
        }
    except Exception as e:
        logging.error(f"Get company activity failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取公司活动失败: {str(e)}")

# User endpoints
@router.post("/users")
async def create_user(
    user: UserCreate,
    user_recommender: UserRecommender = Depends(get_user_recommender)
):
    """Create (or update) a user profile.

    Returns a success message on success; raises HTTP 400 when the
    recommender reports failure and HTTP 500 on unexpected errors.
    """
    try:
        user_data = user.dict()
        result = user_recommender.create_or_update_user(user_data)

        if result:
            return {
                "status": "success",
                "message": "用户创建成功"
            }
        else:
            raise HTTPException(status_code=400, detail="创建用户失败")
    except HTTPException:
        # Bug fix: without this re-raise the 400 above was swallowed by the
        # generic handler below and surfaced to clients as a 500.
        raise
    except Exception as e:
        logging.error(f"Create user failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"创建用户失败: {str(e)}")

@router.get("/recommendations/{user_id}")
async def get_recommendations(
    user_id: str = Path(..., description="用户ID"),
    limit: int = Query(20, ge=5, le=50, description="返回数量"),
    user_recommender: UserRecommender = Depends(get_user_recommender)
):
    """Return up to `limit` personalized recommendations for a user."""
    try:
        return {
            "status": "success",
            "data": user_recommender.get_recommendations(user_id, limit)
        }
    except Exception as e:
        logging.error(f"Get recommendations failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取推荐失败: {str(e)}")

@router.post("/users/{user_id}/view")
async def record_user_view(
    user_id: str = Path(..., description="用户ID"),
    item: Dict[str, Any] = Body(..., description="浏览的内容"),
    user_recommender: UserRecommender = Depends(get_user_recommender)
):
    """Record a viewed item into the user's browsing history/profile.

    Raises HTTP 400 when the recommender reports failure and HTTP 500 on
    unexpected errors.
    """
    try:
        result = user_recommender.update_user_profile(user_id, item)
        if result:
            return {
                "status": "success",
                "message": "用户浏览记录更新成功"
            }
        else:
            raise HTTPException(status_code=400, detail="更新浏览记录失败")
    except HTTPException:
        # Bug fix: without this re-raise the 400 above was swallowed by the
        # generic handler below and surfaced to clients as a 500.
        raise
    except Exception as e:
        logging.error(f"Record user view failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"记录用户浏览失败: {str(e)}")