"""
热点趋势API路由
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.orm import Session
from typing import List, Optional

from ..core.database import get_db, HotTrend
from ..core.logger import app_logger
from ..scrapers.base import BaseScraper
from ..scrapers.weibo_scraper import WeiboScraper
from ..scrapers.zhihu_scraper import ZhihuScraper
from ..scrapers.baidu_scraper import BaiduScraper
from ..scrapers.douyin_scraper import DouyinScraper

router = APIRouter()

# Scraper registry: maps a source key (also stored in HotTrend.source) to a
# scraper instance. Instances are created once at import time and reused for
# every request — assumes scrapers are stateless between scrape() calls.
SCRAPERS = {
    "weibo": WeiboScraper(),
    "zhihu": ZhihuScraper(),
    "baidu": BaiduScraper(),
    "douyin": DouyinScraper(),
}


@router.get("/", summary="获取热点列表")
async def get_hot_trends(
    source: Optional[str] = None,
    limit: int = 20,
    db: Session = Depends(get_db)
):
    """Return the most recent hot trends, newest first.

    Args:
        source: Optional source key (e.g. "weibo") to filter by; when omitted,
            trends from all sources are returned.
        limit: Maximum number of rows to return (default 20).
        db: Injected SQLAlchemy session.

    Returns:
        JSON envelope with ``status``, serialized ``data`` rows and ``total``
        (the number of rows actually returned, not the table count).
    """
    query = db.query(HotTrend)

    if source:
        query = query.filter(HotTrend.source == source)

    trends = query.order_by(HotTrend.created_at.desc()).limit(limit).all()

    return {
        "status": "success",
        "data": [
            {
                "id": trend.id,
                "title": trend.title,
                "description": trend.description,
                "source": trend.source,
                "rank": trend.rank,
                "heat_score": trend.heat_score,
                "url": trend.url,
                # Guard against NULL timestamps: calling .isoformat() on None
                # would raise AttributeError and turn the request into a 500.
                "created_at": trend.created_at.isoformat() if trend.created_at else None,
                "processed": trend.processed
            }
            for trend in trends
        ],
        "total": len(trends)
    }


@router.post("/scrape", summary="手动抓取热点")
async def scrape_hot_trends(
    sources: Optional[List[str]] = Query(default=None),
    db: Session = Depends(get_db)
):
    """Trigger a scrape for the given sources and persist new trends.

    Args:
        sources: Source keys to scrape; defaults to every registered scraper.
        db: Injected SQLAlchemy session.

    Returns:
        JSON envelope with per-source results: for each source either
        ``{"status": "success", "scraped": n, "saved": m}`` or
        ``{"status": "error", "message": ...}``.
    """
    if not sources:
        sources = list(SCRAPERS.keys())

    results = {}

    for source in sources:
        if source not in SCRAPERS:
            # Report unknown sources in the response instead of silently
            # dropping them, so callers can see the typo.
            app_logger.warning(f"未知的数据源: {source}")
            results[source] = {
                "status": "error",
                "message": "unknown source"
            }
            continue

        try:
            scraper = SCRAPERS[source]
            trends = await scraper.scrape()

            # Persist only trends not already stored (dedup by title + source).
            saved_count = 0
            for trend_data in trends:
                existing = db.query(HotTrend).filter(
                    HotTrend.title == trend_data["title"],
                    HotTrend.source == source
                ).first()

                if not existing:
                    trend = HotTrend(
                        title=trend_data["title"],
                        description=trend_data.get("description", ""),
                        source=source,
                        rank=trend_data.get("rank", 0),
                        heat_score=trend_data.get("heat_score", 0),
                        url=trend_data.get("url", "")
                    )
                    db.add(trend)
                    saved_count += 1

            # Commit per source so one failing source doesn't discard the
            # successfully scraped ones.
            db.commit()

            results[source] = {
                "status": "success",
                "scraped": len(trends),
                "saved": saved_count
            }

            app_logger.info(f"成功抓取 {source} 热点: {len(trends)} 条，保存: {saved_count} 条")

        except Exception as e:
            # Roll back so a failed flush/commit doesn't leave the session in
            # a failed state, which would break every subsequent source in
            # this loop.
            db.rollback()
            app_logger.error(f"抓取 {source} 热点失败: {str(e)}")
            results[source] = {
                "status": "error",
                "message": str(e)
            }

    return {
        "status": "completed",
        "results": results
    }


@router.get("/sources", summary="获取数据源列表")
async def get_sources():
    """List every registered data source with its key, name and description."""
    source_list = []
    for key, scraper in SCRAPERS.items():
        source_list.append({
            "key": key,
            "name": scraper.name,
            "description": scraper.description,
        })

    return {"status": "success", "sources": source_list}