"""
热点数据API接口
提供热点数据的查询、搜索、趋势分析和聚合功能
"""
import hashlib
import json
import logging
import os
import re
from datetime import datetime, timedelta
from typing import Optional, List, Dict, Any

import redis
from bson import ObjectId
from fastapi import APIRouter, Query, Path, HTTPException, Depends, BackgroundTasks
from fastapi.responses import JSONResponse
from motor.motor_asyncio import AsyncIOMotorClient

from ..models.hot_data import (
    Platform, SortOrder, SortBy, TrendDirection,
    PaginationParams, FilterParams, SortParams, SearchParams,
    HotItemBase, HotItemDetail, HotItemTrend, HotItemAggregate,
    SuccessResponse, ErrorResponse, PaginatedResponse,
    HotItemListResponse, HotItemSearchResponse,
    HotItemTrendResponse, HotItemAggregateResponse
)

logger = logging.getLogger(__name__)

# Router: every endpoint in this module is mounted under /api/v1.
router = APIRouter(prefix="/api/v1", tags=["hot-items"])

# Application settings (relative import kept here, below the router creation).
from .config import settings

# MongoDB connection — async driver; one client/db handle shared by all handlers.
mongo_client = AsyncIOMotorClient(settings.mongodb_uri)
db = mongo_client[settings.mongodb_db]

# Redis connection used for response caching.
# NOTE(review): this is the synchronous redis client, so cache reads/writes
# block the event loop inside the async handlers — consider redis.asyncio.
redis_client = redis.Redis(
    host=settings.redis_host,
    port=settings.redis_port,
    password=settings.redis_password,
    db=settings.redis_db,
    decode_responses=True
)


def generate_cache_key(prefix: str, **kwargs) -> str:
    """Build a deterministic cache key from a prefix and arbitrary params.

    The keyword arguments are serialized to canonical JSON (sorted keys,
    non-JSON values stringified) and hashed with MD5, so equal parameter
    sets always map to the same key regardless of argument order.
    """
    canonical = json.dumps(kwargs, sort_keys=True, default=str)
    digest = hashlib.md5(canonical.encode()).hexdigest()
    return ":".join((prefix, digest))


async def get_from_cache(key: str) -> Optional[Dict]:
    """Fetch and JSON-decode a cached value; return None on miss or error.

    Cache failures are deliberately non-fatal: a warning is logged and the
    caller falls back to the database query path.
    """
    try:
        raw = redis_client.get(key)
        if raw:
            return json.loads(raw)
    except Exception as exc:
        logger.warning(f"Cache read error: {exc}")
    return None


async def set_cache(key: str, data: Dict, ttl: int = 3600):
    """JSON-encode `data` and store it in Redis under `key` with a TTL.

    Best-effort: write failures are logged as warnings and swallowed so a
    cache outage never breaks the request path.
    """
    try:
        payload = json.dumps(data, default=str)
        redis_client.setex(key, ttl, payload)
    except Exception as exc:
        logger.warning(f"Cache write error: {exc}")


@router.get("/hot-items", response_model=SuccessResponse)
async def get_hot_items(
    pagination: PaginationParams = Depends(),
    filters: FilterParams = Depends(),
    sort: SortParams = Depends(),
    use_cache: bool = Query(True, description="是否使用缓存")
):
    """
    Get the hot-items list, grouped by platform.

    Supports sorting and multi-dimensional filters:
    - platform filter: one collection per platform is queried
    - time range: defaults to the last 3 days when unset
    - minimum heat value and maximum rank filters
    - category filter

    NOTE(review): pagination params are only echoed back in the response;
    each platform is capped at 20 items regardless of page/page_size.
    """
    # Cache key covers every parameter that changes the result set.
    cache_key = generate_cache_key(
        "cache:hot:list",
        page=pagination.page,
        page_size=pagination.page_size,
        filters=filters.dict(),
        sort=sort.dict()
    )

    if use_cache:
        cached_data = await get_from_cache(cache_key)
        if cached_data:
            return SuccessResponse(
                message="Data from cache",
                data=cached_data
            )

    # Build the Mongo filter. Platform selection is NOT part of it because
    # each platform lives in its own collection (queried separately below).
    query = {}

    # Time-range filter; default to the last 3 days so the newest data is
    # always included (allows for timezone skew between crawler and server).
    if filters.start_date or filters.end_date:
        time_filter = {}
        if filters.start_date:
            time_filter["$gte"] = filters.start_date
        if filters.end_date:
            time_filter["$lte"] = filters.end_date
        query["crawled_at"] = time_filter
    else:
        three_days_ago = datetime.utcnow() - timedelta(days=3)
        query["crawled_at"] = {"$gte": three_days_ago}

    # Minimum heat filter (skipped when unset or 0).
    if filters.min_heat:
        query["heat_value_numeric"] = {"$gte": filters.min_heat}

    # Maximum rank filter.
    if filters.max_rank:
        query["rank"] = {"$lte": filters.max_rank}

    # Category filter.
    if filters.category:
        query["category"] = filters.category

    # Sort specification shared by every platform query.
    sort_field = sort.sort_by.value
    sort_direction = 1 if sort.order == SortOrder.ASC else -1

    try:
        logger.info(f"Received platforms filter: {filters.platforms}")

        # Query each requested platform's collection (all platforms when
        # no explicit filter is given).
        platforms_data = {}
        platforms_to_query = filters.platforms if filters.platforms else list(Platform)
        logger.info(f"Platforms to query: {[p.value for p in platforms_to_query]}")

        total_items = 0

        for platform in platforms_to_query:
            collection = db[f"{platform.value}_hot_items"]

            # At most 20 items per platform.
            cursor = collection.find(query).sort(
                sort_field, sort_direction
            ).limit(20)

            platform_items = []
            seen_titles = set()  # de-duplicate near-identical titles

            async for item in cursor:
                item["item_id"] = str(item.pop("_id"))
                item["platform"] = platform.value

                # De-dup key: platform plus the first 20 title characters.
                title_key = f"{platform.value}:{item.get('title', '')[:20].strip()}"
                if title_key in seen_titles:
                    continue
                seen_titles.add(title_key)

                # Backfill fields the response model requires.
                if "updated_at" not in item:
                    item["updated_at"] = item.get("crawled_at", datetime.utcnow())
                if "excerpt" not in item:
                    item["excerpt"] = item.get("description", "")[:200]

                # Ensure a numeric heat value exists for sorting/filtering.
                if "heat_value_numeric" not in item:
                    item["heat_value_numeric"] = parse_heat_value_for_sort(item.get("heat_value", "0"))

                try:
                    platform_items.append(HotItemBase(**item).dict())
                except Exception as e:
                    # Skip malformed documents rather than failing the request.
                    logger.warning(f"Failed to parse item from {platform.value}: {e}")

            if platform_items:
                platforms_data[platform.value] = platform_items
                total_items += len(platform_items)

        response_data = {
            "platforms": platforms_data,
            "pagination": {
                "page": pagination.page,
                "page_size": pagination.page_size,
                "total": total_items,
                "total_pages": 1  # simplified: single page
            }
        }

        await set_cache(cache_key, response_data, ttl=3600)

        return SuccessResponse(
            message="Success",
            data=response_data
        )

    except Exception as e:
        logger.error(f"Failed to get hot items: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Internal server error: {str(e)}"
        )


@router.get("/hot-items/{item_id}", response_model=SuccessResponse)
async def get_hot_item_detail(
    item_id: str = Path(..., description="热点项目ID"),
    platform: Platform = Query(..., description="平台名称"),
    include_comments: bool = Query(True, description="是否包含评论"),
    comment_limit: int = Query(10, ge=1, le=50, description="评论数量限制")
):
    """
    Get the full detail of one hot item: content, top comments and media.

    NOTE(review): this parameterized route is registered before the literal
    /hot-items/search, /hot-items/trends and /hot-items/aggregate routes;
    with first-match routing those paths are captured here as `item_id`,
    making them unreachable until they are registered above this one.
    """
    cache_key = f"cache:hot:item:{platform.value}:{item_id}"

    cached_data = await get_from_cache(cache_key)
    if cached_data:
        return SuccessResponse(
            message="Data from cache",
            data=cached_data
        )

    try:
        collection = db[f"{platform.value}_hot_items"]

        # First try the id as a Mongo ObjectId; if it is not a valid
        # ObjectId, fall back to the platform-specific id fields.
        try:
            item = await collection.find_one({"_id": ObjectId(item_id)})
        except Exception:
            item = await collection.find_one({
                "$or": [
                    {"question_id": item_id},
                    {"post_id": item_id},
                    {"video_id": item_id}
                ]
            })

        if not item:
            raise HTTPException(
                status_code=404,
                detail=f"Item {item_id} not found on platform {platform.value}"
            )

        item["item_id"] = str(item.pop("_id", item_id))
        item["platform"] = platform.value

        # Attach the highest-voted comments when requested and flagged.
        if include_comments and item.get("has_comments"):
            comments_collection = db[f"{platform.value}_comments"]
            comments = []

            cursor = comments_collection.find({
                "parent_id": item_id
            }).sort("vote_count", -1).limit(comment_limit)

            async for comment in cursor:
                comment.pop("_id", None)
                comments.append(comment)

            item["comments"] = comments

        detail = HotItemDetail(**item)

        # Details change less often than the list; cache for 30 minutes.
        await set_cache(cache_key, detail.dict(), ttl=1800)

        return SuccessResponse(
            message="Success",
            data=detail.dict()
        )

    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the 404 above) untouched.
        raise
    except Exception as e:
        logger.error(f"Failed to get item detail: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Internal server error: {str(e)}"
        )


@router.get("/hot-items/search", response_model=SuccessResponse)
async def search_hot_items(
    search: SearchParams = Depends(),
    pagination: PaginationParams = Depends(),
    platforms: Optional[List[Platform]] = Query(None, description="限定平台")
):
    """
    Search hot items across platforms, with optional <mark> highlighting.

    Fuzzy matching uses case-insensitive substring regex over title,
    excerpt and content; exact matching compares title/excerpt verbatim.

    NOTE(review): this route is shadowed by /hot-items/{item_id}, which is
    registered first — it must be moved above that route to be reachable.
    """
    try:
        all_results = []
        total_count = 0

        # Default to every platform when none is specified.
        search_platforms = platforms if platforms else list(Platform)

        # Escape the keyword so user input is matched literally instead of
        # being interpreted as a regular expression (injection / crash fix).
        escaped_keyword = re.escape(search.keyword)
        highlight_pattern = re.compile(escaped_keyword, re.IGNORECASE)

        for platform in search_platforms:
            collection = db[f"{platform.value}_hot_items"]

            if search.match_type == "fuzzy":
                search_query = {
                    "$or": [
                        {"title": {"$regex": escaped_keyword, "$options": "i"}},
                        {"excerpt": {"$regex": escaped_keyword, "$options": "i"}},
                        {"content": {"$regex": escaped_keyword, "$options": "i"}}
                    ]
                }
            else:
                search_query = {
                    "$or": [
                        {"title": search.keyword},
                        {"excerpt": search.keyword}
                    ]
                }

            # Page within each platform's result set.
            skip = (pagination.page - 1) * pagination.page_size
            cursor = collection.find(search_query).skip(skip).limit(pagination.page_size)

            async for item in cursor:
                item["item_id"] = str(item.pop("_id"))
                item["platform"] = platform.value

                # Highlight case-insensitively while preserving the original
                # casing of the matched text (previously only exact-case
                # occurrences were wrapped).
                if search.highlight:
                    for field in ["title", "excerpt"]:
                        if field in item and search.keyword.lower() in item[field].lower():
                            item[f"{field}_highlighted"] = highlight_pattern.sub(
                                lambda m: f"<mark>{m.group(0)}</mark>",
                                item[field]
                            )

                all_results.append(item)

            # Per-platform total for the aggregate count.
            count = await collection.count_documents(search_query)
            total_count += count

        response_data = {
            "items": all_results,
            "total": total_count,
            "keyword": search.keyword,
            "pagination": {
                "page": pagination.page,
                "page_size": pagination.page_size,
                "total_pages": (total_count + pagination.page_size - 1) // pagination.page_size
            }
        }

        return SuccessResponse(
            message="Search completed",
            data=response_data
        )

    except Exception as e:
        logger.error(f"Search failed: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Search error: {str(e)}"
        )


@router.get("/hot-items/trends", response_model=SuccessResponse)
async def get_hot_trends(
    platform: Optional[Platform] = Query(None, description="平台"),
    period_hours: int = Query(24, ge=1, le=168, description="统计周期（小时）"),
    limit: int = Query(20, ge=1, le=100, description="返回数量")
):
    """
    Analyse rank/heat movement of currently-hot items over a period.

    For each item on a platform's current board, historical snapshots are
    sampled across the period and the rank change is classified as
    up / down / stable / new.

    NOTE(review): this route is shadowed by /hot-items/{item_id}, which is
    registered first — it must be moved above that route to be reachable.
    """
    cache_key = generate_cache_key(
        "cache:hot:trends",
        platform=platform,
        period_hours=period_hours,
        limit=limit
    )

    cached_data = await get_from_cache(cache_key)
    if cached_data:
        return SuccessResponse(
            message="Data from cache",
            data=cached_data
        )

    try:
        end_time = datetime.utcnow()
        start_time = end_time - timedelta(hours=period_hours)

        trends = []
        platforms_to_check = [platform] if platform else list(Platform)

        for plat in platforms_to_check:
            collection = db[f"{plat.value}_hot_items"]

            # Current board: items crawled within the last hour.
            current_items = {}
            cursor = collection.find({
                "crawled_at": {"$gte": end_time - timedelta(hours=1)}
            }).sort("rank", 1).limit(limit)

            async for item in cursor:
                item_key = item.get("title", "")[:50]  # first 50 title chars as the identity key
                current_items[item_key] = {
                    "current_rank": item.get("rank"),
                    "title": item.get("title"),
                    "heat_value": item.get("heat_value"),
                    "item_id": str(item.get("_id"))
                }

            # Sample historical snapshots to build each item's rank history.
            for item_key, current_data in current_items.items():
                history = []
                # At most ~24 sample points spread across the period.
                for hour in range(0, period_hours, max(1, period_hours // 24)):
                    check_time = start_time + timedelta(hours=hour)

                    # re.escape: titles may contain regex metacharacters,
                    # which previously broke (or subverted) the prefix match.
                    hist_item = await collection.find_one({
                        "title": {"$regex": f"^{re.escape(item_key[:30])}", "$options": "i"},
                        "crawled_at": {
                            "$gte": check_time,
                            "$lt": check_time + timedelta(hours=1)
                        }
                    })

                    if hist_item:
                        history.append({
                            "time": check_time,
                            "rank": hist_item.get("rank"),
                            "heat_value": hist_item.get("heat_value")
                        })

                # Classify the movement from the earliest sampled snapshot.
                if history:
                    first_rank = history[0].get("rank", 999)
                    current_rank = current_data["current_rank"]
                    rank_change = first_rank - current_rank  # positive = climbed

                    if len(history) == 1:
                        direction = TrendDirection.NEW
                    elif rank_change > 5:
                        direction = TrendDirection.UP
                    elif rank_change < -5:
                        direction = TrendDirection.DOWN
                    else:
                        direction = TrendDirection.STABLE

                    trend = HotItemTrend(
                        item_id=current_data["item_id"],
                        title=current_data["title"],
                        platform=plat,
                        current_rank=current_rank,
                        previous_rank=first_rank if len(history) > 1 else None,
                        rank_change=rank_change,
                        trend_direction=direction,
                        heat_values=history,
                        first_seen=history[0]["time"],
                        last_seen=end_time,
                        duration_hours=(end_time - history[0]["time"]).total_seconds() / 3600
                    )

                    trends.append(trend.dict())

        # Rising items first, then by magnitude of movement.
        trends.sort(key=lambda x: (
            x["trend_direction"] == "up",
            abs(x.get("rank_change", 0))
        ), reverse=True)

        response_data = {
            "trends": trends[:limit],
            "period": f"{period_hours}h",
            "platform": platform.value if platform else "all",
            "summary": {
                "total_items": len(trends),
                "rising_count": sum(1 for t in trends if t["trend_direction"] == "up"),
                "new_count": sum(1 for t in trends if t["trend_direction"] == "new"),
                "stable_count": sum(1 for t in trends if t["trend_direction"] == "stable")
            }
        }

        # Trends move quickly; cache for 10 minutes only.
        await set_cache(cache_key, response_data, ttl=600)

        return SuccessResponse(
            message="Trends analysis completed",
            data=response_data
        )

    except Exception as e:
        logger.error(f"Trends analysis failed: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Trends analysis error: {str(e)}"
        )


@router.get("/hot-items/aggregate", response_model=SuccessResponse)
async def get_aggregated_hot_items(
    min_platforms: int = Query(2, ge=1, le=9, description="最少出现平台数"),
    similarity_threshold: float = Query(0.7, ge=0, le=1, description="相似度阈值"),
    limit: int = Query(20, ge=1, le=100, description="返回数量")
):
    """
    Aggregate hot items across platforms.

    Groups items whose titles are similar (character-set Jaccard via
    calculate_similarity) and keeps only the groups that span at least
    `min_platforms` distinct platforms, sorted by combined heat.

    NOTE(review): declared after /hot-items/{item_id}; with first-match
    routing, requests to /hot-items/aggregate are captured by that
    parameterized route — confirm the intended registration order.
    """
    cache_key = generate_cache_key(
        "cache:hot:aggregate",
        min_platforms=min_platforms,
        similarity_threshold=similarity_threshold,
        limit=limit
    )
    
    # Serve from cache when available.
    cached_data = await get_from_cache(cache_key)
    if cached_data:
        return SuccessResponse(
            message="Data from cache",
            data=cached_data
        )
    
    try:
        # Collect the freshest items (last 2 hours, top 50 by rank) from
        # every platform into one flat list.
        all_items = []
        for platform in Platform:
            collection = db[f"{platform.value}_hot_items"]
            
            cursor = collection.find({
                "crawled_at": {"$gte": datetime.utcnow() - timedelta(hours=2)}
            }).sort("rank", 1).limit(50)
            
            async for item in cursor:
                item["item_id"] = str(item.pop("_id"))
                item["platform"] = platform.value
                all_items.append(item)
        
        # Greedy O(n^2) grouping by pairwise title similarity.
        aggregates = []
        processed = set()
        
        for i, item in enumerate(all_items):
            if i in processed:
                continue
            
            # Seed a group with this item, then absorb similar later items.
            similar_items = [item]
            item_platforms = [item["platform"]]
            
            for j, other in enumerate(all_items[i+1:], i+1):
                if j in processed:
                    continue
                
                # Cheap character-level similarity of the two titles.
                similarity = calculate_similarity(
                    item.get("title", ""),
                    other.get("title", "")
                )
                
                if similarity >= similarity_threshold:
                    similar_items.append(other)
                    item_platforms.append(other["platform"])
                    processed.add(j)
            
            # Keep the group only if it spans enough distinct platforms.
            if len(set(item_platforms)) >= min_platforms:
                # Combined heat across all merged items.
                total_heat = sum(
                    parse_heat_value(si.get("heat_value", "0"))
                    for si in similar_items
                )
                
                avg_rank = sum(si.get("rank", 50) for si in similar_items) / len(similar_items)
                
                aggregate = HotItemAggregate(
                    title=item.get("title", ""),
                    platforms=list(set(item_platforms)),
                    total_heat=total_heat,
                    average_rank=avg_rank,
                    items=[HotItemBase(**si) for si in similar_items],
                    # NOTE(review): this records the threshold parameter, not
                    # the measured similarity of this particular group.
                    similarity_score=similarity_threshold,
                    category=item.get("category")
                )
                
                aggregates.append(aggregate.dict())
            
            processed.add(i)
        
        # Hottest topics first.
        aggregates.sort(key=lambda x: x["total_heat"], reverse=True)
        
        response_data = {
            "aggregates": aggregates[:limit],
            "total_items": len(all_items),
            "platforms_included": list(Platform),
            "aggregation_time": datetime.utcnow(),
            # Rough measure: raw items minus emitted groups (includes items
            # dropped for not meeting min_platforms, not only duplicates).
            "deduplication_count": len(all_items) - len(aggregates)
        }
        
        # Cache for one hour.
        await set_cache(cache_key, response_data, ttl=3600)
        
        return SuccessResponse(
            message="Aggregation completed",
            data=response_data
        )
        
    except Exception as e:
        logger.error(f"Aggregation failed: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Aggregation error: {str(e)}"
        )


def calculate_similarity(str1: str, str2: str) -> float:
    """Jaccard similarity over the character sets of two strings.

    Returns a value in [0, 1]; 0.0 when either string is empty. Comparison
    is case-insensitive. This is a deliberately cheap proxy for title
    similarity, not a semantic measure.
    """
    if not (str1 and str2):
        return 0.0

    chars_a = set(str1.lower())
    chars_b = set(str2.lower())

    union = chars_a | chars_b
    if not union:
        return 0.0

    return len(chars_a & chars_b) / len(union)


def parse_heat_value(heat_str: str) -> int:
    """Parse a heat-value string (optionally with a 万/亿 suffix) to an int.

    Examples: "1.5万" -> 15000, "2亿" -> 200000000, "1,234" -> 1234.
    Returns 0 for empty or unparseable input instead of raising — the
    亿/万 branches were previously unguarded and could crash on malformed
    values; plain decimal strings like "123.5" now parse (consistent with
    parse_heat_value_for_sort) instead of silently returning 0.
    """
    if not heat_str:
        return 0

    heat_str = str(heat_str).replace(",", "")

    try:
        if "亿" in heat_str:
            return int(float(heat_str.replace("亿", "")) * 100000000)
        elif "万" in heat_str:
            return int(float(heat_str.replace("万", "")) * 10000)
        else:
            # int(float(...)) also accepts decimal strings like "123.5".
            return int(float(heat_str))
    except (ValueError, TypeError):
        return 0


def parse_heat_value_for_sort(heat_str: str) -> int:
    """Convert a heat-value string (possibly using 万/亿 units) to an int.

    Intended for sort keys: any empty, zero or unparseable value maps to 0
    so such items sort last rather than raising.
    """
    if not heat_str:
        return 0

    text = str(heat_str).replace(",", "").strip()
    if not text or text == "0":
        return 0

    try:
        # Map Chinese magnitude suffixes to their multipliers.
        for suffix, factor in (("亿", 100000000), ("万", 10000)):
            if suffix in text:
                return int(float(text.replace(suffix, "")) * factor)
        return int(float(text))
    except (ValueError, TypeError):
        return 0


# Manual data-sync endpoint.
@router.post("/sync-data", response_model=SuccessResponse)
async def sync_data_manually():
    """Trigger an immediate sync of all platform data via the scheduler."""
    try:
        # Local import: the scheduler is only needed by this endpoint.
        from .scheduler import scheduler

        await scheduler.sync_all_platforms()
        return SuccessResponse(
            message="数据同步已触发",
            data={"sync_time": datetime.utcnow().isoformat()}
        )
    except Exception as exc:
        logger.error(f"Manual sync failed: {exc}")
        raise HTTPException(
            status_code=500,
            detail=f"同步失败: {str(exc)}"
        )


# API documentation metadata for the "hot-items" tag.
# NOTE(review): APIRouter does not consume a `tags_metadata` attribute —
# FastAPI reads tag metadata from the application's `openapi_tags`
# parameter — so this assignment likely has no effect on the generated
# docs; confirm and pass it to FastAPI(openapi_tags=...) if it is wanted.
router.tags_metadata = [
    {
        "name": "hot-items",
        "description": "热点数据API接口，提供多平台热点内容的查询、搜索、趋势分析和聚合功能",
        "externalDocs": {
            "description": "API文档",
            "url": "/api/docs",
        },
    }
]