"""Hot topic data loader implementation."""

import aiohttp
import asyncio
import logging
from typing import Dict, Any, List, Optional
from datetime import datetime, timedelta
import json
import redis.asyncio as redis
from langchain.schema import Document

from ..core.config import settings

logger = logging.getLogger(__name__)


class HotDataLoader:
    """Load and cache hot topic data from the crawler service.

    Fetches hot-topic items, trend series, and keyword search results from
    the crawler HTTP API, with a best-effort Redis cache in front of the
    single-item and latest-items endpoints. Redis failures are logged and
    treated as cache misses so a cache outage never breaks data loading.
    """

    def __init__(self, redis_client: Optional[redis.Redis] = None):
        """Initialize hot data loader.

        Args:
            redis_client: Optional Redis client for caching. When None,
                every call goes straight to the crawler API.
        """
        # Fall back to the local crawler service when the setting is absent.
        self.crawler_api_base = getattr(settings, 'crawler_api_url', 'http://localhost:8001/api/v1')
        self.redis_client = redis_client
        self.cache_ttl = 300  # 5 minutes cache TTL

    async def _cache_get(self, cache_key: str) -> Optional[Any]:
        """Return the JSON-decoded cached value for ``cache_key``, or None.

        Returns None on cache miss, on empty payload, when no Redis client
        is configured, or when Redis raises (logged as a warning).
        """
        if not self.redis_client:
            return None
        try:
            cached_data = await self.redis_client.get(cache_key)
        except Exception as e:
            logger.warning(f"Redis cache read error: {e}")
            return None
        if cached_data:
            return json.loads(cached_data)
        return None

    async def _cache_set(self, cache_key: str, value: Any) -> None:
        """Best-effort JSON write of ``value`` under the standard TTL.

        Write failures are logged and swallowed — caching is an
        optimization, never a requirement.
        """
        if not self.redis_client:
            return
        try:
            await self.redis_client.setex(
                cache_key,
                self.cache_ttl,
                json.dumps(value, ensure_ascii=False)
            )
        except Exception as e:
            logger.warning(f"Redis cache write error: {e}")

    async def load_hot_item(self, item_id: str, platform: str) -> Optional[Dict[str, Any]]:
        """Load hot topic item data.

        Args:
            item_id: ID of the hot topic item
            platform: Platform name (weibo, douyin, baidu, etc.)

        Returns:
            Hot topic data dictionary or None if not found
        """
        cache_key = f"hot_item:{platform}:{item_id}"

        cached = await self._cache_get(cache_key)
        if cached is not None:
            logger.info(f"Hot item cache hit: {cache_key}")
            return cached

        # Fetch from crawler API
        try:
            async with aiohttp.ClientSession() as session:
                url = f"{self.crawler_api_base}/hot-items/{item_id}"
                params = {"platform": platform}

                logger.info(f"Fetching hot item from API: {url}")
                # Explicit ClientTimeout: bare numeric timeouts are
                # deprecated in aiohttp 3.x.
                timeout = aiohttp.ClientTimeout(total=10)
                async with session.get(url, params=params, timeout=timeout) as response:
                    if response.status != 200:
                        logger.error(f"API request failed: {response.status}")
                        return None

                    data = await response.json()
                    if data.get("success") and data.get("data"):
                        item_data = data["data"]
                        await self._cache_set(cache_key, item_data)
                        return item_data

                    logger.warning(f"Invalid API response format: {data}")
                    return None

        except Exception as e:
            logger.error(f"Error loading hot item {item_id}: {e}")
            return None

    async def load_hot_item_with_trends(self, item_id: str, platform: str) -> Optional[Dict[str, Any]]:
        """Load hot topic item with trend data.

        Args:
            item_id: ID of the hot topic item
            platform: Platform name

        Returns:
            Hot topic data with a ``trends`` key (empty list when trend
            data is unavailable) or None if the item itself is not found.
        """
        # Load basic item data
        item_data = await self.load_hot_item(item_id, platform)
        if not item_data:
            return None

        # Default first so the "trends" key is always present; the original
        # code left it unset when a 200 response carried an invalid payload.
        item_data["trends"] = []

        # Load trend data (best effort — the item is still useful without it)
        try:
            async with aiohttp.ClientSession() as session:
                url = f"{self.crawler_api_base}/hot-items/trends/{item_id}"

                logger.info(f"Fetching trend data from API: {url}")
                timeout = aiohttp.ClientTimeout(total=10)
                async with session.get(url, timeout=timeout) as response:
                    if response.status == 200:
                        trend_data = await response.json()
                        if trend_data.get("success") and trend_data.get("data"):
                            item_data["trends"] = trend_data["data"]
                    else:
                        logger.warning(f"Trend data not available: {response.status}")

        except Exception as e:
            logger.warning(f"Error loading trend data: {e}")

        return item_data

    async def load_latest_hot_items(
        self,
        platforms: List[str],
        limit: int = 50,
        time_range: str = "today"
    ) -> List[Dict[str, Any]]:
        """Load latest hot items from multiple platforms.

        Args:
            platforms: List of platform names
            limit: Maximum number of items to return
            time_range: Time range filter (today, week, month)

        Returns:
            List of hot topic items, sorted by heat score descending.
        """
        cache_key = f"hot_items:latest:{':'.join(platforms)}:{time_range}:{limit}"

        cached = await self._cache_get(cache_key)
        if cached is not None:
            logger.info(f"Hot items cache hit: {cache_key}")
            return cached

        # Fetch from crawler API
        try:
            async with aiohttp.ClientSession() as session:
                url = f"{self.crawler_api_base}/hot-items"
                params = {
                    "platforms": platforms,
                    "page_size": limit,
                    "time_range": time_range,
                    "sort_by": "heat",
                    "sort_order": "desc"
                }

                logger.info(f"Fetching hot items from API: {url}")
                timeout = aiohttp.ClientTimeout(total=15)
                async with session.get(url, params=params, timeout=timeout) as response:
                    if response.status != 200:
                        logger.error(f"API request failed: {response.status}")
                        return []

                    data = await response.json()
                    if not (data.get("success") and data.get("data")):
                        logger.warning(f"Invalid API response format: {data}")
                        return []

                    # The API returns either platform-grouped data or a flat
                    # item list; normalize both shapes to one list.
                    items: List[Dict[str, Any]] = []
                    if "platforms" in data["data"]:
                        for platform_items in data["data"]["platforms"].values():
                            items.extend(platform_items)
                    elif "items" in data["data"]:
                        items = data["data"]["items"]

                    # Re-sort across platforms by heat score, then truncate.
                    items.sort(key=lambda x: x.get("heat_score", 0), reverse=True)
                    items = items[:limit]

                    await self._cache_set(cache_key, items)
                    return items

        except Exception as e:
            logger.error(f"Error loading hot items: {e}")
            return []

    async def search_hot_items(
        self,
        keyword: str,
        platforms: Optional[List[str]] = None,
        limit: int = 20
    ) -> List[Dict[str, Any]]:
        """Search hot items by keyword.

        Args:
            keyword: Search keyword
            platforms: Optional list of platforms to search
            limit: Maximum number of results

        Returns:
            List of matching hot topic items (empty on any failure).
        """
        try:
            async with aiohttp.ClientSession() as session:
                url = f"{self.crawler_api_base}/hot-items/search"
                params: Dict[str, Any] = {
                    "keyword": keyword,
                    "page_size": limit
                }
                if platforms:
                    params["platforms"] = platforms

                logger.info(f"Searching hot items: {keyword}")
                timeout = aiohttp.ClientTimeout(total=10)
                async with session.get(url, params=params, timeout=timeout) as response:
                    if response.status != 200:
                        logger.error(f"Search API request failed: {response.status}")
                        return []

                    data = await response.json()
                    if data.get("success") and data.get("data"):
                        return data["data"].get("items", [])
                    return []

        except Exception as e:
            logger.error(f"Error searching hot items: {e}")
            return []

    @staticmethod
    def _item_to_content(item: Dict[str, Any]) -> str:
        """Render one hot item as the text body of a Document."""
        content_parts = []

        if item.get("title"):
            content_parts.append(f"标题: {item['title']}")
        if item.get("excerpt"):
            content_parts.append(f"摘要: {item['excerpt']}")
        if item.get("content"):
            content_parts.append(f"内容: {item['content']}")

        # Add top comments if available
        if item.get("comments"):
            top_comments = item["comments"][:3]  # Top 3 comments
            if top_comments:
                comments_text = "\n".join(
                    f"评论: {comment.get('content', '')}"
                    for comment in top_comments
                )
                content_parts.append(f"热门评论:\n{comments_text}")

        return "\n\n".join(content_parts)

    def convert_to_documents(self, hot_items: List[Dict[str, Any]]) -> List[Document]:
        """Convert hot topic items to LangChain Document format.

        Args:
            hot_items: List of hot topic items

        Returns:
            List of LangChain Document objects
        """
        documents = []

        for item in hot_items:
            # Carry identifying/ranking fields alongside the rendered text.
            metadata = {
                "item_id": item.get("item_id", ""),
                "platform": item.get("platform", ""),
                "heat_score": item.get("heat_score", 0),
                "publish_time": item.get("publish_time", ""),
                "url": item.get("url", ""),
                "category": item.get("category", ""),
                "tags": item.get("tags", [])
            }
            documents.append(Document(
                page_content=self._item_to_content(item),
                metadata=metadata
            ))

        return documents

    async def get_related_hot_items(
        self,
        item_id: str,
        platform: str,
        limit: int = 5
    ) -> List[Dict[str, Any]]:
        """Get related hot items based on similarity.

        Args:
            item_id: ID of the reference hot topic item
            platform: Platform name
            limit: Maximum number of related items

        Returns:
            List of related hot topic items, sorted by heat score.
        """
        # Load the reference item
        ref_item = await self.load_hot_item(item_id, platform)
        if not ref_item:
            return []

        # Extract keywords from title for similarity search
        title = ref_item.get("title", "")
        if not title:
            return []

        # Simple keyword extraction (can be improved with NLP)
        keywords = [word for word in title.split() if len(word) > 1][:3]
        if not keywords:
            return []

        # Search for related items (over-fetch per keyword, dedupe below)
        related_items: List[Dict[str, Any]] = []
        for keyword in keywords:
            items = await self.search_hot_items(keyword, limit=limit * 2)
            related_items.extend(items)

        # Remove duplicates and the reference item itself
        ref_key = f"{platform}:{item_id}"
        seen_ids = set()
        unique_items = []
        for item in related_items:
            item_key = f"{item.get('platform', '')}:{item.get('item_id', '')}"
            if item_key != ref_key and item_key not in seen_ids:
                seen_ids.add(item_key)
                unique_items.append(item)

        # Sort by heat score and limit
        unique_items.sort(key=lambda x: x.get("heat_score", 0), reverse=True)
        return unique_items[:limit]