"""今日头条热点爬虫实现"""
import asyncio
import hashlib
import json
import logging
import re
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Tuple
from urllib.parse import urljoin, urlparse, quote

import aiohttp
from bs4 import BeautifulSoup

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class ToutiaoHotSpider(BaseSpider):
    """Spider for Toutiao (今日头条) trending news.

    Fetches category feeds, per-article comments and related-article
    recommendations through Toutiao's PC web APIs, and can persist the
    results to MongoDB with URL-hash based de-duplication and a 30-day TTL.
    """

    # API endpoints.
    TOUTIAO_FEED_API = "https://www.toutiao.com/api/pc/feed/"
    TOUTIAO_ARTICLE_API = "https://www.toutiao.com/article/v2/tab_comments/"
    TOUTIAO_RELATED_API = "https://www.toutiao.com/api/pc/related/feed"

    # Maps API channel ids (the values sent to the feed API) to
    # human-readable category names used in stored documents.
    CATEGORY_MAPPING = {
        "news_hot": "要闻",        # headline news
        "news_society": "社会",    # society news
        "news_entertainment": "娱乐",  # entertainment news
        "news_tech": "科技"        # technology news
    }

    # Rate limiting: minimum gap between requests, in milliseconds.
    RATE_LIMIT = 1500  # 1.5-second interval
    MAX_CONCURRENT_REQUESTS = 5

    def __init__(self, mongodb_client=None, redis_client=None):
        """Initialize the Toutiao spider.

        Args:
            mongodb_client: MongoDB client used for persistence (optional).
            redis_client: Redis client, reserved for distributed rate
                limiting (optional; currently unused by this class).
        """
        super().__init__(name="ToutiaoHotSpider")
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client
        # Monotonic-ish event-loop timestamp of the last outbound request.
        self.last_request_time = 0
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)

        # Extend the base headers to mimic a real browser session.
        self.headers.update({
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Referer': 'https://www.toutiao.com',
            'Origin': 'https://www.toutiao.com',
            'X-Requested-With': 'XMLHttpRequest',
            'Cookie': ''  # real deployments may need a valid cookie here
        })

    @staticmethod
    def _build_url(base: str, params: Dict[str, Any]) -> str:
        """Build a GET URL with percent-encoded query values.

        A plain string join would send non-ASCII or reserved characters
        (e.g. a Chinese channel id) raw, producing an invalid URL; `quote`
        makes every value URL-safe.

        Args:
            base: Endpoint URL without a query string.
            params: Query parameters; values are stringified then encoded.

        Returns:
            The full URL with an encoded query string.
        """
        query = '&'.join(f"{key}={quote(str(value))}" for key, value in params.items())
        return f"{base}?{query}"

    async def fetch_with_rate_limit(self, url: str, **kwargs) -> Optional[str]:
        """Issue a request, honoring the concurrency cap and minimum spacing.

        Args:
            url: Target URL.
            **kwargs: Extra arguments forwarded to the base `fetch`.

        Returns:
            Response body text, or None on failure.
        """
        async with self.request_semaphore:
            # Enforce RATE_LIMIT milliseconds between consecutive requests.
            # NOTE(review): with MAX_CONCURRENT_REQUESTS > 1 several
            # coroutines can read last_request_time concurrently, so the
            # throttle is approximate rather than strict.
            current_time = asyncio.get_event_loop().time()
            time_since_last = (current_time - self.last_request_time) * 1000
            if time_since_last < self.RATE_LIMIT:
                await asyncio.sleep((self.RATE_LIMIT - time_since_last) / 1000)

            self.last_request_time = asyncio.get_event_loop().time()
            return await self.fetch(url, **kwargs)

    async def fetch_feed_by_category(self, category: str, max_behot_time: int = 0) -> List[Dict[str, Any]]:
        """Fetch and parse the news feed for one channel.

        Args:
            category: API channel id (a CATEGORY_MAPPING key, e.g. "news_hot").
            max_behot_time: Pagination cursor (Unix timestamp); 0 = first page.

        Returns:
            Parsed article dicts; empty list on any failure.
        """
        params = {
            'channel_id': category,
            'max_behot_time': max_behot_time,
            'category': category,
            'utm_source': 'toutiao',
            'visit_user_id': '0',  # anonymous user
            'format': 'json'
        }

        url = self._build_url(self.TOUTIAO_FEED_API, params)

        try:
            response_text = await self.fetch_with_rate_limit(url)
            if not response_text:
                return []

            data = json.loads(response_text)
            articles = []

            for item in data.get('data', []):
                # Skip advertisements and cards without a title (non-articles).
                if item.get('tag') == 'ad' or not item.get('title'):
                    continue

                article = await self.parse_article_item(item, category)
                if article:
                    articles.append(article)

            return articles

        except Exception as e:
            # Degrade to an empty page instead of aborting the whole crawl.
            logger.error(f"获取分类 {category} 新闻失败: {e}")
            return []

    async def parse_article_item(self, item: Dict[str, Any], category: str) -> Optional[Dict[str, Any]]:
        """Normalize one raw feed item into the storage document shape.

        Args:
            item: Raw article payload from the feed API.
            category: API channel id the item came from.

        Returns:
            Normalized article dict, or None if the item lacks a URL/title
            or parsing fails.
        """
        try:
            # Resolve the article URL; feed items may carry either field,
            # sometimes as a site-relative path.
            article_url = item.get('source_url', '') or item.get('article_url', '')
            if article_url and not article_url.startswith('http'):
                article_url = f"https://www.toutiao.com{article_url}"

            if not article_url:
                return None

            # URL hash is the de-duplication key (see check_duplicate).
            url_hash = self.generate_hash(article_url)

            title = item.get('title', '').strip()
            if not title:
                return None

            # behot_time is a second-resolution Unix timestamp.
            publish_time = self.parse_relative_time(item.get('behot_time', 0))

            abstract = item.get('abstract', '') or item.get('summary', '')
            keywords = await self.extract_keywords(title, abstract)

            article_data = {
                'url_hash': url_hash,
                'url': article_url,
                'title': title,
                # Store the human-readable name; fall back to the raw id.
                'category': self.CATEGORY_MAPPING.get(category, category),
                'summary': abstract,
                'keywords': keywords,
                'source': 'toutiao',
                'content': {
                    'metadata': {
                        'read_count': item.get('read_count', 0),
                        'comment_count': item.get('comment_count', 0),
                        'share_count': item.get('share_count', 0),
                        # Falls back to a single-element list that may hold
                        # '' when neither image field is present.
                        'image_urls': item.get('image_list', []) or [item.get('middle_image', {}).get('url', '')],
                        'has_video': item.get('has_video', False),
                        'video_duration': item.get('video_duration', 0),
                        'source_name': item.get('source', ''),  # publishing outlet
                        'group_id': item.get('group_id', ''),   # article group id
                        'item_id': item.get('item_id', '')      # article id
                    },
                    'comments': [],          # filled by fetch_comments
                    'related_articles': []   # filled by fetch_related_articles
                },
                'publish_time': publish_time,
                'update_time': datetime.utcnow(),
                'fetch_time': datetime.utcnow(),
                # TTL anchor: documents expire 30 days after fetch.
                'expire_at': datetime.utcnow() + timedelta(days=30)
            }

            return article_data

        except Exception as e:
            logger.error(f"解析文章失败: {e}")
            return None

    def parse_relative_time(self, timestamp: int) -> datetime:
        """Convert a Toutiao timestamp into a naive UTC datetime.

        Args:
            timestamp: Unix timestamp in seconds (Toutiao feed timestamps
                are second-resolution). Falsy or invalid values fall back
                to "now".

        Returns:
            A naive UTC datetime.
        """
        try:
            if timestamp:
                # Use UTC so publish_time is comparable with the other
                # stored datetimes (fetch_time/update_time/expire_at), all
                # of which are naive UTC from datetime.utcnow();
                # fromtimestamp() would yield local wall-clock time instead.
                return datetime.utcfromtimestamp(timestamp)
            return datetime.utcnow()
        except Exception:
            # e.g. out-of-range or non-numeric timestamp
            return datetime.utcnow()

    async def extract_keywords(self, title: str, abstract: str = "") -> List[str]:
        """Extract up to 10 keywords from title + abstract (naive version).

        A real deployment should use jieba or another NLP tokenizer; this
        whitespace-based split only works for text that contains spaces.

        Args:
            title: Article title.
            abstract: Article abstract/summary (optional).

        Returns:
            At most 10 unique keywords, in first-occurrence order.
        """
        text = f"{title} {abstract}"

        # Strip punctuation/special characters.
        text = re.sub(r'[^\w\s]', ' ', text)

        # Naive whitespace tokenization.
        words = text.split()

        # Minimal Chinese stop-word list.
        stop_words = {'的', '了', '是', '在', '和', '有', '与', '为', '等', '及', '或', '但', '而'}
        keywords = [w for w in words if len(w) > 1 and w not in stop_words]

        # De-duplicate while preserving order — list(set(...)) would yield
        # a different ordering across runs due to hash randomization.
        return list(dict.fromkeys(keywords))[:10]

    async def fetch_comments(self, item_id: str, offset: int = 0, limit: int = 20) -> List[Dict[str, Any]]:
        """Fetch comments for one article, with author names anonymized.

        Args:
            item_id: Article id (also used as group_id by this endpoint).
            offset: Pagination offset.
            limit: Maximum number of comments to request.

        Returns:
            Comment dicts; empty list on failure.
        """
        params = {
            'group_id': item_id,
            'item_id': item_id,
            'offset': offset,
            'count': limit,
            'format': 'json'
        }

        url = self._build_url(self.TOUTIAO_ARTICLE_API, params)

        try:
            response_text = await self.fetch_with_rate_limit(url)
            if not response_text:
                return []

            data = json.loads(response_text)
            comments = []

            for comment in data.get('data', {}).get('comments', []):
                # Store only a hash of the author name (PII minimization).
                author_name = comment.get('user', {}).get('name', 'anonymous')
                author_hash = self.generate_hash(author_name)

                comment_data = {
                    'content': comment.get('text', ''),
                    'author_hash': author_hash,
                    'likes': comment.get('digg_count', 0),
                    'reply_count': comment.get('reply_count', 0),
                    # parse_relative_time maps a missing/zero timestamp to
                    # "now" (UTC) instead of the 1970 epoch.
                    'create_time': self.parse_relative_time(comment.get('create_time', 0))
                }

                comments.append(comment_data)

            return comments

        except Exception as e:
            logger.warning(f"获取评论失败 {item_id}: {e}")
            return []

    async def fetch_related_articles(self, group_id: str) -> List[Dict[str, Any]]:
        """Fetch related-article recommendations for one article group.

        Args:
            group_id: Article group id.

        Returns:
            Related-article dicts with a rank-derived relevance score;
            empty list on failure.
        """
        params = {
            'group_id': group_id,
            'count': 10,
            'format': 'json'
        }

        url = self._build_url(self.TOUTIAO_RELATED_API, params)

        try:
            response_text = await self.fetch_with_rate_limit(url)
            if not response_text:
                return []

            data = json.loads(response_text)
            related = []

            for idx, item in enumerate(data.get('data', [])):
                article_url = item.get('source_url', '')
                if article_url and not article_url.startswith('http'):
                    article_url = f"https://www.toutiao.com{article_url}"

                related_item = {
                    'title': item.get('title', ''),
                    'url': article_url,
                    # Decreasing relevance by API rank: 1.0, 0.9, 0.8, ...
                    'relevance_score': 1.0 - (idx * 0.1)
                }

                if related_item['title']:
                    related.append(related_item)

            return related

        except Exception as e:
            logger.warning(f"获取相关推荐失败 {group_id}: {e}")
            return []

    async def crawl_hot_news(self, categories: Optional[List[str]] = None, with_comments: bool = True,
                           with_related: bool = True) -> List[Dict[str, Any]]:
        """Crawl trending news across channels, optionally enriching items.

        Args:
            categories: Channel ids to crawl (CATEGORY_MAPPING keys);
                defaults to all known channels.
            with_comments: Also fetch up to 10 comments per article.
            with_related: Also fetch related-article recommendations.

        Returns:
            All collected article dicts (failed channels are skipped).
        """
        if categories is None:
            categories = list(self.CATEGORY_MAPPING.keys())

        all_articles = []

        for category in categories:
            logger.info(f"开始爬取分类: {self.CATEGORY_MAPPING.get(category, category)}")

            try:
                articles = await self.fetch_feed_by_category(category)

                # Enrich each article with comments and recommendations.
                for article in articles:
                    item_id = article['content']['metadata'].get('item_id')
                    group_id = article['content']['metadata'].get('group_id')

                    if with_comments and item_id:
                        comments = await self.fetch_comments(item_id, limit=10)
                        article['content']['comments'] = comments

                    if with_related and group_id:
                        related = await self.fetch_related_articles(group_id)
                        article['content']['related_articles'] = related

                all_articles.extend(articles)
                logger.info(f"分类 {self.CATEGORY_MAPPING.get(category, category)} 爬取完成，获取 {len(articles)} 篇文章")

            except Exception as e:
                # One failing channel must not sink the others.
                logger.error(f"爬取分类 {category} 失败: {e}")
                continue

        logger.info(f"头条热点爬取完成，共获取 {len(all_articles)} 篇文章")
        return all_articles

    async def crawl_hot(self) -> List[Dict[str, Any]]:
        """Unified interface: crawl trending data across all default channels."""
        return await self.crawl_hot_news()

    async def check_duplicate(self, url_hash: str) -> Tuple[bool, Optional[Dict]]:
        """Check whether an article already exists and whether it is fresh.

        Args:
            url_hash: De-duplication key produced by generate_hash(url).

        Returns:
            (is_duplicate, existing_doc):
            - (True, doc): exists and fetched within the last 6 hours — skip.
            - (False, doc): exists but stale — caller should update it.
            - (False, None): not present (or no MongoDB client / error).
        """
        if not self.mongodb_client:
            return False, None

        try:
            # NOTE(review): these are synchronous pymongo calls inside an
            # async method — they block the event loop; consider motor.
            db = self.mongodb_client['crawler']
            collection = db['hot_topics']

            existing = collection.find_one({'url_hash': url_hash})

            if existing:
                # Refresh threshold: 6 hours since the last fetch.
                fetch_time = existing.get('fetch_time', datetime.utcnow())
                if datetime.utcnow() - fetch_time > timedelta(hours=6):
                    return False, existing  # stale — needs an update
                return True, existing  # fresh — no update needed

            return False, None

        except Exception as e:
            logger.error(f"检查重复失败: {e}")
            return False, None

    async def store_to_mongodb(self, articles: List[Dict[str, Any]]) -> int:
        """Persist articles to MongoDB, inserting new ones and refreshing stale ones.

        Args:
            articles: Normalized article dicts from parse_article_item.

        Returns:
            Number of documents inserted or updated (0 on failure or when
            no MongoDB client is configured).
        """
        if not self.mongodb_client or not articles:
            return 0

        try:
            db = self.mongodb_client['crawler']
            collection = db['hot_topics']

            # Ensure indexes BEFORE writing (create_index is idempotent) so
            # the unique url_hash constraint already protects the first
            # batch of inserts, and the TTL index is active immediately.
            collection.create_index([('url_hash', 1)], unique=True)
            collection.create_index([('source', 1), ('fetch_time', -1)])
            collection.create_index([('category', 1)])
            collection.create_index([('publish_time', -1)])
            collection.create_index('expire_at', expireAfterSeconds=0)

            stored_count = 0

            for article in articles:
                url_hash = article['url_hash']

                is_duplicate, existing = await self.check_duplicate(url_hash)

                if is_duplicate:
                    logger.debug(f"文章已存在，跳过: {article['title']}")
                    continue

                if existing:
                    # Stale copy exists — refresh its volatile fields only.
                    update_data = {
                        '$set': {
                            'content.metadata': article['content']['metadata'],
                            'content.comments': article['content']['comments'],
                            'update_time': datetime.utcnow(),
                            'fetch_time': datetime.utcnow()
                        }
                    }
                    collection.update_one({'_id': existing['_id']}, update_data)
                    logger.debug(f"更新文章: {article['title']}")
                else:
                    collection.insert_one(article)
                    logger.debug(f"插入新文章: {article['title']}")

                stored_count += 1

            logger.info(f"成功存储 {stored_count} 篇文章到MongoDB")
            return stored_count

        except Exception as e:
            logger.error(f"存储到MongoDB失败: {e}")
            return 0

    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse page HTML (implements the base-class abstract method).

        Toutiao is crawled mainly through JSON APIs; this is a fallback
        HTML parser whose selectors must be adjusted to the live page.

        Args:
            html: Raw HTML content.
            url: Page URL (unused; kept for the base-class signature).

        Returns:
            Partially-populated article dicts; empty list on failure.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')
            articles = []

            # Selector names are placeholders for the real page structure.
            for item in soup.find_all('div', class_='article-item'):
                title = item.find('h3', class_='title')
                if not title:
                    continue

                article_data = {
                    'title': title.get_text().strip(),
                    'url': item.find('a')['href'] if item.find('a') else '',
                    'summary': item.find('p', class_='abstract').get_text() if item.find('p', class_='abstract') else '',
                    # ... remaining fields omitted in this fallback path
                }

                articles.append(article_data)

            return articles

        except Exception as e:
            logger.error(f"HTML解析失败: {e}")
            return []


async def main():
    """Smoke-test entry point: crawl one channel and print a short report.

    Returns:
        True if at least one item was fetched, else False (used as the
        process exit status by the __main__ guard).
    """
    import logging
    logging.basicConfig(level=logging.INFO)

    async with ToutiaoHotSpider() as spider:
        # Pass a real API channel id — CATEGORY_MAPPING keys are the ids;
        # '热点' is only a display name and would return an empty feed.
        hot_items = await spider.crawl_hot_news(
            categories=['news_hot'], with_comments=False, with_related=False
        )

        if hot_items:
            print(f"\n=== 今日头条热榜测试结果 ===")
            print(f"获取到 {len(hot_items)} 条热点新闻")

            # Show the first 5 items.
            for idx, item in enumerate(hot_items[:5], 1):
                print(f"{idx}. {item.get('title', 'N/A')}")
                print(f"   分类: {item.get('category', 'N/A')} | 来源: {item.get('source', 'N/A')}")
                # Engagement counters live under content.metadata (see
                # parse_article_item) — there is no top-level 'metrics' key,
                # so the old lookup always printed zeros.
                metrics = item.get('content', {}).get('metadata', {})
                print(f"   阅读: {metrics.get('read_count', 0)} | 评论: {metrics.get('comment_count', 0)}")
        else:
            print("未能获取今日头条热点数据")

        return len(hot_items) > 0

if __name__ == "__main__":
    import asyncio
    result = asyncio.run(main())
    exit(0 if result else 1)