#!/usr/bin/env python
"""
爬取所有平台的真实热点数据
支持微博、知乎、百度、今日头条、B站等多平台
"""
import asyncio
import hashlib
import json
import logging
import re
import sys
from datetime import datetime
from pathlib import Path
from urllib.parse import quote

import aiohttp
from bs4 import BeautifulSoup
from motor.motor_asyncio import AsyncIOMotorClient

# Make the local ``src`` directory importable so the spider modules below resolve.
sys.path.insert(0, str(Path(__file__).parent / 'src'))

# Optional platform-specific spiders; the script degrades gracefully when they
# are missing (only the Baidu/Bilibili crawlers depend on them).
try:
    from spiders.bilibili_hot_spider import BilibiliHotSpider
    from spiders.baidu_hot_spider import BaiduHotSpider
except ImportError as e:
    # NOTE(review): this warning is emitted before basicConfig() below runs,
    # so it goes through logging's default last-resort handler — confirm that
    # is intended.
    logger = logging.getLogger(__name__)
    logger.warning(f"无法导入爬虫模块: {e}")

# Log to both stdout and a UTF-8 file so crawl runs can be audited later.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('crawler.log', encoding='utf-8')
    ]
)
logger = logging.getLogger(__name__)


class MultiPlatformCrawler:
    """Crawl real-time trending lists from Weibo, Zhihu, Toutiao, Baidu and
    Bilibili, normalize them into a shared schema and store each platform's
    items in its own MongoDB collection (``{platform}_hot_items``).

    Common item keys: ``title``, ``url``, ``rank``, ``heat_value`` (display
    string), ``heat_value_numeric`` (int), ``platform``, and UTC
    ``crawled_at``/``updated_at`` timestamps; individual platforms add extras
    such as ``category``, ``excerpt``, ``tag`` or ``stats``.
    """

    def __init__(self):
        # NOTE(review): MongoDB credentials are hard-coded — move them to an
        # environment variable or config file before deploying.
        self.mongodb_url = "mongodb://admin:newhand@localhost:27017/"
        self.db_name = "crawler_db"
        # Browser-like headers shared by every request to reduce trivial
        # bot blocking.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }

    async def crawl_weibo(self):
        """Crawl Weibo trending searches.

        Tries the official JSON API first, then falls back to scraping the
        public HTML board. Stores results to MongoDB and returns the
        (possibly empty) item list.
        """
        logger.info("=== 爬取微博热搜 ===")

        # Endpoints that work without a login; the first that yields data wins.
        urls = [
            "https://weibo.com/ajax/side/hotSearch",  # official JSON API
            "https://s.weibo.com/top/summary?cate=realtimehot",  # HTML fallback
        ]

        hot_items = []

        for url in urls:
            async with aiohttp.ClientSession() as session:
                try:
                    headers = self.headers.copy()
                    headers.update({
                        'Referer': 'https://s.weibo.com/',
                        'X-Requested-With': 'XMLHttpRequest'
                    })

                    # aiohttp expects a ClientTimeout object; passing a bare
                    # number is deprecated.
                    timeout = aiohttp.ClientTimeout(total=30)
                    async with session.get(url, headers=headers, timeout=timeout) as response:
                        if response.status == 200:
                            if 'ajax' in url:
                                # JSON API branch.
                                data = await response.json()
                                if data.get('data') and data['data'].get('realtime'):
                                    for idx, item in enumerate(data['data']['realtime'][:50], 1):
                                        word = item.get('word', '')
                                        hot_item = {
                                            'title': word.replace('#', ''),
                                            # quote() so non-ASCII keywords form
                                            # a valid search URL.
                                            'url': f"https://s.weibo.com/weibo?q=%23{quote(word)}%23",
                                            'rank': idx,
                                            'heat_value': str(item.get('num', 0)),
                                            # parse_heat_value tolerates missing
                                            # or non-numeric values instead of
                                            # raising ValueError.
                                            'heat_value_numeric': self.parse_heat_value(item.get('num', 0)),
                                            'platform': 'weibo',
                                            'category': item.get('category', '其他'),
                                            'crawled_at': datetime.utcnow(),
                                            'updated_at': datetime.utcnow()
                                        }

                                        # Optional badge / summary fields.
                                        if item.get('icon_desc'):
                                            hot_item['tag'] = item['icon_desc']
                                        if item.get('note'):
                                            hot_item['excerpt'] = item['note']

                                        hot_items.append(hot_item)

                                    logger.info(f"成功获取 {len(hot_items)} 条微博热搜")
                                    break
                            else:
                                # HTML fallback branch.
                                html = await response.text()
                                hot_items = await self.parse_weibo_html(html)
                                if hot_items:
                                    break

                except Exception as e:
                    # Keep trying the remaining endpoints on any failure.
                    logger.error(f"爬取微博失败 ({url}): {e}")
                    continue

        if hot_items:
            await self.save_to_mongodb(hot_items, 'weibo')
            logger.info(f"成功存储 {len(hot_items)} 条微博热搜")
        else:
            logger.warning("未能获取微博热搜数据")

        return hot_items

    async def parse_weibo_html(self, html):
        """Parse the s.weibo.com trending board HTML into normalized items.

        Declared ``async`` only to match its call sites; it does no awaiting.
        """
        hot_items = []
        soup = BeautifulSoup(html, 'html.parser')

        # The board is a plain <table>; each <tr> is one trending entry.
        tbody = soup.select_one('tbody')
        if tbody:
            trs = tbody.find_all('tr')
            # Skip the first row (presumably a pinned/header entry — verify
            # against the live page), keep at most 50.
            for idx, tr in enumerate(trs[1:51], 1):
                try:
                    td_title = tr.select_one('td.td-02')
                    if not td_title:
                        continue

                    a_tag = td_title.find('a')
                    if not a_tag:
                        continue

                    title = a_tag.get_text(strip=True)
                    href = a_tag.get('href', '')
                    if not href.startswith('http'):
                        href = f"https://s.weibo.com{href}"

                    # The heat figure lives in a <span> inside the title cell.
                    td_hot = tr.select_one('td.td-02 span')
                    heat_value = td_hot.get_text(strip=True) if td_hot else ''

                    hot_item = {
                        'title': title,
                        'url': href,
                        'rank': idx,
                        'heat_value': heat_value,
                        'heat_value_numeric': self.parse_heat_value(heat_value),
                        'platform': 'weibo',
                        'crawled_at': datetime.utcnow(),
                        'updated_at': datetime.utcnow()
                    }

                    hot_items.append(hot_item)

                except Exception as e:
                    # One malformed row must not abort the whole parse.
                    logger.debug(f"解析微博项失败: {e}")
                    continue

        return hot_items

    async def crawl_zhihu(self):
        """Crawl the Zhihu hot list via its public feed API and persist it."""
        logger.info("=== 爬取知乎热榜 ===")

        url = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total?limit=50"

        hot_items = []

        async with aiohttp.ClientSession() as session:
            try:
                headers = self.headers.copy()
                headers.update({
                    'Referer': 'https://www.zhihu.com/hot',
                    'X-Requested-With': 'fetch'
                })

                timeout = aiohttp.ClientTimeout(total=30)
                async with session.get(url, headers=headers, timeout=timeout) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('data'):
                            for idx, item in enumerate(data['data'][:50], 1):
                                # Each feed entry wraps the actual question in
                                # a 'target' object.
                                target = item.get('target', {})

                                hot_item = {
                                    'title': target.get('title', ''),
                                    'url': f"https://www.zhihu.com/question/{target.get('id', '')}",
                                    'rank': idx,
                                    'heat_value': item.get('detail_text', ''),
                                    'heat_value_numeric': self.parse_zhihu_heat(item.get('detail_text', '')),
                                    'platform': 'zhihu',
                                    'excerpt': target.get('excerpt', ''),
                                    'crawled_at': datetime.utcnow(),
                                    'updated_at': datetime.utcnow()
                                }

                                # Optional engagement counters.
                                if target.get('answer_count'):
                                    hot_item['answer_count'] = target['answer_count']
                                if target.get('follower_count'):
                                    hot_item['follower_count'] = target['follower_count']

                                hot_items.append(hot_item)

                            logger.info(f"成功获取 {len(hot_items)} 条知乎热榜")
                    else:
                        logger.warning(f"知乎返回状态码: {response.status}")

            except Exception as e:
                logger.error(f"爬取知乎失败: {e}")

        if hot_items:
            await self.save_to_mongodb(hot_items, 'zhihu')
            logger.info(f"成功存储 {len(hot_items)} 条知乎热榜")

        return hot_items

    def parse_zhihu_heat(self, heat_str):
        """Parse a Zhihu heat string such as ``"432 万热度"`` into an int.

        Delegates to :meth:`parse_heat_value`, which handles the same
        ``万``/plain-number formats and additionally ``亿``. Returns 0 for
        empty or unparseable input.
        """
        return self.parse_heat_value(heat_str)

    async def crawl_toutiao(self):
        """Crawl the Toutiao hot board via its PC JSON endpoint and persist it."""
        logger.info("=== 爬取今日头条热榜 ===")

        url = "https://www.toutiao.com/hot-event/hot-board/?origin=toutiao_pc"

        hot_items = []

        async with aiohttp.ClientSession() as session:
            try:
                headers = self.headers.copy()
                headers.update({
                    'Referer': 'https://www.toutiao.com/'
                })

                timeout = aiohttp.ClientTimeout(total=30)
                async with session.get(url, headers=headers, timeout=timeout) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('data'):
                            for idx, item in enumerate(data['data'][:50], 1):
                                hot_item = {
                                    'title': item.get('Title', ''),
                                    'url': item.get('Url', ''),
                                    'rank': idx,
                                    'heat_value': str(item.get('HotValue', 0)),
                                    # Tolerant parse: HotValue is usually a
                                    # digit string, but don't crash if not.
                                    'heat_value_numeric': self.parse_heat_value(item.get('HotValue', 0)),
                                    'platform': 'toutiao',
                                    'category': item.get('Label', '其他'),
                                    'crawled_at': datetime.utcnow(),
                                    'updated_at': datetime.utcnow()
                                }

                                # Optional cover image.
                                if item.get('Image'):
                                    hot_item['image_url'] = item['Image'].get('url', '')

                                hot_items.append(hot_item)

                            logger.info(f"成功获取 {len(hot_items)} 条头条热榜")
                    else:
                        logger.warning(f"头条返回状态码: {response.status}")

            except Exception as e:
                logger.error(f"爬取头条失败: {e}")

        if hot_items:
            await self.save_to_mongodb(hot_items, 'toutiao')
            logger.info(f"成功存储 {len(hot_items)} 条头条热榜")

        return hot_items

    async def crawl_baidu(self):
        """Crawl Baidu trending searches.

        Prefers the dedicated BaiduHotSpider (also covers the case where the
        spider module failed to import — the resulting NameError lands in the
        same fallback); on any failure scrapes the public board page directly.
        """
        logger.info("=== 爬取百度热搜 ===")

        hot_items = []

        try:
            # Primary path: the custom spider.
            async with BaiduHotSpider() as spider:
                hot_items = await spider.fetch_hot_searches()

            if hot_items:
                # Stamp crawl timestamps onto the spider's items.
                for item in hot_items:
                    item['crawled_at'] = datetime.utcnow()
                    item['updated_at'] = datetime.utcnow()

                logger.info(f"成功获取 {len(hot_items)} 条百度热搜")

        except Exception as e:
            logger.warning(f"使用自定义爬虫失败: {e}, 尝试备用方案")

            # Fallback: scrape the board page ourselves.
            url = "https://top.baidu.com/board?tab=realtime"
            async with aiohttp.ClientSession() as session:
                try:
                    timeout = aiohttp.ClientTimeout(total=30)
                    async with session.get(url, headers=self.headers, timeout=timeout) as response:
                        if response.status == 200:
                            html = await response.text()
                            soup = BeautifulSoup(html, 'html.parser')

                            # NOTE(review): these class names look build-generated
                            # and may change without notice — keep an eye on them.
                            items = soup.select('.category-wrap_iQLoo')
                            for idx, item in enumerate(items[:50], 1):
                                title_elem = item.select_one('.c-single-text-ellipsis')
                                if title_elem:
                                    title = title_elem.get_text(strip=True)
                                    hot_item = {
                                        'title': title,
                                        # quote() keeps the search URL valid for
                                        # non-ASCII titles.
                                        'url': f"https://www.baidu.com/s?wd={quote(title)}",
                                        'rank': idx,
                                        'platform': 'baidu',
                                        'crawled_at': datetime.utcnow(),
                                        'updated_at': datetime.utcnow()
                                    }
                                    hot_items.append(hot_item)

                except Exception as e:
                    logger.error(f"爬取百度备用方案失败: {e}")

        if hot_items:
            await self.save_to_mongodb(hot_items, 'baidu')
            logger.info(f"成功存储 {len(hot_items)} 条百度热搜")

        return hot_items

    async def crawl_bilibili(self):
        """Crawl Bilibili popular videos via BilibiliHotSpider and persist them."""
        logger.info("=== 爬取B站热门 ===")

        hot_items = []

        try:
            async with BilibiliHotSpider() as spider:
                videos = await spider.fetch_hot_videos(limit=50)

                # Map the spider's video dicts onto the common item schema;
                # 'view' count doubles as the heat metric.
                for video in videos:
                    hot_item = {
                        'title': video.get('title', ''),
                        'url': video.get('url', ''),
                        'rank': video.get('rank', 0),
                        'author': video.get('author', ''),
                        'heat_value': str(video.get('stats', {}).get('view', 0)),
                        'heat_value_numeric': video.get('stats', {}).get('view', 0),
                        'platform': 'bilibili',
                        'crawled_at': datetime.utcnow(),
                        'updated_at': datetime.utcnow(),
                        'stats': video.get('stats', {})
                    }
                    hot_items.append(hot_item)

                logger.info(f"成功获取 {len(hot_items)} 条B站热门")

        except Exception as e:
            logger.error(f"爬取B站失败: {e}")

        if hot_items:
            await self.save_to_mongodb(hot_items, 'bilibili')
            logger.info(f"成功存储 {len(hot_items)} 条B站热门")

        return hot_items

    def parse_heat_value(self, heat_str):
        """Parse a generic heat string ("1.2万", "3亿", "12345") into an int.

        Returns 0 for empty or unparseable input instead of raising.
        """
        if not heat_str:
            return 0

        heat_str = str(heat_str).strip()

        # Leading number plus an optional 万 (1e4) / 亿 (1e8) multiplier.
        match = re.search(r'(\d+(?:\.\d+)?)\s*([万亿]?)', heat_str)
        if match:
            num, unit = match.groups()
            try:
                value = float(num)
                if unit == '万':
                    value *= 10000
                elif unit == '亿':
                    value *= 100000000
                return int(value)
            except ValueError:
                pass

        # Last resort: the whole string might already be a plain integer.
        try:
            return int(heat_str)
        except (ValueError, TypeError):
            return 0

    async def save_to_mongodb(self, items, platform):
        """Replace the contents of ``{platform}_hot_items`` with *items*.

        NOTE(review): delete_many + insert_many is not atomic — a reader
        hitting the collection between the two calls sees it empty.
        """
        if not items:
            return

        client = AsyncIOMotorClient(self.mongodb_url)
        db = client[self.db_name]
        collection = db[f'{platform}_hot_items']

        try:
            # Drop the previous snapshot, then insert the fresh one.
            await collection.delete_many({})

            result = await collection.insert_many(items)
            logger.info(f"成功插入 {len(result.inserted_ids)} 条数据到 {platform}_hot_items")

        except Exception as e:
            logger.error(f"保存到MongoDB失败: {e}")
        finally:
            client.close()

    async def run(self):
        """Crawl all platforms concurrently, log per-platform stats and
        write a JSON summary report."""
        logger.info("开始爬取各平台真实数据...")

        # Fan out one task per platform.
        tasks = [
            self.crawl_weibo(),
            self.crawl_zhihu(),
            self.crawl_toutiao(),
            self.crawl_baidu(),
            self.crawl_bilibili()
        ]

        # return_exceptions=True so one failing platform can't sink the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        for idx, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"任务 {idx} 失败: {result}")

        # Summarize what actually landed in MongoDB.
        logger.info("\n=== 爬取结果统计 ===")

        client = AsyncIOMotorClient(self.mongodb_url)
        db = client[self.db_name]

        for platform in ['weibo', 'zhihu', 'toutiao', 'baidu', 'bilibili']:
            collection = db[f'{platform}_hot_items']
            count = await collection.count_documents({})
            logger.info(f"{platform}_hot_items: {count} 条数据")

            # Log the first 3 items as a sanity sample.
            if count > 0:
                samples = await collection.find().limit(3).to_list(length=3)
                for sample in samples:
                    logger.info(f"  [{sample.get('rank', 'N/A')}] {sample.get('title', 'N/A')[:30]}...")

        client.close()

        await self.generate_report()

        logger.info("\n爬取完成！")

    async def generate_report(self):
        """Write a timestamped JSON report with per-platform counts and the
        top-10 items of each platform."""
        report = {
            'timestamp': datetime.utcnow().isoformat(),
            'platforms': {},
            'total_items': 0
        }

        client = AsyncIOMotorClient(self.mongodb_url)
        db = client[self.db_name]

        for platform in ['weibo', 'zhihu', 'toutiao', 'baidu', 'bilibili']:
            collection = db[f'{platform}_hot_items']
            count = await collection.count_documents({})

            top_items = []
            if count > 0:
                items = await collection.find().sort('rank', 1).limit(10).to_list(length=10)
                top_items = [
                    {
                        'rank': item.get('rank'),
                        'title': item.get('title'),
                        'heat_value': item.get('heat_value', ''),
                        'url': item.get('url', '')
                    }
                    for item in items
                ]

            report['platforms'][platform] = {
                'count': count,
                'top_10': top_items
            }
            report['total_items'] += count

        client.close()

        # Local time in the filename; default=str stringifies the ObjectIds /
        # datetimes that json can't serialize natively.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        report_file = f'hot_topics_report_{timestamp}.json'

        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2, default=str)

        logger.info(f"汇总报告已保存到: {report_file}")


if __name__ == "__main__":
    crawler = MultiPlatformCrawler()
    asyncio.run(crawler.run())