#!/usr/bin/env python3
"""
微博爬虫核心模块
包含主要的爬虫类和相关的功能组件
"""

import html
import logging
import random
import re
import time
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Any
from urllib.parse import quote

import requests

from config.settings import setup_logging
from config.settings import WeiboConfig
from models.weibo_post import WeiboPost

# Configure logging once at import time and bind the module-level logger.
setup_logging()
logger = logging.getLogger('WeiboSpider')


class RequestHandler:
    """HTTP request handler: sends requests with retry and error handling."""

    def __init__(self, config: WeiboConfig):
        self.config = config
        self.session = requests.Session()
        self.setup_session()

    def setup_session(self):
        """Apply default headers and mount a retrying adapter on the session."""
        self.session.headers.update(self.config.headers)
        # Transport-level retry on top of the application-level retry loop below.
        retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
        for scheme in ('http://', 'https://'):
            self.session.mount(scheme, retry_adapter)

    def make_request(self, url: str, params: Dict, cookies: Dict = None) -> Optional[requests.Response]:
        """Send a GET request with retries, random delay and error handling.

        Args:
            url: target URL.
            params: query-string parameters.
            cookies: optional cookie dict for this request.

        Returns:
            A validated Response object, or None when every attempt failed.
        """
        total = self.config.max_retries
        for attempt in range(total):
            try:
                # Random pause before each attempt to avoid hammering the API.
                pause = random.uniform(*self.config.request_delay)
                logger.debug(f"请求延迟: {pause:.2f}秒")
                time.sleep(pause)

                resp = self.session.get(
                    url,
                    params=params,
                    cookies=cookies,
                    timeout=self.config.timeout,
                )
                if self._check_response(resp):
                    return resp
                logger.warning(f"请求检查失败，尝试重试 ({attempt + 1}/{total})")

            except requests.exceptions.Timeout:
                logger.error(f"请求超时，尝试重试 ({attempt + 1}/{total})")
            except requests.exceptions.ConnectionError:
                logger.error(f"连接错误，尝试重试 ({attempt + 1}/{total})")
            except Exception as e:
                logger.error(f"请求异常: {e}，尝试重试 ({attempt + 1}/{total})")

            # Back off before the next attempt (skip after the final one).
            if attempt + 1 < total:
                time.sleep(self.config.retry_delay)

        return None

    def _check_response(self, response: requests.Response) -> bool:
        """Return True when the response is HTTP 200, not a visitor-system page, and has ok == 1."""
        if response.status_code != 200:
            logger.error(f"HTTP错误: {response.status_code}")
            return False

        # Weibo serves this page when the client is challenged for verification.
        if 'Sina Visitor System' in response.text:
            logger.error("触发微博访客验证系统")
            return False

        try:
            payload = response.json()
            return payload.get('ok') == 1
        except Exception as e:
            logger.error(f"响应解析失败: {e}")
            return False


class DataParser:
    """数据解析器 - 负责解析和清洗数据"""

    @staticmethod
    def _parse_time(time_str: str) -> datetime:
        """解析微博时间字符串为 datetime 对象"""
        try:
            if not time_str:
                return datetime.now()
                
            # 处理相对时间
            if '刚刚' in time_str:
                return datetime.now()
            elif '分钟前' in time_str:
                minutes = int(time_str.replace('分钟前', ''))
                return datetime.now() - timedelta(minutes=minutes)
            elif '小时前' in time_str:
                hours = int(time_str.replace('小时前', ''))
                return datetime.now() - timedelta(hours=hours)
            elif '今天' in time_str:
                time_part = time_str.split(' ')[-1]
                today = datetime.now().date()
                time_obj = datetime.strptime(time_part, '%H:%M').time()
                return datetime.combine(today, time_obj)
            else:
                # 尝试解析标准格式: "Mon Oct 27 21:35:01 +0800 2025"
                try:
                    return datetime.strptime(time_str, '%a %b %d %H:%M:%S %z %Y')
                except Exception as e:
                    logger.warning(f"标准时间格式解析失败: {e}, 尝试其他格式")
                    # 如果失败，返回当前时间
                    return datetime.now()
                    
        except Exception as e:
            logger.warning(f"时间解析失败: {e}, 使用当前时间")
            return datetime.now()
    
    @staticmethod
    def parse_search_results(data: Dict, keyword: str) -> List[WeiboPost]:
        """
        解析微博搜索结果
        
        Args:
            data: API返回的JSON数据
            keyword: 搜索关键词
            
        Returns:
            微博帖子列表
        """
        if not data or data.get('ok') != 1:
            return []
        
        weibo_posts = []
        cards = data.get('data', {}).get('cards', [])
        
        logger.debug(f"解析到 {len(cards)} 张卡片")
        
        for card_index, card in enumerate(cards):
            posts = DataParser._parse_card(card, keyword)
            weibo_posts.extend(posts)
        
        logger.info(f"解析到 {len(weibo_posts)} 条微博")
        return weibo_posts
    
    @staticmethod
    def _parse_card(card: Dict, keyword: str) -> List[WeiboPost]:
        """解析单个卡片"""
        posts = []
        card_type = card.get('card_type')
        
        if card_type == 9:
            # 直接微博卡片
            post = DataParser._parse_weibo_card(card, keyword)
            if post:
                posts.append(post)
        elif card_type == 11:
            # 卡片组
            card_group = card.get('card_group', [])
            for sub_card in card_group:
                if sub_card.get('card_type') == 9:
                    post = DataParser._parse_weibo_card(sub_card, keyword)
                    if post:
                        posts.append(post)
        elif card_type == 8:
            # 推荐用户卡片，跳过
            logger.debug("跳过推荐用户卡片")
        else:
            logger.debug(f"跳过未知卡片类型: {card_type}")
        
        return posts
    
    @staticmethod
    def _parse_weibo_card(card: Dict, keyword: str) -> Optional[WeiboPost]:
        """解析微博卡片"""
        mblog = card.get('mblog', {})
        if not mblog:
            return None
        
        try:
            # 清理文本
            text = mblog.get('text', '')
            text_clean = DataParser.clean_text(text)
            
            # 用户信息
            user_info = mblog.get('user', {})
            
            # 处理时间格式 - 转换为 datetime 对象
            created_at = mblog.get('created_at', '')
            publish_time = DataParser._parse_time(created_at)
            
            # 创建微博帖子对象 - 使用正确的字段名
            post = WeiboPost(
                id=str(mblog.get('id', '')),
                content=text_clean,  # 使用清理后的文本作为内容
                user_id=str(user_info.get('id', '')),
                user_name=user_info.get('screen_name', ''),
                publish_time=publish_time,  # 使用 datetime 对象
                reposts_count=mblog.get('reposts_count', 0),
                comments_count=mblog.get('comments_count', 0),
                likes_count=mblog.get('attitudes_count', 0),  # attitudes_count 对应 likes_count
                url=f"https://m.weibo.cn/status/{mblog.get('id', '')}"
            )
            
            logger.debug(f"成功解析微博: {post.user_name} - {text_clean[:50]}...")
            return post
            
        except Exception as e:
            logger.error(f"创建 WeiboPost 对象失败: {e}")
            # 打印调试信息
            logger.debug(f"mblog 数据: {mblog}")
            return None
    
    @staticmethod
    def clean_text(text: str) -> str:
        """清理文本，移除HTML标签和特殊字符"""
        import re
        import html
        
        if not text:
            return ""
        
        # 移除HTML标签
        clean_text = re.sub('<[^<]+?>', '', text)
        # 转换HTML实体
        clean_text = html.unescape(clean_text)
        # 移除URL
        clean_text = re.sub(r'http\S+', '', clean_text)
        # 移除@用户
        clean_text = re.sub(r'@\S+', '', clean_text)
        # 移除话题#
        clean_text = re.sub(r'#\S+#', '', clean_text)
        # 移除多余空白字符
        clean_text = re.sub(r'\s+', ' ', clean_text).strip()
        
        return clean_text
    
    @staticmethod
    def _get_current_time() -> str:
        """获取当前时间字符串"""
        from datetime import datetime
        return datetime.now().strftime('%Y-%m-%d %H:%M:%S')


class WeiboSpider:
    """Main Weibo spider: keyword search, batch search, and trending topics."""

    def __init__(self, config: WeiboConfig = None):
        """Initialize the spider.

        Args:
            config: spider configuration; a default WeiboConfig() is used when None.
        """
        self.config = config or WeiboConfig()
        self.request_handler = RequestHandler(self.config)
        self.data_parser = DataParser()
        self.cookies = self._load_cookies()

        logger.info("微博爬虫初始化完成")

    def _load_cookies(self) -> Dict:
        """Load cookies, trying environment variables, then config files, then built-in defaults."""
        # Environment variables take precedence.
        cookie_from_env = self._load_cookies_from_env()
        if cookie_from_env:
            return cookie_from_env

        # Then on-disk config files.
        cookie_from_file = self._load_cookies_from_file()
        if cookie_from_file:
            return cookie_from_file

        # SECURITY/STALENESS NOTE: hard-coded fallback cookies. They will
        # expire and should not be committed to source control — prefer the
        # WEIBO_* environment variables or a cookies.json file.
        return {
            'SUB': '_2AkMfo9yxf8NxqwFRmvwdy2vgboRyzg3EieKp_y1qJRM3HRl-yT9yqmNStRB6NCPyXg66YLpF01znthK7xubpdrmHWfAH',
            'SUBP': '0033WrSXqPxfM72-Ws9jqgMF55529P9D9WhFUDsspd69QDzOclKcUqm0',
            'MLOGIN': '0',
            '_T_WM': '93079193581',
            'WEIBOCN_FROM': '1110005030',
            'XSRF-TOKEN': 'bff066',
            'mweibo_short_token': 'c2e95e36c5',
        }

    def _load_cookies_from_env(self) -> Optional[Dict]:
        """Load cookies from WEIBO_* environment variables; None when too few are set."""
        import os

        cookie_keys = ['SUB', 'SUBP', 'MLOGIN', '_T_WM', 'WEIBOCN_FROM', 'XSRF-TOKEN', 'mweibo_short_token']
        cookies = {}

        for key in cookie_keys:
            env_key = f'WEIBO_{key}'
            if env_key in os.environ:
                cookies[key] = os.environ[env_key]

        # Require at least a few of the key cookies before trusting the set.
        if cookies and len(cookies) >= 3:
            logger.info("从环境变量加载Cookie成功")
            return cookies

        return None

    def _load_cookies_from_file(self) -> Optional[Dict]:
        """Load cookies from the first readable JSON file in the known locations."""
        import os
        from utils.file_utils import load_json

        cookie_files = [
            'configs/cookies.json',
            'cookies.json',
            '../configs/cookies.json'
        ]

        for file_path in cookie_files:
            if os.path.exists(file_path):
                try:
                    cookies = load_json(file_path)
                    if cookies and isinstance(cookies, dict):
                        logger.info(f"从文件 {file_path} 加载Cookie成功")
                        return cookies
                except Exception as e:
                    logger.warning(f"从文件 {file_path} 加载Cookie失败: {e}")

        return None

    def update_cookies(self, cookies: Dict):
        """Merge new cookie values into the current cookie set.

        Args:
            cookies: dict of cookie names to values to add/overwrite.
        """
        self.cookies.update(cookies)
        logger.info("Cookie已更新")

    def search_keyword(self, keyword: str, pages: int = None) -> List[WeiboPost]:
        """Search a keyword across multiple result pages.

        Args:
            keyword: search keyword.
            pages: number of pages to crawl; falls back to config.max_pages.

        Returns:
            List of parsed WeiboPost objects.
        """
        # NOTE: `or` means pages=0 also falls back to config.max_pages.
        pages = pages or self.config.max_pages
        all_posts = []

        logger.info(f"开始搜索关键词: '{keyword}'，计划爬取 {pages} 页")

        for page in range(1, pages + 1):
            logger.info(f"正在爬取第 {page} 页...")

            posts = self._search_single_page(keyword, page)
            if posts is None:
                # A hard failure (network/parse) aborts the whole search.
                logger.warning(f"第 {page} 页爬取失败，停止爬取")
                break

            all_posts.extend(posts)

            # A short page means we've reached the final page of results.
            if len(posts) < self.config.page_size:
                logger.info("已到达最后一页")
                break

            logger.info(f"第 {page} 页爬取完成，获取 {len(posts)} 条微博，累计 {len(all_posts)} 条")

        logger.info(f"搜索完成，共获取 {len(all_posts)} 条微博")
        return all_posts

    def _search_single_page(self, keyword: str, page: int) -> Optional[List[WeiboPost]]:
        """Fetch and parse one search-result page; None on request/parse failure."""
        # containerid is built raw here; urlencode() encodes it when the
        # query string is assembled. (Removed an unused pre-encoded copy of
        # the keyword that the original computed and never used.)
        inner_containerid = f"100103type=1&q={keyword}"

        # The Referer header must carry the ENCODED containerid.
        containerid_for_referer = quote(inner_containerid)
        headers = self.config.headers.copy()
        headers['referer'] = f'https://m.weibo.cn/search?containerid={containerid_for_referer}'

        # Temporarily swap in the page-specific headers.
        original_headers = self.request_handler.session.headers.copy()
        self.request_handler.session.headers.update(headers)

        try:
            # Request parameters use the UN-encoded containerid; urlencode
            # performs the percent-encoding exactly once.
            params = {
                'containerid': inner_containerid,
                'page_type': 'searchall',
                'page': page
            }

            from urllib.parse import urlencode
            query_string = urlencode(params)
            full_url = f"{self.config.base_url}?{query_string}"
            logger.info(f"请求URL: {full_url}")
            logger.info(f"请求参数: {params}")

            response = self.request_handler.make_request(
                self.config.base_url,
                params,
                self.cookies
            )

            if response is None:
                return None

            data = response.json()
            return self.data_parser.parse_search_results(data, keyword)

        except Exception as e:
            logger.error(f"解析数据失败: {e}")
            return None
        finally:
            # Always restore the original session headers.
            self.request_handler.session.headers.update(original_headers)

    def batch_search(self, keywords: List[str], pages_per_keyword: int = None) -> Dict[str, List[WeiboPost]]:
        """Search several keywords in sequence.

        Args:
            keywords: list of search keywords.
            pages_per_keyword: pages to crawl per keyword; defaults to config.max_pages.

        Returns:
            Dict mapping each keyword to its list of posts.
        """
        pages_per_keyword = pages_per_keyword or self.config.max_pages
        results = {}

        logger.info(f"开始批量搜索 {len(keywords)} 个关键词")

        for index, keyword in enumerate(keywords):
            logger.info(f"处理关键词: '{keyword}'")

            posts = self.search_keyword(keyword, pages_per_keyword)
            results[keyword] = posts

            # BUG FIX: the original compared `keyword != keywords[-1]`, which
            # skips the delay for EVERY occurrence equal to the last keyword
            # when the list contains duplicates. Use the position instead.
            if index < len(keywords) - 1:
                delay = random.uniform(5, 10)
                logger.info(f"关键词间延迟: {delay:.2f}秒")
                time.sleep(delay)

        logger.info(f"批量搜索完成，共处理 {len(keywords)} 个关键词")
        return results

    def get_trending_topics(self, category: str = "realtimehot") -> List[Dict[str, Any]]:
        """Fetch Weibo trending topics.

        Args:
            category: trending board to fetch:
                - "realtimehot": main hot-search board
                - "entrank": entertainment board
                - "sport": sports board
                - "game": gaming board

        Returns:
            List of {'topic', 'url', 'category'} dicts (empty on failure).
        """
        category_map = {
            "realtimehot": "106003type=25",
            "entrank": "106003type=61",
            "sport": "106003type=60",
            "game": "106003type=59"
        }

        # Unknown categories fall back to the main hot-search board.
        containerid = category_map.get(category, "106003type=25")

        params = {
            "containerid": containerid,
            "extparam": "discover|new",
            "luicode": "10000011",
            "lfid": "231583"
        }

        response = self.request_handler.make_request(
            self.config.base_url,
            params,
            self.cookies
        )

        if response is None:
            return []

        try:
            data = response.json()
            cards = data.get('data', {}).get('cards', [])

            trending_topics = []
            # Trending entries are card_type 8 groups nested in card_type 8 cards.
            for card in cards:
                if card.get('card_type') == 8:
                    card_group = card.get('card_group', [])
                    for item in card_group:
                        if item.get('card_type') == 8:
                            desc = item.get('desc', '')
                            scheme = item.get('scheme', '')
                            trending_topics.append({
                                'topic': desc,
                                'url': scheme,
                                'category': category
                            })

            logger.info(f"获取到 {len(trending_topics)} 个热搜话题")
            return trending_topics

        except Exception as e:
            logger.error(f"解析热搜话题失败: {e}")
            return []

    def test_connection(self) -> bool:
        """Run a one-page probe search to verify connectivity.

        Returns:
            True when the probe succeeded, False otherwise.
        """
        test_keyword = "测试"
        logger.info("测试爬虫连接...")

        try:
            posts = self._search_single_page(test_keyword, 1)
            success = posts is not None
            if success:
                logger.info("连接测试成功")
            else:
                logger.warning("连接测试失败")
            return success
        except Exception as e:
            logger.error(f"连接测试异常: {e}")
            return False


# Convenience helpers
def create_spider(config: WeiboConfig = None) -> WeiboSpider:
    """Factory helper that builds a WeiboSpider.

    Args:
        config: optional spider configuration.

    Returns:
        A freshly constructed WeiboSpider instance.
    """
    spider = WeiboSpider(config)
    return spider


def quick_search(keyword: str, pages: int = 3) -> List[WeiboPost]:
    """One-shot keyword search using a throwaway spider.

    Args:
        keyword: search keyword.
        pages: number of result pages to crawl (default 3).

    Returns:
        List of parsed WeiboPost objects.
    """
    return create_spider().search_keyword(keyword, pages)


if __name__ == "__main__":
    # Smoke test: verify connectivity, then run a small sample search.
    spider = WeiboSpider()

    if spider.test_connection():
        print("连接测试成功")

        # Sample search.
        posts = spider.search_keyword("人工智能", 2)
        print(f"获取到 {len(posts)} 条微博")

        # Show the first few results.
        for i, post in enumerate(posts[:3]):
            print(f"\n--- 微博 {i+1} ---")
            print(f"用户: {post.user_name}")
            # BUG FIX: WeiboPost is constructed with `content` and
            # `likes_count` (see DataParser._parse_weibo_card); the original
            # accessed non-existent `text_clean` / `attitudes_count`
            # attributes, which would raise AttributeError here.
            print(f"内容: {post.content[:100]}...")
            print(f"互动: 转发{post.reposts_count} 评论{post.comments_count} 点赞{post.likes_count}")
    else:
        print("连接测试失败，请检查网络或Cookie配置")