import requests
import time
import random
from datetime import datetime, timedelta
from urllib.parse import quote  # 添加导入语句
import logging
# ----------------------
# 1. 数据采集模块
# ----------------------
class DataCollector:
    """Collector for Weibo (m.weibo.cn) search results with basic anti-ban measures.

    Supports an optional login cookie and an optional pool of proxy URLs
    that are rotated randomly on each request. Requests are rate-limited to
    at least 1 second apart, with an extra randomized pause every 3rd page.
    """

    def __init__(self, cookie=None, proxy_pool=None):
        """Initialize the collector.

        Args:
            cookie: Optional raw ``Cookie`` header string for an authenticated session.
            proxy_pool: Optional list of proxy URLs (e.g. ``"http://1.2.3.4:8080"``).
        """
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Referer': 'https://m.weibo.cn/search?containerid=100103type=1',
            'X-Requested-With': 'XMLHttpRequest'
        }

        if cookie:
            self.headers['Cookie'] = cookie

        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.proxy_pool = proxy_pool or []
        self.last_request_time = 0  # epoch seconds of the most recent request

    def get_containerid(self, keyword):
        """Resolve the search container id for *keyword*.

        Tries three known layouts of the ``getIndex`` response in order;
        on any failure falls back to the classic ``100103type=1&q=...`` id.

        Returns:
            str: A container id usable in subsequent search requests.
        """
        # BUG FIX: the keyword must be URL-encoded; raw spaces/CJK punctuation
        # can corrupt the query string. Also add a timeout so we never hang.
        search_url = f"https://m.weibo.cn/api/container/getIndex?type=all&q={quote(keyword)}"
        response = None  # BUG FIX: bound up-front so the except branch can log safely
        try:
            response = self.session.get(search_url, timeout=15)
            data = response.json()
            payload = data.get('data') or {}

            # Layout 1: cardlistInfo carries the container id directly.
            if 'cardlistInfo' in payload:
                return payload['cardlistInfo']['containerid']
            # Layout 2: pick the realtime ("实时") or hot ("热门") tab.
            for tab in payload.get('tabsInfo', {}).get('tabs', []):
                if tab.get('title') in ('实时', '热门'):
                    return tab['containerid']
            # Layout 3: derive the id from the first card's scheme URL.
            cards = payload.get('cards') or []
            if cards:
                return cards[0]['card_group'][0]['scheme'].split('=')[-1]

        except Exception as e:
            logging.error(f"获取容器ID失败: {str(e)}")
            if response is not None:
                logging.debug(f"API响应内容: {response.text}")
        return f"100103type=1&q={keyword}"  # final fallback

    def _request_kwargs(self):
        """Build per-request kwargs: a timeout plus an optional random proxy."""
        kwargs = {'timeout': 30}
        if self.proxy_pool:
            proxy = random.choice(self.proxy_pool)
            # BUG FIX: cover both schemes so any plain-http hop also uses the proxy.
            kwargs['proxies'] = {'http': proxy, 'https': proxy}
            logging.debug(f"使用代理IP: {proxy}")
        return kwargs

    def _fetch_json(self, url, retries=3):
        """GET *url* and decode its JSON body, retrying on failure.

        Implements the retry mechanism the crawler relies on: one flaky page
        no longer aborts the whole crawl.

        Returns:
            dict | None: Decoded response, or ``None`` after *retries* failures.
        """
        for attempt in range(1, retries + 1):
            try:
                response = self.session.get(url, **self._request_kwargs())
                return response.json()
            except Exception as e:
                logging.warning(f"请求失败(第{attempt}/{retries}次): {str(e)}")
                time.sleep(attempt)  # linear back-off between attempts
        return None

    def _parse_cards(self, cards, page):
        """Flatten Weibo card groups into normalized post dicts.

        Args:
            cards: The ``data.cards`` list from a getIndex response.
            page: 1-based page number, recorded on every row.

        Returns:
            list[dict]: One dict per post found on this page.
        """
        rows = []
        for card in cards:
            for item in card.get('card_group', []):
                blog = item.get('mblog')
                if not blog:
                    continue
                rows.append({
                    'id': blog['id'],
                    'text': blog.get('text', ''),
                    # BUG FIX: a post with a missing/None user previously raised
                    # KeyError and dropped the whole page; default to ''.
                    'user': (blog.get('user') or {}).get('screen_name', ''),
                    'reposts': blog.get('reposts_count', 0),
                    'comments': blog.get('comments_count', 0),
                    'likes': blog.get('attitudes_count', 0),
                    'timestamp': blog.get('created_at', ''),
                    'platform': 'weibo',
                    'page': page,
                })
        return rows

    def crawl_weibo(self, keyword, pages=20, start_date=None, end_date=None):
        """Crawl Weibo search results for *keyword*.

        Args:
            keyword: Search term.
            pages: Desired number of result pages (Weibo caps search at 50).
            start_date: Time-range start in ``YYYY-MM-DD-H`` form; defaults to
                30 days ago.
            end_date: Time-range end in the same form; defaults to today 23h.

        Returns:
            list[dict]: Normalized post records (see ``_parse_cards``).
        """
        data = []
        containerid = self.get_containerid(keyword)

        # Default time window: the last 30 days.
        today = datetime.now()
        if not start_date:
            start_date = (today - timedelta(days=30)).strftime('%Y-%m-%d-0')
        if not end_date:
            end_date = today.strftime('%Y-%m-%d-23')

        max_pages = min(pages, 50)  # Weibo shows at most 50 search result pages

        for page in range(1, max_pages + 1):
            # Anti-ban: every 3rd page gets an extra randomized pause when
            # requests have been coming in quickly.
            if page % 3 == 0 and time.time() - self.last_request_time < 10:
                sleep_time = random.uniform(2, 5)
                logging.info(f"关键页延迟 {sleep_time:.1f}秒 防封禁")
                time.sleep(sleep_time)

            # Build the search URL, including the time-range filter.
            url = (
                f"https://m.weibo.cn/api/container/getIndex?"
                f"containerid={containerid}&page={page}&"
                f"q={quote(keyword)}&"
                f"starttime={start_date}&endtime={end_date}"
            )

            # Rate limit: keep at least 1 second between requests.
            # BUG FIX: re-read the clock here — the original reused a timestamp
            # taken before the anti-ban sleep and could sleep a second time.
            elapsed = time.time() - self.last_request_time
            if elapsed < 1.0:
                time.sleep(1.0 - elapsed)

            json_data = self._fetch_json(url)
            self.last_request_time = time.time()

            # Skip pages that failed after retries or returned no payload.
            if not json_data or not json_data.get('ok') or not json_data.get('data'):
                logging.warning(f"第{page}页无数据，跳过")
                continue

            page_rows = self._parse_cards(json_data['data'].get('cards', []), page)
            data.extend(page_rows)

            logging.info(f"已采集第{page}/{max_pages}页, 本页数据: {len(page_rows)}条, 累计: {len(data)}条")

            # An empty page means we've run past the available results.
            if not page_rows:
                logging.info(f"第{page}页无数据，提前终止采集")
                break

            # Small randomized delay between ordinary pages.
            time.sleep(random.uniform(0, 1))

        logging.info(f"采集完成! 总页数: {max_pages}, 有效数据: {len(data)}条")
        return data
    
    