import json
import logging
import random
import time
from datetime import datetime
from urllib.parse import urlencode

import scrapy
from bs4 import BeautifulSoup

class WeiboCommentsSpider(scrapy.Spider):
    """Crawl the comment thread of a single Weibo post.

    Flow:
      1. ``start_requests`` hits the Weibo homepage with a pre-baked,
         logged-in session cookie to warm up the session.
      2. ``after_login`` issues the first ``/ajax/statuses/buildComments``
         request (cursor ``max_id=0``).
      3. ``parse`` yields one item per comment and follows the ``max_id``
         cursor returned by each page until the API signals the end.

    Spider arguments (``scrapy crawl weibo_comments -a pid=... -a uid=...``):
        pid: id of the Weibo post whose comments are crawled.
        uid: id of the post's author (required by the endpoint's Referer
             and ``uid`` query parameter).
    """

    name = 'weibo_comments'
    allowed_domains = ['weibo.com']

    # Single source of truth for the browser identity sent on every request.
    USER_AGENT = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36")

    def __init__(self, pid=None, uid=None, *args, **kwargs):
        super(WeiboCommentsSpider, self).__init__(*args, **kwargs)
        self.pid = pid or '5150765468882769'  # default Weibo post id
        self.uid = uid or '1647210043'  # default author user id
        self.max_id = 0  # pagination cursor; 0 means "first page"
        # Session cookies captured from a logged-in browser session.
        # NOTE(review): these expire; the spider must be re-seeded with
        # fresh cookies when Weibo invalidates the session.
        self.cookies = {
            'SCF': 'ArpvUunQoKALUNqQAxf58Gxg95RBplr1I2FvJgQ38fKcrNVT6M93Co2yD3a7rvQFkBIbWZ6mStUoxgmwmqsg4NY.',
            'ALF': '02_1747316376',
            'SINAGLOBAL': '3855831980329.203.1744724567190',
            'ULV': '1744724567209:1:1:1:3855831980329.203.1744724567190:',
            'SUBP': '0033WrSXqPxfM72-Ws9jqgMF55529P9D9WWoKI4vLAW3FfcV9U7FOHGN',
            'SUB': '_2AkMQouM2f8NxqwFRmfERz27mbo5_yw3EieKm_hLtJRMxHRl-yj9kqksOtRB6OyLN2Wb6NUd5prlagibBJiHzbdm2HH7J',
            'XSRF-TOKEN': 'JnaxiDJ_cJ40r_J73UYBFt5o',
            'WBPSESS': 'voLfPs8eGy8pkyBjwwkfahcbbAnsUizQgB4Ok85dBGQ1Z1q6TVw71bKk3x905DpLw6nroi0jCxfkSERM6155Ypv8bSZpx6h9CuWntUh1S9rfRJWJSNWs2SiS8JkUu2Jozwc7uXy1rYY3mHh8LfcoYqFI1KXrIZnWK29QSHm7RUo=='
        }
        # Raw "Cookie" header derived from the dict so the two representations
        # can never drift out of sync (previously maintained by hand twice).
        self.cookie_str = "; ".join(f"{k}={v}" for k, v in self.cookies.items())

    def _comments_url(self, max_id):
        """Return the buildComments endpoint URL for one page of comments.

        Args:
            max_id: pagination cursor from the previous page (0 for the first).
        """
        params = {
            "flow": 0,
            "is_reload": 1,
            "id": self.pid,
            "is_show_bulletin": 2,
            "is_mix": 0,
            "max_id": max_id,
            "count": 20,
            "uid": self.uid,
            "fetch_level": 0,
            "locale": "zh-CN",
        }
        # urlencode handles escaping; the previous manual '&'.join did not.
        return f"https://weibo.com/ajax/statuses/buildComments?{urlencode(params)}"

    def _comment_headers(self):
        """Headers for the AJAX comments endpoint, mimicking the web client."""
        return {
            "User-Agent": self.USER_AGENT,
            "Referer": f"https://weibo.com/{self.uid}/{self.pid}",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "X-Requested-With": "XMLHttpRequest",
            "Cookie": self.cookie_str,
            "sec-ch-ua": '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"'
        }

    def start_requests(self):
        """Visit the Weibo homepage first to establish the cookie session."""
        headers = {
            "User-Agent": self.USER_AGENT,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9",
            # Cookie is sent both as a raw header and via the cookies kwarg;
            # NOTE(review): Scrapy's cookie middleware may merge these —
            # verify only one copy reaches the wire if dedup matters.
            "Cookie": self.cookie_str
        }

        yield scrapy.Request(
            url="https://weibo.com",
            headers=headers,
            cookies=self.cookies,
            callback=self.after_login,
            dont_filter=True
        )

    def after_login(self, response):
        """Issue the first comments-page request once the session is warm."""
        # Random 1-3s pause to look less bot-like.
        # NOTE(review): time.sleep blocks Twisted's reactor; DOWNLOAD_DELAY +
        # RANDOMIZE_DOWNLOAD_DELAY would be the non-blocking equivalent.
        time.sleep(random.uniform(1, 3))

        full_url = self._comments_url(self.max_id)
        self.logger.info(f"开始请求评论URL: {full_url}")

        yield scrapy.Request(
            url=full_url,
            headers=self._comment_headers(),
            cookies=self.cookies,
            callback=self.parse,
            dont_filter=True
        )

    def parse(self, response):
        """Yield one item per comment, then follow the max_id cursor.

        Each yielded item is a plain dict with string ids, integer counters,
        and HTML-stripped comment text.
        """
        try:
            self.logger.info(f"响应状态码: {response.status}")
            self.logger.info(f"响应内容预览: {response.text[:200]}...")

            data = json.loads(response.text)

            if not data or "data" not in data:
                self.logger.error(f"无效的JSON响应或缺少'data'字段")
                return

            comments = data["data"]
            next_max_id = data.get("max_id", 0)

            for comment in comments:
                try:
                    user = comment.get("user", {})
                    item = {
                        'comment_id': str(comment.get("id", "")),
                        'created_at': comment.get("created_at", ""),
                        'user_id': str(user.get("id", "")),
                        'user_name': user.get("name", ""),
                        'user_city': user.get("location", "其他"),
                        'like_counts': int(comment.get("like_counts", 0)),
                        'reply_counts': int(comment.get("total_number", 0)),
                        # Comment text arrives as an HTML fragment; strip tags.
                        'content': BeautifulSoup(comment.get("text", ""), "html.parser").text.strip()
                    }

                    self.logger.info(f"处理评论数据: {item}")
                    yield item
                except Exception as e:
                    # One malformed comment must not abort the whole page.
                    self.logger.error(f"解析评论时出错: {e}")
                    continue

            # Follow the cursor. Stop when the API signals the end (max_id == 0)
            # OR repeats the current cursor — previously a repeated cursor made
            # the string-replace a no-op and the spider re-fetched the same
            # page forever.
            if next_max_id and next_max_id != self.max_id:
                # Random 1-3s pause between pages (blocking; see after_login).
                time.sleep(random.uniform(1, 3))
                self.max_id = next_max_id
                next_url = self._comments_url(next_max_id)
                self.logger.info(f"请求下一页: {next_url}")
                yield scrapy.Request(
                    url=next_url,
                    headers=response.request.headers,
                    cookies=self.cookies,
                    callback=self.parse,
                    dont_filter=True
                )

        except Exception as e:
            self.logger.error(f"处理响应时出错: {e}")