import requests
import re
import json
import time
import random
from urllib.parse import quote
from requests.exceptions import RequestException, JSONDecodeError

# Configuration — NOTE(review): COOKIE below is a live session credential hard-coded in source; prefer loading it from an environment variable.
USER_ID = "2803301701"  # 如："1669879400"(人民日报)
COOKIE = "SCF=Ao-pxrQEribc8c3784YkDQG3XrF0mgnMj9xK0xGFGzvfW5p68KFUDveAz1-s6jXHS1guIAiTdwjLcVrY1XXt5KY.; SUB=_2A25FNEWODeRhGedP7lYU-CbKzDiIHXVmSMdGrDV8PUNbmtAbLUf9kW9NX7yNUFhXue_FvIaTe8tnv0yUAUkHOAUD; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWLbYK47.BML-ISJFUAD3g.5JpX5KzhUgL.Fo2pSKBf1hncS0B2dJLoIf2LxK-L1-BL1-2LxKqL12eL1h2LxKqLB-qL12qLxK-LBKBLBK.LxK-L1h.L12BLxKnLB.qL1-zLxKBLBonL1h5LxKML1hzLBo.LxKML1hzLBo.t; ALF=02_1750581984; SINAGLOBAL=9039070546753.746.1748054123353; _s_tentry=open.weibo.com; Apache=9763721694097.898.1748098640266; ULV=1748098640268:2:2:2:9763721694097.898.1748098640266:1748054123395; XSRF-TOKEN=eqr27WwV_cgBTzS6QLL0-HiF; WBPSESS=fBayimxy6_CVgmJGwJi-T5QFogJE9S-8IRBpO-aI9FJxiPzRsQwdrKDynSSsX91ph73_Q24IuZLmMxj-wTP0FvtSey18QVsGkXeCNZEvZbF7-0VQ80AYzl5mdhvLwlifXjqoGVykEw6PoYiVb2C_xA=="  # 需从浏览器获取
OUTPUT_FILE = "weibo_posts.json"
MAX_PAGES = 5  # 最大爬取页数


def get_headers():
    """Build the HTTP headers used for every Weibo mobile-API request.

    A fresh dict is returned on each call so callers may mutate it
    without affecting later requests.
    """
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/114.0.0.0 Safari/537.36"
        ),
        "Referer": f"https://m.weibo.cn/u/{USER_ID}",
        "X-Requested-With": "XMLHttpRequest",
        "Cookie": COOKIE,
    }
    return headers


def get_containerid(user_id):
    """Resolve the containerid of the user's "weibo" (posts) tab.

    Returns the containerid string of the first tab whose tab_type is
    "weibo", or None when the profile payload is missing or no such tab
    exists (bad user id, expired cookie, request failure).
    """
    profile = send_request(
        f"https://m.weibo.cn/api/container/getIndex?type=uid&value={user_id}"
    )
    if not profile or not profile.get("data"):
        return None
    tabs = profile["data"].get("tabsInfo", {}).get("tabs", [])
    # Only the "weibo" tab carries the post feed we want to crawl.
    candidates = (t.get("containerid") for t in tabs if t.get("tab_type") == "weibo")
    return next(candidates, None)


def get_user_info(user_id):
    """Fetch basic profile information for *user_id*.

    Returns the "user" dict from the profile endpoint (may be empty),
    or None when the request fails or the payload has no "data" field.
    """
    payload = send_request(f"https://m.weibo.cn/profile/info?uid={user_id}")
    if not payload or not payload.get("data"):
        return None
    return payload["data"].get("user", {})


def get_weibo_list(containerid, page):
    """Fetch one page of the user's weibo feed.

    Args:
        containerid: The "weibo" tab containerid from get_containerid().
        page: 1-based page number.

    Returns:
        The decoded JSON payload, or None on request failure
        (send_request swallows errors and returns None).
    """
    # Fixed: the URL was written as an f-string with no placeholders.
    url = "https://m.weibo.cn/api/container/getIndex"
    params = {
        "containerid": containerid,
        "page_type": "uid",
        "page": page,
    }
    return send_request(url, params=params)


def get_full_text(weibo_id):
    """Retrieve the untruncated body of a long weibo post.

    Returns the long text content ("" when the API omits the field), or
    None when the request fails or returns no data.
    """
    payload = send_request(
        "https://m.weibo.cn/statuses/extend", params={"id": weibo_id}
    )
    if not payload or not payload.get("data"):
        return None
    return payload["data"].get("longTextContent", "")


def send_request(url, params=None):
    """GET *url* and return the decoded JSON body, or None on any failure.

    A random 1-3 s delay precedes every request to throttle crawling.
    Failures are printed and swallowed so callers can treat a None
    result as "no data".

    Args:
        url: Absolute URL to fetch.
        params: Optional query-string parameters.

    Returns:
        Parsed JSON payload (dict) on success, None otherwise.
    """
    time.sleep(random.uniform(1, 3))  # polite random delay between requests
    try:
        response = requests.get(
            url,
            headers=get_headers(),
            params=params,
            timeout=10,
        )
        response.raise_for_status()
        return response.json()
    except (RequestException, JSONDecodeError) as e:
        # Narrowed from a bare `except Exception`: these two cover every
        # failure mode of requests.get/raise_for_status/json, and they
        # were already imported at the top of the file but never used.
        print(f"请求失败: {url} 错误: {str(e)}")
        return None


def parse_weibo(weibo):
    """Convert one raw feed card into a flat post record.

    Returns a dict with id/time/content/engagement counters/pics/url,
    or None when the card is malformed (the error is printed, not
    raised, so the crawl can continue).
    """
    try:
        mblog = weibo["mblog"]
        weibo_id = mblog["id"]

        # Long posts arrive truncated in the feed; fetch the full body
        # and fall back to the truncated text when that fails.
        content = mblog.get("text", "")
        if mblog.get("isLongText"):
            content = get_full_text(weibo_id) or content

        content = re.sub(r'<[^>]+>', '', content)  # strip HTML tags

        pics = (
            [pic["large"]["url"] for pic in mblog["pics"]]
            if "pics" in mblog
            else []
        )

        return {
            "id": weibo_id,
            "time": mblog.get("created_at", ""),
            "content": content.strip(),
            "reposts": mblog.get("reposts_count", 0),
            "comments": mblog.get("comments_count", 0),
            "likes": mblog.get("attitudes_count", 0),
            "pics": pics,
            "url": f"https://m.weibo.cn/detail/{weibo_id}",
        }
    except Exception as e:
        print(f"解析微博失败: {str(e)}")
        return None


def save_to_file(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON.

    Args:
        data: List of post records produced by parse_weibo().
        filename: Destination path; overwritten if it already exists.
    """
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    # Bug fix: the success message hard-coded the literal "(unknown)"
    # instead of reporting the actual destination file.
    print(f"已保存 {len(data)} 条微博到 {filename}")


def crawl_weibo_user(user_id):
    """Crawl up to MAX_PAGES pages of a user's weibo posts and save them.

    Resolves the feed containerid, prints profile info when available,
    walks the paginated feed, parses each post card, and writes every
    collected record to OUTPUT_FILE. Stops early when a page returns no
    data or no cards.
    """
    containerid = get_containerid(user_id)
    if not containerid:
        print("获取containerid失败，请检查用户ID或Cookie")
        return

    user_info = get_user_info(user_id)
    if user_info:
        print(f"开始爬取用户: {user_info.get('screen_name')} (粉丝: {user_info.get('followers_count')})")

    all_weibos = []
    for page in range(1, MAX_PAGES + 1):
        print(f"正在爬取第 {page} 页...")
        data = get_weibo_list(containerid, page)

        if not data or not data.get("data"):
            print(f"第 {page} 页无数据，可能已到末尾")
            break

        weibos = data["data"].get("cards", [])
        if not weibos:
            print("没有更多微博了")
            break

        for card in weibos:
            # card_type 9 marks an actual post card. Robustness fix:
            # use .get so cards lacking "card_type" are skipped instead
            # of raising KeyError and aborting the whole crawl.
            if card.get("card_type") == 9 and "mblog" in card:
                if weibo := parse_weibo(card):
                    all_weibos.append(weibo)

        print(f"第 {page} 页获取到 {len(weibos)} 条微博")

    if all_weibos:
        save_to_file(all_weibos, OUTPUT_FILE)
    else:
        print("未获取到任何微博")


if __name__ == "__main__":
    # Script entry point: crawl the configured user and report elapsed time.
    print("微博用户内容爬虫启动")
    start_time = time.time()

    if USER_ID and COOKIE:
        crawl_weibo_user(USER_ID)
    else:
        print("请先配置USER_ID和COOKIE")

    print(f"爬取完成，耗时 {time.time() - start_time:.2f} 秒")