import requests
import re
import json
import time
import random
from datetime import datetime
from requests.exceptions import RequestException, JSONDecodeError
import warnings

warnings.filterwarnings("ignore", category=requests.packages.urllib3.exceptions.InsecureRequestWarning)

# 配置部分
USER_ID = "1989660417"  # 你的目标用户ID
COOKIE = "SCF=Ao-pxrQEribc8c3784YkDQG3XrF0mgnMj9xK0xGFGzvfW5p68KFUDveAz1-s6jXHS1guIAiTdwjLcVrY1XXt5KY.; SUB=_2A25FNEWODeRhGedP7lYU-CbKzDiIHXVmSMdGrDV8PUNbmtAbLUf9kW9NX7yNUFhXue_FvIaTe8tnv0yUAUkHOAUD; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWLbYK47.BML-ISJFUAD3g.5JpX5KzhUgL.Fo2pSKBf1hncS0B2dJLoIf2LxK-L1-BL1-2LxKqL12eL1h2LxKqLB-qL12qLxK-LBKBLBK.LxK-L1h.L12BLxKnLB.qL1-zLxKBLBonL1h5LxKML1hzLBo.LxKML1hzLBo.t; ALF=02_1750581984; SINAGLOBAL=9039070546753.746.1748054123353; XSRF-TOKEN=xzoAKQK8j7WewWh2vt9Eor62; WBPSESS=fBayimxy6_CVgmJGwJi-T5QFogJE9S-8IRBpO-aI9FJxiPzRsQwdrKDynSSsX91pI0cmQ73AwU3bGrN3HG0SGfSDsO_XVozKCF0evahyW0n-QLXbfodGLjT24Sp5ySN3zZA_LeOnAtvsZ3QLLFX41A==; _s_tentry=open.weibo.com; Apache=9763721694097.898.1748098640266; ULV=1748098640268:2:2:2:9763721694097.898.1748098640266:1748054123395"
OUTPUT_FILE = "weibo_posts.json"
MAX_PAGES = 5
REQUEST_INTERVAL = (1, 3)  # 随机延迟区间


def get_enhanced_headers():
    """Build the browser-like HTTP headers used for every Weibo API request.

    Mimics desktop Chrome and attaches the configured session cookie so
    authenticated endpoints accept the request.
    """
    ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
    headers = {}
    headers["User-Agent"] = ua
    headers["Referer"] = f"https://m.weibo.cn/u/{USER_ID}"
    headers["X-Requested-With"] = "XMLHttpRequest"
    headers["Accept"] = "application/json, text/plain, */*"
    headers["Cookie"] = COOKIE
    headers["Accept-Encoding"] = "gzip, deflate, br"
    headers["Accept-Language"] = "zh-CN,zh;q=0.9"
    return headers


def get_containerid(user_id):
    """Resolve the 'weibo' tab containerid for *user_id*, retrying up to 3 times.

    Falls back to the conventional "107603<uid>" containerid if the lookup
    never succeeds.
    """
    url = f"https://m.weibo.cn/api/container/getIndex?type=uid&value={user_id}"
    attempts_left = 3
    while attempts_left > 0:
        attempts_left -= 1
        payload = send_request(url)
        data = payload.get("data") if payload else None
        if data:
            tabs = data.get("tabsInfo", {}).get("tabs", [])
            # The timeline container lives on the tab whose type is "weibo".
            for tab in tabs:
                if tab.get("tab_type") == "weibo":
                    return tab.get("containerid")
        time.sleep(2)
    return f"107603{user_id}"


def get_weibo_list(containerid, page):
    """Fetch one page (up to 25 entries) of the user's weibo timeline.

    Returns the decoded JSON payload from send_request, or None on failure.
    """
    query = {
        "containerid": containerid,
        "page_type": "uid",
        "page": page,
        "count": 25,  # entries per page
    }
    endpoint = "https://m.weibo.cn/api/container/getIndex"
    return send_request(endpoint, params=query)


def get_full_text(weibo_id):
    """Fetch the untruncated text of a long-form weibo.

    Returns the long text (possibly ""), or None when the request fails
    or the payload carries no data.
    """
    payload = send_request("https://m.weibo.cn/statuses/extend", params={"id": weibo_id})
    data = payload.get("data") if payload else None
    if not data:
        return None
    return data.get("longTextContent", "")


def send_request(url, params=None):
    """GET a Weibo API endpoint and return its decoded JSON payload.

    Sleeps a random interval before each request to avoid rate limiting.
    Returns None on any network, HTTP, or JSON-decoding failure so callers
    can treat all failures uniformly.
    """
    try:
        # Random pause to avoid hammering the API.
        time.sleep(random.uniform(*REQUEST_INTERVAL))
        response = requests.get(
            url,
            headers=get_enhanced_headers(),
            params=params,
            timeout=15,
            verify=False  # debugging only — TLS verification deliberately off
        )

        if response.status_code == 403:
            print(f"访问被拒绝，请检查Cookie有效性（当前URL: {url}）")
            return None

        response.raise_for_status()
        return response.json()
    except (RequestException, JSONDecodeError) as e:
        # Narrowed from a bare `except Exception`: only expected network /
        # HTTP / JSON-decoding failures are swallowed here (both types are
        # already imported at the top of the file); programming errors now
        # surface instead of being silently logged as "request errors".
        print(f"请求异常: {str(e)}")
        return None


def parse_weibo(weibo):
    """Parse one timeline card into a flat dict: id/time/content/stats/media/url.

    Returns None when the card cannot be parsed; the error is logged so the
    caller can simply skip bad entries.
    """
    try:
        mblog = weibo["mblog"]
        weibo_id = mblog["id"]

        # Prefer the full text for truncated ("long") posts; fall back to
        # the truncated text when the extend endpoint returns nothing.
        content = mblog.get("text", "")
        if mblog.get("isLongText"):
            full_text = get_full_text(weibo_id)
            content = full_text if full_text else content

        # Strip HTML tags, 【...】 brackets, #hashtags# and @mentions.
        content = re.sub(r'<[^>]+>|【.*?】|#.*?#|@\S+\s?', '', content).strip()

        # Media extraction. Guard the "large" lookup: a pic entry without a
        # large variant previously raised KeyError and discarded the whole
        # post via the except below.
        images = [
            pic["large"]["url"]
            for pic in mblog.get("pics", [])
            if pic.get("large", {}).get("url")
        ]
        media = {
            "images": images,
            "video": mblog.get("page_info", {}).get("media_info", {}).get("mp4_720p_mp4")
        }

        return {
            "id": weibo_id,
            "time": format_time(mblog.get("created_at")),
            "content": content,
            "stats": {
                "reposts": mblog.get("reposts_count", 0),
                "comments": mblog.get("comments_count", 0),
                "likes": mblog.get("attitudes_count", 0)
            },
            "media": media,
            "url": f"https://m.weibo.cn/detail/{weibo_id}"
        }
    except Exception as e:
        # Broad by design: any malformed card is logged and skipped rather
        # than aborting the whole crawl.
        print(f"解析异常: {str(e)}")
        return None


def format_time(raw_time):
    """Normalize Weibo's created_at (e.g. 'Sun May 25 12:00:00 +0800 2025')
    to 'YYYY-MM-DD HH:MM:SS'; return the input unchanged if it can't parse."""
    try:
        parsed = datetime.strptime(raw_time, "%a %b %d %H:%M:%S %z %Y")
        return parsed.strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` (which even caught KeyboardInterrupt).
        # ValueError covers a malformed timestamp string; TypeError covers a
        # non-string input such as None.
        return raw_time


def save_to_file(data, filename):
    """Serialize *data* (a list of weibo dicts) to *filename* as
    pretty-printed UTF-8 JSON, then report how many entries were written."""
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    # Bug fix: the message previously printed a literal "(unknown)"
    # placeholder instead of interpolating the destination filename.
    print(f"已保存 {len(data)} 条微博到 {filename}")


def crawl_weibo_user(containerid):
    """Crawl up to MAX_PAGES timeline pages for *containerid* and save the
    parsed posts to OUTPUT_FILE; stops early at the first empty page."""
    all_weibos = []

    for page in range(1, MAX_PAGES + 1):
        print(f"正在爬取第 {page} 页...")
        data = get_weibo_list(containerid, page)

        if not data or not data.get("data"):
            print(f"第 {page} 页无数据，可能已到末尾")
            break

        weibos = []
        for card in data["data"].get("cards", []):
            # card_type 9 marks a regular weibo card. Use .get here: cards
            # without a "card_type" key previously raised KeyError and
            # crashed the whole crawl.
            if card.get("card_type") == 9 and "mblog" in card:
                if parsed := parse_weibo(card):
                    weibos.append(parsed)

        if not weibos:
            print("没有获取到有效微博")
            break

        all_weibos.extend(weibos)
        print(f"第 {page} 页获取到 {len(weibos)} 条微博")

    if all_weibos:
        save_to_file(all_weibos, OUTPUT_FILE)
    else:
        print("未获取到任何微博")


if __name__ == "__main__":
    print("启动增强版微博爬虫")
    started = time.time()

    # Run only when both mandatory settings are present.
    if USER_ID and COOKIE:
        containerid = get_containerid(USER_ID)
        if containerid:
            crawl_weibo_user(containerid)
    else:
        print("错误：请配置USER_ID和有效的COOKIE")

    print(f"执行完毕，总耗时: {time.time() - started:.2f}秒")