import requests
import re
import json
import time
import random
from datetime import datetime
from requests.exceptions import RequestException, JSONDecodeError

# --- Configuration ---
SEARCH_KEY = "科技"  # search keyword
SEARCH_PAGES = 3  # number of result pages to crawl
SEARCH_TYPE = "1"  # 1 = combined/general search
OUTPUT_FILE = "weibo_data.json"  # output file name


def send_request(url, headers, method="GET", params=None, data=None):
    """Issue an HTTP request and return the decoded JSON payload.

    On any transport/HTTP failure or a non-JSON body, log the problem
    and return None so callers can treat the page as missing.
    """
    # Phase 1: perform the request and check the HTTP status.
    try:
        response = requests.request(
            method.upper(),
            url,
            headers=headers,
            params=params,
            json=data,
            timeout=10,
        )
        response.raise_for_status()
    except RequestException as e:
        print(f"请求失败 - URL: {url} 错误: {str(e)}")
        return None

    # Phase 2: decode the body as JSON.
    try:
        return response.json()
    except JSONDecodeError:
        print(f"JSON解析失败 - URL: {url} 响应: {response.text[:200]}...")
        return None


def get_headers():
    """Assemble the HTTP headers (UA, referer, cookie) for m.weibo.cn."""
    user_agent = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/114.0.0.0 Safari/537.36"
    )
    headers = {
        "User-Agent": user_agent,
        "Referer": "https://m.weibo.cn/",
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        # NOTE: must be replaced with a real logged-in cookie for the API
        # to return full results.
        "Cookie": "YOUR_COOKIE_HERE",
    }
    return headers


def search_weibo(keyword, page, search_type):
    """Query the m.weibo.cn container API for one page of search results."""
    query = {
        "containerid": f"100103type={search_type}&q={keyword}",
        "page_type": "searchall",
        "page": page,
    }
    return send_request(
        "https://m.weibo.cn/api/container/getIndex",
        headers=get_headers(),
        params=query,
    )


def get_post_detail(post_id):
    """Fetch the full (long-text) body of a single post by its id."""
    return send_request(
        "https://m.weibo.cn/statuses/extend",
        headers=get_headers(),
        params={"id": post_id},
    )


def parse_post(post):
    """Parse one search-result card into a flat dict of post fields.

    Returns None when the card cannot be parsed (missing "mblog"/"id",
    etc.) so callers can simply skip it.
    """
    try:
        mblog = post["mblog"]
        post_id = mblog["id"]

        # Prefer the full text from the "extend" endpoint, falling back to
        # the (possibly truncated) text embedded in the search card.
        # BUG FIX: the original indexed detail["data"]["longTextContent"]
        # directly, so posts with no long text raised KeyError and the
        # whole post was dropped by the except below.
        raw_text = ""
        detail = get_post_detail(post_id)
        if detail:
            raw_text = (detail.get("data") or {}).get("longTextContent") or ""
        if not raw_text:
            raw_text = mblog.get("text", "")
        # Strip HTML tags from the text.
        content = re.sub(r'<.*?>', '', raw_text).strip()

        return {
            "id": post_id,
            "user": mblog.get("user", {}).get("screen_name", ""),
            "time": mblog.get("created_at", ""),
            "source": mblog.get("source", ""),
            "reposts": mblog.get("reposts_count", 0),
            "comments": mblog.get("comments_count", 0),
            "likes": mblog.get("attitudes_count", 0),
            "content": content,
            "url": f"https://m.weibo.cn/status/{post_id}"
        }
    except Exception as e:
        print(f"解析微博失败: {str(e)}")
        return None


def process_page(page_data):
    """Extract every parsable post from one page of search results."""
    if not page_data or not page_data.get("data"):
        return []

    parsed = []
    for card in page_data["data"].get("cards", []):
        kind = card["card_type"]
        # Type 9 is a plain post card; type 11 is a group wrapping
        # several cards (combined-search layout).
        if kind == 9 and "mblog" in card:
            candidates = [card]
        elif kind == 11:
            candidates = [
                member for member in card.get("card_group", [])
                if member["card_type"] == 9 and "mblog" in member
            ]
        else:
            candidates = []

        for candidate in candidates:
            record = parse_post(candidate)
            if record:
                parsed.append(record)

    return parsed


def save_to_file(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON."""
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    # BUG FIX: the message previously printed the literal "(unknown)"
    # instead of the actual output path (broken f-string placeholder).
    print(f"已保存 {len(data)} 条数据到 {filename}")


def main():
    """Crawl SEARCH_PAGES pages of search results and persist them."""
    collected = []

    for page_no in range(1, SEARCH_PAGES + 1):
        print(f"正在爬取第 {page_no} 页...")

        # Fetch one page of search results.
        page_json = search_weibo(SEARCH_KEY, page_no, SEARCH_TYPE)
        if not page_json:
            print(f"第 {page_no} 页获取失败")
            continue

        # Parse the page; an empty page ends the crawl.
        page_posts = process_page(page_json)
        if not page_posts:
            print(f"第 {page_no} 页无有效数据")
            break

        collected.extend(page_posts)
        print(f"第 {page_no} 页获取到 {len(page_posts)} 条微博")

        # Randomized delay to reduce the chance of being blocked.
        time.sleep(random.uniform(2, 5))

    if collected:
        save_to_file(collected, OUTPUT_FILE)
    else:
        print("未获取到有效数据")


if __name__ == "__main__":
    # Script entry point: announce the crawl parameters, run it, and
    # report the elapsed wall-clock time.
    print(f"开始爬取微博关键词: {SEARCH_KEY} (共 {SEARCH_PAGES} 页)")
    start_time = time.time()
    main()
    print(f"爬取完成，耗时 {time.time() - start_time:.2f} 秒")