import warnings
warnings.filterwarnings("ignore")

import argparse
import html
import json
import re
import time
from pathlib import Path
from typing import Any, Dict, List

import pandas as pd
import requests


def load_headers(headers_path: Path) -> Dict[str, str]:
    """Load request headers (Cookie / User-Agent) from a JSON file.

    The file must contain a single JSON object; every value is coerced to
    ``str`` so the result is directly usable as ``requests`` headers.

    Raises:
        FileNotFoundError: if the file does not exist.
        ValueError: if the top-level JSON value is not an object.
    """
    if not headers_path.exists():
        raise FileNotFoundError(f"未找到 headers.json，请创建并填入 Cookie/UA: {headers_path}")
    raw_text = headers_path.read_text(encoding='utf-8')
    parsed = json.loads(raw_text)
    if not isinstance(parsed, dict):
        raise ValueError('headers.json 内容应为 JSON 对象，如 {"Cookie": "...", "User-Agent": "..."}')
    return dict((name, str(value)) for name, value in parsed.items())


def fetch_weibo_search(query: str, page: int, headers: Dict[str, str]) -> Dict[str, Any]:
    """Fetch one page of m.weibo.cn search results as parsed JSON.

    Raises ``requests.HTTPError`` on non-2xx responses and propagates
    other ``requests`` network exceptions.
    """
    endpoint = 'https://m.weibo.cn/api/container/getIndex'
    query_params = {
        'containerid': f'100103type=1&q={query}',
        'page_type': 'searchall',
        'page': page,
    }
    response = requests.get(endpoint, params=query_params, headers=headers, timeout=15)
    response.raise_for_status()
    return response.json()


def parse_cards(data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Extract post records from a search-API response payload.

    Cards lacking an ``mblog`` entry (ads, topic headers, etc.) are skipped.
    Each record carries: id, text (raw HTML), created_at, like_count.
    """
    posts: List[Dict[str, Any]] = []
    for card in data.get('data', {}).get('cards', []):
        blog = card.get('mblog')
        if not blog:
            continue
        posts.append({
            'id': blog.get('id'),
            'text': blog.get('text', ''),
            'created_at': blog.get('created_at'),
            'like_count': blog.get('attitudes_count', 0),
        })
    return posts


# Compiled once at import time: matches any HTML tag such as <a ...> or <br/>.
_TAG_RE = re.compile(r'<[^>]+>')


def clean_html(raw_html: str) -> str:
    """Strip HTML markup from an m.weibo.cn ``text`` field.

    Fixes over the previous version:
    - the regex is compiled once at module level instead of re-importing
      ``re`` and recompiling on every call;
    - HTML entities (``&amp;``, ``&quot;``, ...) that remain after tag
      removal are decoded via ``html.unescape`` instead of being left in
      the output text.
    """
    plain = _TAG_RE.sub('', raw_html)
    return html.unescape(plain).strip()


def crawl(query: str, pages: int, delay: float, headers: Dict[str, str]) -> pd.DataFrame:
    """Crawl ``pages`` pages of search results for ``query``.

    Args:
        query: search keyword passed to the mobile search API.
        pages: number of result pages to request (1-based).
        delay: seconds to wait between consecutive requests.
        headers: request headers (Cookie/User-Agent) for m.weibo.cn.

    Returns:
        DataFrame with columns id/text/created_at/like_count, deduplicated
        by post id; empty if nothing was fetched.

    Network/HTTP errors on a page are reported and that page is skipped,
    so a single failure does not abort the whole crawl.

    Fix over the previous version: the inter-request delay is no longer
    applied after the final page, which used to add a pointless sleep
    before returning.
    """
    all_items: List[Dict[str, Any]] = []
    for p in range(1, pages + 1):
        print(f'抓取第 {p}/{pages} 页...')
        try:
            data = fetch_weibo_search(query, p, headers)
            items = parse_cards(data)
            for it in items:
                it['text'] = clean_html(it.get('text', ''))
            all_items.extend(items)
        except requests.HTTPError as e:
            print('HTTP 错误：', e)
        except requests.RequestException as e:
            print('网络异常：', e)
        # Throttle between requests, but do not sleep after the last one.
        if p < pages:
            time.sleep(delay)
    df = pd.DataFrame(all_items)
    if not df.empty:
        # Search pages can overlap, so deduplicate by post id.
        df = df.drop_duplicates(subset=['id']).reset_index(drop=True)
    return df


def main():
    """CLI entry point: parse arguments, crawl, and write the CSV output."""
    arg_parser = argparse.ArgumentParser(description='微博搜索抓取（方案A）')
    # Search keyword is configurable from the command line
    arg_parser.add_argument('--query', type=str, default='阅兵', help='搜索关键词，如: 上海天气')
    arg_parser.add_argument('--pages', type=int, default=3, help='抓取页数')
    arg_parser.add_argument('--delay', type=float, default=1.5, help='请求间隔秒')
    arg_parser.add_argument('--headers', type=str, default='headers.json', help='包含 Cookie/UA 的 JSON 文件')
    arg_parser.add_argument('--output', type=str, default='weibo_crawl.csv', help='输出 CSV 文件名')
    opts = arg_parser.parse_args()

    base_dir = Path(__file__).resolve().parent
    headers = load_headers(base_dir / opts.headers)
    df = crawl(opts.query, opts.pages, opts.delay, headers)
    if df.empty:
        print('未抓取到数据，请检查关键词、Cookie 或者适当减少 pages。')
        return

    # Normalize the column set and order for downstream analysis scripts
    wanted = ['id', 'text', 'created_at', 'like_count']
    for col in wanted:
        if col not in df.columns:
            df[col] = None
    df = df[wanted]

    out_path = base_dir / opts.output
    df.to_csv(out_path, index=False, encoding='utf-8')
    print('已保存：', out_path)


# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()


