import requests
import csv
import json
from bs4 import BeautifulSoup
from typing import List, Dict
import os
from DrissionPage import ChromiumPage,ChromiumOptions
from urllib.parse import urlencode
# HTTP request headers. The commented-out 'cookie' / 'x-xsrf-token' entries
# were captured from a logged-in browser session and are kept only for
# reference — live cookies are obtained at runtime via GetCookie().
HEADERS = {
    # 'cookie': 'SCF=AukPyQyICIRDO7wzz6YF_6K2FbsmiSMgZoTrkiKIZA8JisqMK6xV5GepXg7S6kGVJiOvJKeGtrx1n9sjZJngkN8.; SINAGLOBAL=3848379929624.2236.1743251690894; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFRrog9G8BEWo6-oC5ci.S15JpX5KMhUgL.FoMRSo-7Son4SK-2dJLoI7UQdGSuSoqc; ALF=1747541638; SUB=_2A25FBaHWDeRhGeFG7VcR9ibFzjmIHXVmerserDV8PUJbkNANLUX2kW1NeQ8Xg6InyECKZw1ZzQRzisBVVi_17fyl; ULV=1744949651626:6:5:2:3492453778928.206.1744949651623:1744641284606; XSRF-TOKEN=EnLdbmwKUZY49IjGavy6TgYb; WBPSESS=S4W52QWAM_hnHD8p7--a3JfOKLiXBDqGuJ8rZ5OpenGvH5lrbpXek8_N4cI5D6bqnp7mwpS5f5ZzjEgjUYnvJcO90Rr-M0Do6ekuOstyOY-kUglGWzzBNQtV5rp4IK3jOo8O3iq7RSlgb4Un-LMAGg==',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0',
    # 'x-xsrf-token': '3Mn-8yS7GxC-TqRlHgeSKtwL',
}


# Query-string template for the weibo.com buildComments endpoint.
# 'id' and 'max_id' are filled in per request by fetch_comments().
PARAMS = {
    'is_reload': '1',
    'id': None,  # the target post's mid
    'is_show_bulletin': '2',
    'is_mix': '0',
    'count': '20',  # comments per page
    'max_id': None,  # pagination cursor, taken from each response
    'uid': '2382356540',
    'fetch_level': '0',
    'locale': 'zh-CN',
}

# Search parameters for s.weibo.com
SEARCH_PARAMS = {
    'q': '甲亢哥长沙',  # search keyword; change this to scrape a different topic
}

# CSV column headers — must match the keys of each comment dict built
# in fetch_comments()
FIELDNAMES = ['昵称', '性别', '地区', '点赞数', '评论']

# Hard cap on the total number of comments collected across all posts
MAX_COMMENTS = 1000
def GetCookie():
    """Return Weibo login cookies as a ``{name: value}`` dict.

    Uses DrissionPage with a persistent ``user_data`` profile: the first run
    requires a manual login in the opened browser, after which the session is
    reused automatically on later runs.

    The result is memoized on the function object, so the browser is launched
    at most once per process (the original launched a fresh browser on every
    call, and this function is called once per HTTP request).
    """
    cached = getattr(GetCookie, '_cached_cookies', None)
    if cached is not None:
        return cached

    co = ChromiumOptions()
    co.set_user_data_path(r'user_data')
    # Build the search URL with the keyword percent-encoded.
    url = f'https://s.weibo.com/weibo?{urlencode(SEARCH_PARAMS)}'
    page = ChromiumPage(co)
    try:
        page.get(url)
        # Extract cookies into a plain dict usable by requests.
        cookies = {cookie['name']: cookie['value'] for cookie in page.cookies()}
    finally:
        # Release the browser once the cookies are captured — the original
        # left the Chromium instance running.
        page.quit()

    GetCookie._cached_cookies = cookies
    return cookies


def get_page() -> str:
    """Download the Weibo search-results page and return its HTML.

    Returns an empty string when the request fails for any reason.
    """
    try:
        resp = requests.get(
            'https://s.weibo.com/weibo',
            params=SEARCH_PARAMS,
            headers=HEADERS,
            cookies=GetCookie(),
            timeout=10,
        )
        resp.raise_for_status()
    except requests.RequestException as e:
        print(f"获取页面失败: {e}")
        return ""
    return resp.text

def get_mid() -> List[str]:
    """Parse the search page and collect the ``mid`` of every post card.

    Returns an empty list when the page could not be fetched; on a parse
    error, whatever ids were collected so far are returned.
    """
    html = get_page()
    if not html:
        return []

    ids: List[str] = []
    try:
        cards = BeautifulSoup(html, 'lxml').find_all(class_='card-wrap')
        # The final card-wrap element is not a post, so it is skipped.
        for card in cards[:-1]:
            post_id = card.get('mid')
            if post_id:
                ids.append(post_id)
    except Exception as e:
        print(f"解析 mid 失败: {e}")

    return ids

def fetch_comments(post_id: str, total_comments: List[Dict]) -> List[Dict]:
    """Fetch comments for a single Weibo post via the buildComments API.

    Pages through the endpoint until it is exhausted or the combined total
    (``len(total_comments)`` plus comments fetched here) reaches MAX_COMMENTS.

    Args:
        post_id: the post's mid, as returned by get_mid().
        total_comments: comments already collected from earlier posts; only
            its length is read here — it is never mutated.

    Returns:
        The new comments for this post, each a dict keyed by FIELDNAMES.
    """
    comments: List[Dict] = []
    params = PARAMS.copy()
    params['id'] = post_id
    params['max_id'] = None

    # Fetch the login cookies once per post, not once per page: the original
    # called GetCookie() inside the loop, which could launch a browser
    # session for every single pagination request.
    cookies = GetCookie()

    while True:
        # Stop once the global cap is reached, trimming any overshoot.
        if len(total_comments) + len(comments) >= MAX_COMMENTS:
            comments = comments[:MAX_COMMENTS - len(total_comments)]
            break

        try:
            response = requests.get('https://weibo.com/ajax/statuses/buildComments',
                                   headers=HEADERS, params=params, cookies=cookies, timeout=10)
            response.raise_for_status()
            data = response.json()

            if not data.get('data'):
                break

            for comment in data['data']:
                user = comment.get('user', {})
                comment_data = {
                    '昵称': user.get('screen_name', ''),
                    '性别': user.get('gender', ''),
                    '地区': comment.get('source', ''),
                    '点赞数': comment.get('like_counts', 0),
                    '评论': comment.get('text_raw', '')
                }
                print(comment_data)
                comments.append(comment_data)

            # A missing or zero max_id means the last page was reached.
            params['max_id'] = data.get('max_id')
            if not params['max_id']:
                break

        except requests.RequestException as e:
            print(f"请求错误: {e}")
            break
        except json.JSONDecodeError:
            print("JSON 解析错误")
            break
        except KeyError as e:
            print(f"数据结构错误: {e}")
            break

    return comments

def save_to_csv(comments: List[Dict], filename: str = '微博评论.csv',
                fieldnames=None):
    """Append *comments* to *filename* as CSV rows.

    A header row is written only when the file is new or empty.

    Args:
        comments: rows to append; each dict's keys must match *fieldnames*.
        filename: target CSV path (opened in append mode, utf-8 with BOM so
            Excel displays the Chinese text correctly).
        fieldnames: column order; defaults to the module-level FIELDNAMES.
    """
    if fieldnames is None:
        fieldnames = FIELDNAMES
    try:
        with open(filename, mode='a', encoding='utf-8-sig', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)

            # f.tell() == 0 covers both a brand-new file and an existing but
            # empty one — the previous os.path.exists() check missed the
            # empty-file case and was race-prone between check and open.
            if f.tell() == 0:
                writer.writeheader()

            writer.writerows(comments)
    except IOError as e:
        print(f"写入 CSV 文件错误: {e}")

def main():
    """Entry point: collect post ids from the search page, then scrape and
    persist each post's comments until MAX_COMMENTS are saved."""
    # Output path — keep in sync with save_to_csv's default.
    out_file = '微博评论.csv'

    id_list = get_mid()  # mids of the posts found on the search page
    if not id_list:
        print("未获取到任何帖子 ID")
        return

    total_comments = []  # all comments collected across every post
    for index, post_id in enumerate(id_list, start=1):
        if len(total_comments) >= MAX_COMMENTS:
            print(f"已收集 {MAX_COMMENTS} 条评论，停止抓取")
            break

        print(f"正在抓取第{index}篇帖子的评论...")
        comments = fetch_comments(post_id, total_comments)
        if comments:
            total_comments.extend(comments)
            save_to_csv(comments, out_file)
            # Fix: the message used to print a literal "(unknown)" placeholder
            # instead of the actual output filename.
            print(f"已保存 {len(comments)} 条评论到 {out_file}")
        else:
            print(f"该帖子没有评论，跳过...")

    print(f"抓取完成，总共保存 {len(total_comments)} 条评论")

if __name__ == "__main__":
    filename = '微博评论.csv'
    main()