import csv
import json
import os
import random
import re
import time

import requests
from fake_useragent import UserAgent

# Configuration
# {} in BASE_URL is filled by fetch_page with a page offset value.
BASE_URL = "https://tieba.baidu.com/p/7882177660?pn={}"
OUTPUT_FILE = f"D:/230320203002/贴吧评论.csv"  # replace with the actual student ID
HEADERS = {
    'User-Agent': UserAgent().random,  # random UA chosen once at import time
    'Referer': 'https://tieba.baidu.com/'
}

def fetch_page(page_num):
    """Fetch one page of the thread, with basic anti-scraping measures.

    Args:
        page_num: Zero-based page index.

    Returns:
        The page HTML as a string on success, or None on any request
        failure (network error or non-2xx HTTP status).
    """
    # NOTE(review): Tieba thread URLs normally take pn=<page number>; the
    # *50 offset here looks like a forum-listing convention — confirm.
    url = BASE_URL.format(page_num * 50)
    try:
        # Close the session deterministically; retry transient failures.
        with requests.Session() as session:
            session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
            response = session.get(url, headers=HEADERS, timeout=10)
        print(f"第{page_num+1}页状态码：{response.status_code}")
        # Treat 4xx/5xx as failures instead of parsing an error page.
        response.raise_for_status()
        time.sleep(random.uniform(1, 3))  # 随机延迟 (random delay between requests)
        return response.text
    except requests.RequestException as e:
        print(f"请求失败：{str(e)}")
        return None

def parse_comments(html):
    """Extract comments from the JSON state embedded in a Tieba page.

    The page ships its data in a ``window.__INITIAL_STATE__`` script tag;
    parsing that JSON is more stable than scraping the rendered HTML.

    Args:
        html: Raw page HTML.

    Returns:
        A list of (username, create_time, content) tuples; an empty list
        when the embedded JSON is absent or has an unexpected shape.
    """
    match = re.search(r'<script>window\.__INITIAL_STATE__=(.*?);</script>', html)
    if match is None:
        # Previously this fell through to .group(1) on None and raised
        # AttributeError; handle the missing-data case explicitly.
        print("解析失败：未找到页面内嵌JSON数据")
        return []
    try:
        comments_data = json.loads(match.group(1))['content']['postList']
        # Extract username, timestamp and body for each post.
        return [
            (
                comment['user']['username'],
                comment['createTime'],
                comment['content']
            )
            for comment in comments_data
        ]
    except (KeyError, TypeError, ValueError) as e:
        # ValueError covers json.JSONDecodeError; Key/TypeError cover
        # schema drift in the embedded state object.
        print(f"解析失败：{str(e)}")
        return []

def save_to_csv(data, filename):
    """Write comment rows to a CSV file, creating parent directories.

    Args:
        data: Iterable of (username, time, content) row tuples.
        filename: Destination path; missing parent directories are created.
    """
    parent = os.path.dirname(filename)
    # dirname is '' for a bare filename and os.makedirs('') raises.
    if parent:
        os.makedirs(parent, exist_ok=True)
    # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
    with open(filename, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['用户名', '评论时间', '评论内容'])
        writer.writerows(data)
    # Fixed: the message previously printed a literal "(unknown)" instead
    # of the destination path (the f-string had no placeholder).
    print(f"成功保存{len(data)}条数据至：{filename}")

if __name__ == "__main__":
    # Crawl the first 3 pages and dump every parsed comment to CSV.
    # The generator keeps fetch/parse interleaved page by page, exactly
    # as the original sequential loop did.
    page_htmls = (fetch_page(page) for page in range(3))
    all_comments = [
        comment
        for html in page_htmls
        if html
        for comment in parse_comments(html)
    ]
    save_to_csv(all_comments, OUTPUT_FILE)