# 作者：张同学
# |数据分析|网络爬虫|人工智能|数学建模|Gui界面设计
# 开源仓库：https://gitee.com/zhang-junah/SpiderProject
# 商务|业务|简历投递|联系微信：traffic_zhang
# 张同学代码兼职团队招聘！一单开发周期三天，500+单薪！共享所有学习资料！

import requests
from bs4 import BeautifulSoup


# 请求发送组件
# Request-sending component
def get_html(page: int, limit: int):
    """Fetch one page of Douban book comments as HTML text.

    :param page: zero-based page index; translated to the ``start`` offset
                 (``page * 20``) expected by the endpoint.
    :param limit: number of comments requested per page.
    :return: the response body on HTTP 200, otherwise ``None``.
    """
    # Request headers; replace the cookie with your own — this one may have
    # expired. (Relies on the module-level ``import requests`` — the
    # duplicate function-local import was removed.)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "cookie":"""bid=VZ4mbdvX2gQ; _pk_id.100001.3ac3=09370c8fcfcf9939.1720090168.; _vwo_uuid_v2=D70F1882CA9FE3A02764A22EBC2C6656B|cf7af0c0b3a707fbeaf444c9f89a023f; douban-fav-remind=1; _vwo_uuid_v2=D70F1882CA9FE3A02764A22EBC2C6656B|cf7af0c0b3a707fbeaf444c9f89a023f; viewed="36150423_26948956_34863631"; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1734491337%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.3ac3=1; ap_v=0,6.0; __utma=30149280.451472970.1720090168.1733911235.1734491337.12; __utmc=30149280; __utmz=30149280.1734491337.12.11.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=81379588.1062429145.1720090168.1733911235.1734491337.8; __utmc=81379588; __utmz=81379588.1734491337.8.7.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmt_douban=1; __utmt=1; dbcl2="276484751:ZhJeZKpQp3I"; ck=jGMK; push_noty_num=0; push_doumail_num=0; __utmt=1; __utmv=30149280.27648; frodotk_db="bf0f54a12d1954e93d2dfde3874bfebb"; __utmb=30149280.22.10.1734491337; __utmb=81379588.13.10.1734491337"""
    }

    # Query-string parameters understood by the comments endpoint.
    params = {
        "start": page * 20,
        "limit": limit,
        "status": "P",
        "sort": "score"
    }

    # Base URL of the comments page.
    url = "https://book.douban.com/subject/36150423/comments/"

    # A timeout prevents the scraper from hanging forever on a stalled
    # connection (the original request had none).
    response = requests.get(url, headers=headers, params=params, timeout=30)

    if response.status_code == 200:
        return response.text
    print(f"请求失败，状态码: {response.status_code}")
    return None

# 将字典数据保存为CSV文件,追加写入
# Persist one record (the dict's values) as a CSV row, appending to the file.
def save_data(data: dict, file_path: str):
    """Append the values of *data* as a single CSV row to *file_path*.

    :param data: record to persist; only the values are written, in the
                 dict's insertion order.
    :param file_path: destination CSV file; created on first write.
    :return: None
    """
    import csv

    row = list(data.values())
    with open(file_path, "a", encoding="utf-8", newline="") as csv_file:
        csv.writer(csv_file).writerow(row)


# 解析网页组件
# HTML-parsing component
def parse_html(html: str):
    """Parse one comments page and persist each comment via ``save_data``.

    :param html: raw HTML of a Douban book comments page.
    :return: None; rows are appended to ``SpiderData.csv`` as a side effect.
    """
    soup = BeautifulSoup(html, "html.parser")

    # The comments live inside <div class="comment-list score">.
    for comment_list in soup.find_all("div", class_="comment-list score"):
        # Each comment sits in an <li class="comment-item">.
        for item in comment_list.find_all("li", class_="comment-item"):
            # Hoisted: the original looked up comment-info twice per item.
            info = item.find("span", class_="comment-info")
            links = info.find_all("a") if info else []

            # Some comments omit a field (e.g. the location span); fall back
            # to "" instead of crashing on .text of a None find() result.
            vote_tag = item.find("span", class_="vote-count")
            vote_count = vote_tag.text.strip() if vote_tag else ""
            # First <a> in comment-info: commenter name; second: timestamp.
            author = links[0].text.strip() if len(links) > 0 else ""
            # Renamed from ``time`` to avoid shadowing the time module name.
            comment_time = links[1].text.strip() if len(links) > 1 else ""
            location_tag = item.find("span", class_="comment-location")
            location = location_tag.text.strip() if location_tag else ""
            content_tag = item.find("span", class_="short")
            content = content_tag.text.strip() if content_tag else ""

            # Assemble the record; keys are the CSV column meanings.
            data = {
                "点赞数": vote_count,
                "评论者": author,
                "评论时间": comment_time,
                "评论位置": location,
                "评论内容": content
            }
            save_data(data, "SpiderData.csv")


if __name__ == '__main__':
    import time
    import random

    # Crawl page after page until the server stops returning content.
    # The endpoint's ``start`` offset is ``page * 20``, so numbering must
    # begin at 0 — the original started at 1 and silently skipped the
    # first 20 comments.
    page = 0
    while True:
        html = get_html(page, 20)
        if not html:
            break
        parse_html(html)
        page += 1
        # After the increment, ``page`` equals the 1-based number of the
        # page just finished — the original printed one page too many.
        print(f"第{page}页爬取完成！")
        # NOTE(review): get_html only returns None on a non-200 status; if
        # the site keeps answering 200 for empty pages this loop may not
        # terminate on its own — confirm against the live endpoint.
        # Random pause between requests to reduce the chance of being blocked.
        time.sleep(random.randint(3, 5))
