# 爬取视频网站某视频的弹幕数据
# 视频网站任选其一：腾讯、优酷、爱奇艺、芒果TV、Bilibili
# 分析页面，结合正则化，爬取评论数据，保存为csv文件，字段不限，数据量不少于1w条，推荐requests、selenium，scrapy

# 步骤如下:
# 目标爬取腾讯视频电视剧与凤行的弹幕数据
# 1.查看api地址的调用规律
# 2.用request写好爬取代码
# 3.通过api调用获取数据
# 4.导出G.csv文件

import csv
import os
import time

import requests

# 热剧一集的弹幕太多,通过计数来控制获取的数据量
count = 0


def get_danmu(video_code="d0048nosrlr", max_count=10000):
    """Fetch the danmu (bullet comments) of one Tencent Video episode.

    Calls the segment API in consecutive 30-second windows and writes each
    comment's id and content through the module-level ``csvwriter``.  Stops
    as soon as a segment comes back empty (no more danmu) or ``max_count``
    rows have been written; the running total is kept in the module-level
    ``count``.

    Args:
        video_code: Tencent Video episode id (default: the episode this
            assignment targets, 与凤行).
        max_count: stop after this many rows have been written.
    """
    global count
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0"
    }
    # Segment window width in milliseconds (one 30 s slice per request).
    step = 30000
    # Generous upper bound on requests; the empty-segment break below is
    # the real stop condition.
    num = 10000
    for i in range(num):
        start = i * step
        url = (f'https://dm.video.qq.com/barrage/segment/'
               f'{video_code}/t/v1/{start}/{start + step}')
        # BUG FIX: the original built `header` but never sent it — pass it
        # here, and add a timeout so a stalled connection cannot hang the
        # whole scrape.
        response = requests.get(url=url, headers=header, timeout=10).json()
        barrages = response["barrage_list"]
        if not barrages:
            # BUG FIX: the original's comment promised an automatic exit
            # when the danmu ran out, but no break existed — add it.
            break
        for line in barrages:
            csvwriter.writerow([line['id'], line['content']])
            count += 1
            # Check per row (not per segment) so we stop at exactly
            # max_count instead of overshooting by up to one segment.
            if count >= max_count:
                return


if __name__ == '__main__':
    # Time the whole scrape.
    t1 = time.time()
    # BUG FIX: the output directory may not exist yet, which would make
    # open() raise FileNotFoundError — create it first.
    os.makedirs("bak", exist_ok=True)
    # Initialize the comment file.
    # BUG FIX: newline="" is required by the csv module to avoid blank
    # rows on Windows; `with` guarantees the file is closed even if the
    # scrape raises mid-way.
    with open("bak/G.csv", mode="w", encoding="utf-8", newline="") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(["ID", "弹幕"])
        # Run the scraper; it writes rows through the module-level
        # csvwriter bound above.
        get_danmu()

    print("总共爬取弹幕条数为:", count)

    t2 = time.time()
    print("over!")
    print(f"花费时间为:{t2 - t1:.2f}")
