import csv
import datetime
from DrissionPage import ChromiumPage

# 请求函数
def get_json(page, pages=20):
    """Scrape comment-list packets from a Douyin video page.

    Navigates to the video URL, scrolls to the bottom repeatedly to trigger
    lazy loading, captures each ``aweme/v1/web/comment/list/`` response and
    hands its JSON body to ``get_parse``.

    Args:
        page: A DrissionPage ``ChromiumPage`` instance.
        pages: Number of comment pages to capture (default 20, matching
            the original behavior).
    """
    # Target video URL.
    url = 'https://www.douyin.com/video/7362905440650366208?modeFrom=userPost&secUid=MS4wLjABAAAAompXkPoYOGsA152dqYoytKycjIZ_aCCxHwGmLX5IsDM'
    # Start intercepting the comment-list API before navigating, so the
    # first response is not missed.
    page.listen.start('aweme/v1/web/comment/list/')
    try:
        # Open the video page.
        page.get(url=url)
        # 1-based page counter: the original started at 0 and printed
        # "第0页" for the first page.
        for i in range(1, pages + 1):
            print(f'正在获取第{i}页评论')
            # Scrolling to the bottom makes the site request the next batch.
            page.scroll.to_bottom()
            # Block until the next intercepted comment-list packet arrives.
            resp = page.listen.wait()
            # Parsed JSON body of the response.
            json_data = resp.response.body
            get_parse(data=json_data)
    except Exception as e:
        # Best-effort scrape: report the error and fall through to cleanup.
        print(e)
    finally:
        # Always close the browser, even when an error interrupts the loop.
        page.quit()

def get_parse(data):
    """Extract comment fields from one page of comment-list JSON and save them.

    Args:
        data: Parsed JSON dict from the ``aweme/v1/web/comment/list/``
            endpoint; expected to contain a ``comments`` list.
    """
    dit_list = []
    # Each element of "comments" describes a single comment; missing
    # optional fields default to '' instead of raising KeyError.
    for info in data.get('comments') or []:
        user = info.get('user', {})
        # Unix timestamp of the comment.
        create_time = info.get('create_time', 0)
        dit_list.append({
            '昵称': user.get('nickname', ''),
            '认证': user.get('custom_verify', ''),
            '抖音号': user.get('unique_id', ''),
            '标签': user.get('signature', ''),
            # Key must match the CSV header ('地点'); the original used
            # '地区', which makes DictWriter.writerow raise ValueError.
            '地点': info.get('ip_label', ''),
            # Convert the timestamp to a human-readable datetime string.
            '时间': str(datetime.datetime.fromtimestamp(create_time)),
            '评论': info.get('text', ''),
        })
    # Save once after the loop: the original called save_csv inside the
    # loop with the whole accumulated list, duplicating earlier rows on
    # every iteration.
    save_csv(data=dit_list)

def save_csv(data):
    """Append every row dict in *data* to the CSV file.

    Relies on the module-level ``writer`` (a ``csv.DictWriter``) created
    in the ``__main__`` block.
    """
    # writerows is equivalent to calling writerow once per element.
    writer.writerows(data)

if __name__ == '__main__':
    # CSV header; get_parse must produce dicts with exactly these keys.
    filenames = ['昵称', '认证', '抖音号', '标签', '地点', '时间', '评论']
    # Context manager guarantees the file is flushed and closed even if
    # scraping raises; the original never closed the handle.
    # NOTE(review): mode 'a' appends, so re-running the script writes the
    # header again mid-file — confirm whether 'w' (fresh file per run) is
    # the intended behavior.
    with open('评论.csv', 'a', encoding='utf-8', newline='') as file:
        # DictWriter maps dict keys to CSV columns by header name.
        writer = csv.DictWriter(file, fieldnames=filenames)
        # Write the header row.
        writer.writeheader()

        # Browser page object driving a real Chromium instance.
        page = ChromiumPage()

        # Run the scrape; writes happen via the module-level writer.
        get_json(page=page)