import csv
import os
import random
import time

import requests


import execjs
import json



def jiemi(data):
    """Encrypt plain request params via the site's JS routine (weapi scheme).

    Serializes *data* to JSON, feeds it to the ``fn`` function inside the
    local "网易.js" file through execjs, and returns the two form fields the
    weapi endpoint expects.

    Args:
        data: dict of plain (unencrypted) request parameters.

    Returns:
        dict with keys ``params`` (encrypted text) and ``encSecKey``
        (encrypted secret key), ready to POST as form data.
    """
    s = json.dumps(data)
    # Use a context manager so the JS source file handle is always closed
    # (the original leaked the handle returned by open()).
    with open("网易.js", mode="r", encoding="utf-8") as f:
        js = execjs.compile(f.read())
    dic = js.call("fn", s)

    real_data = {
        "params": dic['encText'],
        "encSecKey": dic['encSecKey'],
    }
    return real_data



def qingqiu(real_data):
    """POST the encrypted form data to the comment API and return the response.

    Args:
        real_data: dict with ``params`` / ``encSecKey`` produced by jiemi().

    Returns:
        The ``requests.Response`` object (caller is expected to call .json()).

    Raises:
        requests.exceptions.RequestException: on network failure or timeout.
    """
    url = "https://music.163.com/weapi/comment/resource/comments/get?csrf_token="
    # A browser-like UA is required or the endpoint rejects the request.
    # timeout added: requests never times out by default, which would hang
    # the whole crawl loop on a stalled connection.
    resp = requests.post(url, data=real_data, headers={
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
    }, timeout=30)
    return resp



# Parse the comment response and persist the accumulated rows to CSV
def parse(resp, extracted_data):
    """Extract comments from an API response, append them to *extracted_data*,
    rewrite the CSV file with the accumulated rows, and return the next cursor.

    Args:
        resp: response object whose .json() yields the comment payload
              (expects data.comments and data.cursor keys).
        extracted_data: accumulator list of rows shared across pages; new
              [nickname, content, time, location] rows are appended in place.

    Returns:
        The ``cursor`` value from the payload, used to request the next page.
    """
    data = resp.json()
    comments = data['data']['comments']
    cur = data['data']['cursor']

    # Map the site's relative day labels to concrete dates.
    # NOTE(review): hard-coded for a crawl run around 2024-06-23 — stale for
    # any other run date; confirm before reusing.
    date_map = {
        '1天前': '2024-6-22',
        '2天前': '2024-6-21',
        '3天前': '2024-6-20',
        '4天前': '2024-6-19',
        '5天前': '2024-6-18',
        '6天前': '2024-6-17',
        '7天前': '2024-6-16',
    }

    # Normalize each comment's timestamp, then collect one row per comment.
    for comment in comments:
        nickname = comment['user']['nickname']
        # Strip line breaks/tabs so each comment stays on one CSV row.
        content = comment['content'].replace('\n', '').replace('\r', '').replace('\t', '')
        location = comment['ipLocation']['location']
        com_time = comment['timeStr']
        if com_time in date_map:
            # Relative label such as '3天前' -> absolute date.
            com_time = date_map[com_time]
        elif len(com_time.split('-')[0]) != 4:
            # Month-day form such as '6-20' (no year) -> prefix current year.
            com_time = '2024-' + com_time
        # else: already a full 'YYYY-...' timestamp, keep as-is.
        extracted_data.append([nickname, content, com_time, location])

    # Rewrite the whole file each call: extracted_data accumulates rows from
    # every page, so mode 'w' keeps the file complete and duplicate-free.
    # (The original duplicated this write code in both branches of an
    # os.path.exists() check that only changed the printed message.)
    output_file = '少卿游评论1111111.csv'
    existed = os.path.exists(output_file)
    with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
        csv.writer(csvfile).writerows(extracted_data)
    if not existed:
        print('文档创建并保存成功')
    else:
        print(f"数据已保存到 {output_file} 文件中")

    return cur

if __name__ == '__main__':
    # Interactive entry point: ask how many pages to crawl, then fetch them
    # one by one, threading the pagination cursor between requests.
    page = int(input('请输入你要爬取的页数：'))
    cur = -1  # -1 requests the first page; updated from each response
    extracted_data = [['用户', '评论','评论时间','评论所在地']]  # CSV header row
    for page_no in range(1, page + 1):
        plain_params = {
            'csrf_token': "93f077b28d1f995e63bcd38b48d1c493",
            'cursor': cur,
            'offset': "0",
            'orderType': "1",
            'pageNo': f"{page_no}",
            'pageSize': "20",
            'rid': "R_SO_4_1442031484",
            'threadId': "R_SO_4_1442031484",
        }

        real_data = jiemi(plain_params)

        print(f'正在爬取第{page_no}页')
        resp = qingqiu(real_data)
        cur = parse(resp, extracted_data)
        # Random pause between pages to avoid hammering the server.
        time.sleep(random.randint(10, 30))

