# 爬取电商任一类型商品的评论数据；
# 电商网站任选其二：京东、淘宝、拼多多；
# 分析页面，结合正则化，爬取评论数据，保存为csv文件，字段不限，数据量不少于1w条，推荐requests、selenium，scrapy。

# 步骤如下:
# 目标爬取京东和淘宝的某一类商品评论数据
# 1.加载Cookie文件和api文件
# 2.写好爬取解析代码
# 3.利用线程池进行高效爬取
# 4.导出comments.csv文件

import time
import random
import requests
from concurrent.futures import ThreadPoolExecutor
import csv

# Load the session cookie used to authenticate the comment requests.
# "with" guarantees the handle is closed even if reading fails.
with open("Cookie.txt", mode="r", encoding="utf-8") as cokf:
    Cookies = cokf.read().strip()

# Load the 6 comment-API URL templates (one per line).
# .strip() removes the trailing newline readline() keeps — left in place it
# would be embedded in the URL produced by api.format(page) and break the request.
with open("api.txt", mode="r", encoding="utf-8") as apif:
    api_list = []
    for _ in range(6):
        api_list.append(apif.readline().strip())


def get_goods_comment():
    """Scrape up to 49 pages of comments from one randomly chosen API
    endpoint and append (nickname, content, creationTime) rows to the
    shared CSV writer.

    Relies on module-level globals: ``Cookies``, ``api_list`` and
    ``csvwriter`` (created in ``__main__`` before the thread pool starts).
    Returns None; failures on individual pages are skipped, not raised.
    """
    header = {
        "Cookie": Cookies,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0"
    }
    # random.choice works for any number of templates, unlike the previous
    # hard-coded randint(0, 5).
    api = random.choice(api_list)
    for page in range(1, 50):
        try:
            # timeout keeps a dead endpoint from hanging a pool worker forever
            resp = requests.get(api.format(page), headers=header, timeout=10)
            # Parse the JSON body ONCE per page instead of once per field.
            comments = resp.json().get("comments", [])
        except (requests.RequestException, ValueError):
            # Network error or non-JSON body: skip this page, keep going.
            continue
        # Iterate whatever the page actually returned — a fixed range(10)
        # raised IndexError whenever a page held fewer than 10 comments.
        for item in comments:
            csvwriter.writerow([
                item.get('nickname'),
                item.get('content'),
                item.get('creationTime'),
            ])


if __name__ == '__main__':
    # Wall-clock timing of the whole scrape.
    t1 = time.time()
    # newline="" stops the csv module from emitting blank rows on Windows;
    # "with" closes the file even if a worker raises.
    with open("bak/comments.csv", mode="a", encoding="utf-8", newline="") as f:
        csvwriter = csv.writer(f)

        # Fan out 300 scraping tasks over 50 worker threads (I/O-bound, so
        # threads overlap the network waits).
        # NOTE(review): csv.writer is shared by all workers — interleaved
        # writerow calls from many threads may garble rows; confirm or
        # serialize writes with a lock.
        with ThreadPoolExecutor(50) as t:
            for i in range(300):
                t.submit(get_goods_comment)

    t2 = time.time()
    print("over!")
    print(f"花费时间为:{t2 - t1:.2f}")
