# 爬取微博任一帖子的评论数据
# 微博某帖子
# 分析页面，结合正则表达式，爬取评论数据，保存为csv文件，字段不限，数据量不少于1w条，推荐requests、selenium，scrapy

# 步骤如下:
# 目标爬取微博某一帖子的评论数据--帖子链接:https://weibo.com/1624923463/O7NBSq4IS
# 1.加载Cookie文件和api文件
# 2.写好爬取代码
# 3.查看api地址的调用规律
# 4.通过api调用获取数据
# 5.因为微博服务器请求太快会无法加载内容,所以设置异常处理保证程序正常运行
# 6.将结果写入C.csv文件

import time
import requests
import csv

# Read the saved login Cookie (required by Weibo's comment API).
# Context managers guarantee the handles are closed even on error,
# and an explicit encoding avoids platform-dependent decoding.
with open("Cookie.txt", mode="r", encoding="utf-8") as cokf:
    Cookies = cokf.read()

# Read the API URLs: line 1 (aapi) is the first-page URL,
# line 2 (bapi) is a template with a {} placeholder for the max_id cursor.
with open("api.txt", mode="r", encoding="utf-8") as apif:
    aapi = apif.readline()
    bapi = apif.readline()


def api_load(nextid, template=None):
    """Advance the module-level API URL to the next comment page.

    Fills the paging template with *nextid* (Weibo's ``max_id`` cursor)
    and stores the result in the module-level ``aapi`` consumed by
    ``get_dynamic_comment``.

    Args:
        nextid: the ``max_id`` cursor returned by the previous API call.
        template: optional URL template containing a ``{}`` placeholder;
            defaults to the module-level ``bapi`` read from api.txt.

    Returns:
        The newly formatted URL (also assigned to ``aapi``), so the
        function can be used and tested without touching globals.
    """
    global aapi
    aapi = (bapi if template is None else template).format(nextid)
    return aapi


def get_dynamic_comment():
    """Fetch one page of comments from the current API URL and append rows.

    Reads the module globals ``aapi`` (current page URL), ``Cookies`` and
    ``csvwriter``, writes one CSV row per comment, and calls ``api_load``
    with the response's ``max_id`` cursor so the next call fetches the
    following page.
    """
    header = {
        "Cookie": Cookies,
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0"
    }
    try:
        # timeout prevents a single stalled request from hanging the crawl.
        resp = requests.get(aapi, headers=header, timeout=10)
        # Parse the body once; the original re-decoded it for every field.
        payload = resp.json()
    except (requests.RequestException, ValueError):
        # Weibo throttles rapid requests (may return non-JSON or fail);
        # back off briefly and skip this page instead of crashing the run.
        time.sleep(1)
        return
    # Advance the paging cursor; a missing max_id falls back to 0
    # rather than raising an uncaught KeyError.
    api_load(payload.get("max_id", 0))
    # Iterate the actual entries instead of assuming exactly 20 per page.
    for item in payload.get("data", []):
        try:
            csvwriter.writerow([
                item["user"]["screen_name"],  # 用户名
                item["text_raw"],             # 用户评论
                item["source"],               # 地区
            ])
        except KeyError:
            # Skip a malformed entry instead of aborting the whole page.
            continue


if __name__ == '__main__':
    # Time the whole crawl.
    t1 = time.time()
    # Initialize the comment file. newline="" stops the csv module from
    # emitting blank rows on Windows; the context manager guarantees the
    # file is flushed and closed even if a page raises.
    with open("bak/C.csv", mode="a", encoding="utf-8", newline="") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(["用户名", "用户评论", "地区"])

        # 1000 pages x up to 20 comments per page covers the 10k-row target.
        for _ in range(1000):
            get_dynamic_comment()

    t2 = time.time()
    print("over!")
    print(f"花费时间为:{t2 - t1:.2f}秒")
