# _*_  coding:utf-8 _*_
import sys
import time
from concurrent.futures.thread import ThreadPoolExecutor

import pandas as pd
import requests


class Spider:
    """Crawl per-post engagement stats for Weibo accounts listed in an Excel
    sheet, average them over up to 30 recent posts per account, and write the
    averages back out to a new Excel file.
    """

    def __init__(self):
        # NOTE(review): hard-coded session cookie — it expires; refresh it
        # before each run or requests will start failing/redirecting.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.8 Safari/537.36",
            "cookie": "SINAGLOBAL=1157605341662.2756.1625727130478; ULV=1627278253797:3:3:1:8971486200943.164.1627278253787:1625801070685; UOR=,,www.baidu.com; ALF=1658815545; SCF=AoXQHhCNQFbxkNONUOwpWg4Ec-ZE6QL9VWj_N2RSADjyiD0s7ph6TATPnokAx7hwLcYRr0D6ND3ssc4RN9_n-08.; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhBZxey-YvQUw0bs-GriblC5NHD95QcShM41KMfehBXWs4Dqcj_i--fiKy2iKyWi--NiK.0i-zci--RiKn7iKnpi--fiKysi-8si--Xi-z4iK.7; SUB=_2A25N-xmFDeRhGeBK7FsY9yvMzziIHXVvB6fNrDV8PUJbkNAKLVjukW1NR5L6XopvEsyZEcxmiRAUskUDFFjzSrvn; XSRF-TOKEN=6C_aqkOpvCu3uTGx665OnPL8; WBPSESS=UW5aoXrbB4-nockeRsgyxX-7rr-Tu5XECf3CoPxYgqdIuijgV-Bs8AiOJzN9H8-yZedICgUW4Dn9uneFJksomqztahRCDK3uPC9jFyCF1Q57V4Cd1vLlZWuz_hGNshjV"
        }
        # Paged profile endpoint: first {} is the account uid, second is the
        # paging cursor returned by the previous response.
        self.url = "https://weibo.com/ajax/profile/getWaterFallContent?uid={}&cursor={}"
        # identify_id -> (reposts_ave, comments_ave, attitudes_ave,
        #                 online_users_ave, number). Written by worker threads;
        # a single-key dict assignment is atomic under CPython's GIL.
        self.data_dict = {}

    def read_excel(self):
        """Return the list of Weibo uids from the KOL sheet.

        Reads column index 3 of every row (presumably the uid column — verify
        against the sheet layout) and skips rows whose cell is empty/NaN.

        Returns:
            list[int]: uids to crawl.
        """
        excel_list = pd.read_excel("./files/SZA-PR高频原创视频KOL list（更新）(1).xlsx", sheet_name="新浪微博", engine="openpyxl")
        identify_id_list = []
        for data in excel_list.values:
            if not pd.isna(data[3]):
                identify_id_list.append(int(data[3]))
        return identify_id_list

    def crawl(self, identify_id, number, cur):
        """Fetch and tally one page of posts for one account.

        Retries indefinitely (with a 1s back-off) until a 200 response with
        valid JSON is received.

        Args:
            identify_id: Weibo account uid.
            number: posts counted so far for this account (capped at 30).
            cur: paging cursor; 0 for the first page.

        Returns:
            tuple: (reposts, comments, attitudes, plays, next_cursor, number)
            — page totals plus the cursor for the next page and the updated
            post count.
        """
        reposts_count = 0
        comments_count = 0
        attitudes_count = 0
        online_users_number = 0
        while True:
            try:
                # timeout guards against a hung connection blocking forever.
                response = requests.get(self.url.format(str(identify_id), cur),
                                        headers=self.headers, timeout=10)
                if response.status_code == 200:
                    json_ = response.json()
                    break
            except (requests.RequestException, ValueError):
                # ValueError covers a 200 body that is not valid JSON.
                print("请求错误，正在重试")
            # Back off on both exceptions and non-200 statuses (the original
            # hammered the server in a tight loop on non-200).
            time.sleep(1)
        cur = json_["data"]["next_cursor"]
        for info in json_["data"]["list"]:
            # 转发量
            reposts_count += info["reposts_count"]
            # 评论量
            comments_count += info["comments_count"]
            # 点赞量
            attitudes_count += info["attitudes_count"]
            # 播放量 lives under page_info.media_info and is absent for
            # non-video posts — treat missing as 0 instead of a bare except.
            media_info = info.get("page_info", {}).get("media_info", {})
            online_users_number += media_info.get("online_users_number", 0) or 0
            number += 1
            if number == 30:
                break
        return reposts_count, comments_count, attitudes_count, online_users_number, cur, number

    def parse(self, identify_id):
        """Accumulate stats over up to 30 posts for one account and store the
        per-post averages in ``self.data_dict[identify_id]``.

        Args:
            identify_id: Weibo account uid.
        """
        reposts_count = 0
        comments_count = 0
        attitudes_count = 0
        online_users_number = 0
        number = 0
        cur = 0
        while number < 30:
            (reposts_new, comments_new, attitudes_new, online_new,
             cur, number) = self.crawl(identify_id, number, cur)
            reposts_count += reposts_new
            comments_count += comments_new
            attitudes_count += attitudes_new
            online_users_number += online_new
            # The API signals "no more pages" with cursor -1.
            if cur == -1:
                break

        # Guard: an account may yield zero posts; averaging would otherwise
        # raise ZeroDivisionError and kill the worker thread.
        divisor = number if number else 1
        reposts_ave = int(reposts_count / divisor)
        comments_ave = int(comments_count / divisor)
        attitudes_ave = int(attitudes_count / divisor)
        online_users_ave = int(online_users_number / divisor)
        print(identify_id)
        print("转发平均：" + str(reposts_ave))
        print("评论平均：" + str(comments_ave))
        print("点赞平均：" + str(attitudes_ave))
        print("播放平均：" + str(online_users_ave))
        print("实际抓取数量：" + str(number))
        self.data_dict[identify_id] = (reposts_ave, comments_ave, attitudes_ave, online_users_ave, number)
        print(reposts_ave, comments_ave, attitudes_ave, online_users_ave, number)

    def update_excel(self):
        """Write the collected averages back into the KOL sheet and save it
        to a new ``*_spider.xlsx`` file.
        """
        excel_data = pd.read_excel("./files/SZA-PR高频原创视频KOL list（更新）(1).xlsx", sheet_name="新浪微博", engine="openpyxl")
        for uid, (reposts, comments, attitudes, plays, count) in self.data_dict.items():
            # .loc avoids chained-indexing assignment, which pandas flags with
            # SettingWithCopyWarning and may silently fail to write.
            mask = excel_data['identify_id'] == uid
            excel_data.loc[mask, '实际抓取视频数'] = count
            excel_data.loc[mask, '直发视频平均转发数'] = reposts
            excel_data.loc[mask, '直发视频平均评论数'] = comments
            excel_data.loc[mask, '直发视频平均点赞数'] = attitudes
            excel_data.loc[mask, '直发视频平均播放数'] = plays
        print(excel_data)
        # Save once after all rows are updated (the original rewrote the file
        # on every loop iteration).
        excel_data.to_excel('SZA-PR高频原创视频KOL list（更新）(1)_spider.xlsx', sheet_name='Sheet1', index=False,
                            header=True)


if __name__ == '__main__':
    spider = Spider()
    identify_id_list = spider.read_excel()
    # Fan out one worker per account; 5 concurrent requests keeps load modest.
    with ThreadPoolExecutor(max_workers=5) as pool:
        futures = [pool.submit(spider.parse, identify_id) for identify_id in identify_id_list]
        # Fix: the original wrote `for futures in futures`, shadowing the list
        # with the loop variable. .result() also re-raises any exception that
        # killed a worker instead of letting it vanish silently.
        for future in futures:
            future.result()
    spider.update_excel()
