import json
import random
import requests

from time import sleep

from absl import app

from util import packag_comment, logger

# Excel column headers (Chinese), in order: comment time, comment ID, comment
# text, commenter id, commenter name, commenter status/bio text, commenter
# gender, commenter following count, commenter follower count, like count.
keywords = ['评论时间', '评论ID', '评论内容', '评论者id', '评论者名字', '评论者说说', '评论者性别', '评论者关注数', '评论者粉丝数', '点赞数']
# Output workbook name — presumably "<subject name>_<post index>.xls";
# NOTE(review): verify against the code that actually writes the workbook.
excel_name = '黎娜娜_38.xls'


class GetLongWeibo:
    """Crawl every page of comments under one weibo post via the
    m.weibo.cn "hotflow" comment API, paging with the max_id/max_id_type
    cursor returned by each response."""

    def __init__(self):
        # Running counter of crawl iterations; also used downstream when
        # creating tables (per the original author's note).
        self.count = 0

    @staticmethod
    def _comment_url(id, mid, max_id=None, max_id_type='0'):
        """Build the hotflow URL for one page of comments.

        First page: omit max_id and send max_id_type=0. Later pages: carry
        the max_id/max_id_type cursor from the previous response.
        (The original code appended '&max_id=' twice — redundant duplicate
        query parameter, fixed here.)
        """
        base = 'https://m.weibo.cn/comments/hotflow?id=' + str(id) + '&mid=' + str(mid)
        if max_id is None:
            return base + '&max_id_type=0'
        return base + '&max_id=' + str(max_id) + '&max_id_type=' + str(max_id_type)

    def get_weibo_comment(self, id, mid):
        """Fetch all comments of post `id` (`mid` is normally the same value).

        Returns a list of packaged comment records (one per comment, as
        produced by util.packag_comment). On any mid-crawl failure the
        exception is logged and the comments gathered so far are returned.
        """
        # Request headers. The Cookie must be refreshed when the session
        # expires. NOTE(review): the original dict also carried a misspelled
        # 'user_agent' key with a typo'd value ('MMozilla...'); both keys were
        # being sent as headers — the bogus one is removed here.
        hd = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
              'Accept-Encoding': 'gzip, deflate, br',
              'Connection': 'keep-alive',
              'Host': 'm.weibo.cn',
              'Upgrade-Insecure-Requests': '1',
              'Cookie': 'SCF=Atl-GJH959wN90W-riRIiBlEjMKixuXaDw6Agb7pCDrl5dQ5QxA9yYy7gs2kUAe5UfJf5xLI4o8wNDtzqVv1K6Q.; '
                        '_T_WM=502528b44065f9c113cffe7a6d82ce82; WEIBOCN_WM=20005_0002; SSOLoginState=1703406342; '
                        'ALF=1705998342; XSRF-TOKEN=888a30; '
                        'SUB'
                        '=_2A25Ig5tWDeRhGeNI6VAW9S7KzjmIHXVr4JKerDV6PUNbktANLXTykW1NRbTMY28PrqPjjs1wQYcgCZOThtZgd1Tw; '
                        'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFBckzH77QBJMIF9zHnKII_5JpX5K-hUgL.Fo-ceozNSK5cSK'
                        '-2dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMfSozES0-7So-f; MLOGIN=1; '
                        'M_WEIBOCN_PARAMS=oid%3D4947694008140968%26luicode%3D10000011%26lfid%3D1076036244553417; '
                        'WEIBOCN_FROM=1110105030; mweibo_short_token=58dbaa681f',
              'User-Agent': 'Mozilla/5.0(Windows NT 10.0;Win64;x64;rv: 89.0) Gecko / 20100101Firefox / 89.0'
              }
        # TODO: if id/mid cannot be obtained from the first-level page,
        #  consider keeping a txt/excel list of post URLs to crawl in turn,
        #  and record failed posts to a text file for retry.
        first_url = self._comment_url(id, mid)
        print('weibo_details{}'.format(first_url))
        resp = requests.get(first_url, headers=hd)
        data = json.loads(resp.content.decode('utf-8'))['data']
        user_comments = data['data']  # the list of raw comment payloads
        max_id = data['max_id']
        max_id_type = data['max_id_type']

        comment_page = 1
        # Package the first page of comments.
        weibo_comments = [packag_comment(c) for c in user_comments]
        index = len(weibo_comments)  # running comment total, reported on failure

        catch_count = 0
        try:
            # The API signals the last page with max_id == 0.
            while max_id != 0:
                comment_page += 1
                catch_count += 1
                # Throttle: one long pause every 100 requests, otherwise a
                # short randomized pause, to avoid being rate-limited.
                if catch_count % 100 == 0:
                    sleep(30)
                else:
                    sleep(random.randint(7, 15))
                second_url = self._comment_url(id, mid, max_id, max_id_type)
                print(second_url)
                resp = requests.get(second_url, headers=hd)
                r_content = resp.content
                # Retry while the server rejects us — a "not found" /
                # "MethodNotAllowed" body means we requested too fast.
                while 'found' in str(r_content) or 'MethodNotAllowed' in str(r_content):
                    sleep(random.randint(5, 10))
                    print(second_url)
                    resp = requests.get(second_url, headers=hd)
                    r_content = resp.content

                data = json.loads(r_content.decode('utf-8'))['data']
                user_comments = data['data']
                max_id = data['max_id']
                max_id_type = data['max_id_type']
                print('当前爬取了{}页微博评论'.format(comment_page))
                for user_comment in user_comments:
                    index += 1
                    weibo_comments.append(packag_comment(user_comment))
                # NOTE(review): incremented once per *page* here, although the
                # __init__ comment says it counts posts — confirm the intent.
                self.count += 1
            return weibo_comments
        except Exception as e:
            logger.exception(e)
            print('当前评论爬取失败___当前爬取了{}条评论___后续应该是并无评论存在'.format(index))
            # Count the post once on failure (the original incremented twice,
            # both before and after the print — an obvious duplication).
            self.count += 1
            return weibo_comments

def main(_):
    """Entry point: crawl the comments of one weibo post.

    Fixes two crashes in the original:
      * ``GetLongWeibo`` was assigned without ``()`` — the class object, not
        an instance, so ``self`` was never bound.
      * ``get_long_weibo()`` does not exist; the only crawl method the class
        defines is ``get_weibo_comment(id, mid)``.
    """
    weibo = GetLongWeibo()
    # NOTE(review): sample post id taken from the commented-out values inside
    # get_weibo_comment ("id = '4948877918602103'", "mid = id") — confirm
    # which post is actually intended to be crawled.
    post_id = '4948877918602103'
    weibo.get_weibo_comment(post_id, post_id)


if __name__ == '__main__':
    app.run(main)
