import hashlib
import requests
from datetime import datetime
import json
import random
import sys
import time
import os
import urllib.parse
from utils.redis_helper import RedisHelper
from utils.utilities import message_head
import utils.general_settings as general_settings
import twitter_tokens


class TweetContentExtractor:
    """Static parsers that turn raw Twitter JSON responses (search results and
    user timelines) into flat dictionaries ready for queueing/storage."""

    @staticmethod
    def process_date(datetime_str):
        """Normalize a Twitter timestamp into separate date and time strings.

        datetime_str: raw value such as "Wed Oct 10 20:19:24 +0000 2018".
        return: ["YYYY-MM-DD", "HH:MM:SS"] on success; ["", ""] when the value
        cannot be parsed, so two-target unpacking at the call sites never
        raises ValueError.
        """
        fmt = "%a %b %d %H:%M:%S %Y"
        try:
            dt = datetime.strptime(str(datetime_str).replace('+0000 ', ''), fmt)
            return dt.strftime("%Y-%m-%d %H:%M:%S").split(' ')
        except Exception as ex:
            print(message_head(1) + "日期处理错误")
            print(message_head(1) + datetime_str)
            print(message_head(1), ex)
            # Two empty fields (not []) keep `date, time = process_date(...)`
            # from blowing up with ValueError outside the callers' try blocks;
            # empty dates are then rejected by the callers' date-range checks.
            return ["", ""]

    @staticmethod
    def extract_reply_to(txt):
        """Extract the leading @mentions (reply targets) from a tweet body.

        Only the uninterrupted run of @-words at the start of the text counts;
        the first non-@ word ends the run and later @-words are treated as
        ordinary body text.

        txt: the tweet's full_text.
        return: the reply targets joined with single spaces ('' if none).
        """
        reply_to = []
        reply_end = False
        for word in str(txt).split(' '):
            if word.startswith('@') and not reply_end:
                reply_to.append(word)
            else:
                reply_end = True
        return ' '.join(reply_to)

    @staticmethod
    def extract_searched_scroll(response_json):
        """Pull the URL-encoded cursor for the next search page out of a
        search response; returns None when the payload shape is unexpected."""
        try:
            # The number of keys under 'instructions' varies (one or two);
            # take the last entry through the path matching this payload.
            if len(response_json['timeline']['instructions']) > 1:
                entries = response_json['timeline']['instructions'][-1]['replaceEntry']['entry']
                # Only proceed when 'operation' is present; otherwise the
                # payload format has changed and we bail out with None.
                if 'operation' in entries['content'].keys():
                    content = entries['content']
                else:
                    print(message_head(1) + "无法正确获取searched_scroll！")
                    print(message_head(1), response_json)
                    return None
            else:
                entries = response_json['timeline']['instructions'][-1]['addEntries']['entries']
                content = entries[len(entries) - 1]['content']
            next_scroll = urllib.parse.quote(str(content['operation']['cursor']['value']))
            return next_scroll
        except Exception as ex:
            print(message_head(1) + "无法正确获取searched_scroll！")
            print(message_head(1), ex)
            print(message_head(1), response_json)
            return None

    @staticmethod
    def extract_searched_tweets(response_json, start_date, end_date):
        """Extract tweet records from a search response.

        Tweets outside [start_date, end_date) are skipped: the payload also
        carries quoted/referenced tweets that may fall outside the window.

        response_json: decoded search-response JSON.
        start_date: inclusive lower bound, "YYYY-MM-DD".
        end_date: exclusive upper bound, "YYYY-MM-DD".
        return: list of content dicts, or None when the payload can't be parsed.
        """
        tweet_contents = []

        try:
            tweets = response_json['globalObjects']['tweets']
            users = response_json['globalObjects']['users']
        except Exception as ex:
            print(message_head(1) + "无法正确提取tweets和users！")
            print(message_head(1), ex)
            print(message_head(1), response_json)
            return None

        for tweet in tweets:
            try:
                content_dict = {}
                content_dict['user_id'] = tweets[tweet]['user_id_str']
                content_dict['user_screen_name'] = users[tweets[tweet]['user_id_str']]['screen_name']
                content_dict['user_name'] = users[tweets[tweet]['user_id_str']]['name']
                # Search results carry no retweet indirection here, so the
                # source-author fields mirror the author fields.
                content_dict['src_user_id'] = tweets[tweet]['user_id_str']
                content_dict['src_user_screen_name'] = users[tweets[tweet]['user_id_str']]['screen_name']
                content_dict['src_user_name'] = users[tweets[tweet]['user_id_str']]['name']
                content_dict['tweet_id'] = content_dict['user_id'] + '/status/' + tweets[tweet]['id_str']
                # Stable document id: md5 of "<user_id>/status/<tweet_id>".
                hl = hashlib.md5()
                hl.update(str(content_dict['tweet_id']).encode(encoding='utf-8'))
                content_dict['_id'] = hl.hexdigest()
                content_dict['full_text'] = tweets[tweet]['full_text']
                content_dict['reply_to'] = tweets[tweet]['in_reply_to_screen_name']
                content_dict['reply_count'] = tweets[tweet]['reply_count']
                content_dict['retweet_count'] = tweets[tweet]['retweet_count']
                content_dict['quote_count'] = tweets[tweet]['quote_count']
                content_dict['favorite_count'] = tweets[tweet]['favorite_count']
                content_dict['lang'] = tweets[tweet]['lang']
                hashtags = []
                for hashtag in tweets[tweet]['entities']['hashtags']:
                    hashtags.append("#" + hashtag['text'])
                content_dict['hashtag'] = ' '.join(hashtags)
                content_dict['created_date'], content_dict['created_time'] = TweetContentExtractor.process_date(
                    tweets[tweet]['created_at'])
                # Outside the requested date window — don't keep this tweet.
                if end_date <= content_dict['created_date'] or content_dict['created_date'] < start_date:
                    continue
                content_dict['extract_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")
                content_dict['extract_time'] = datetime.strftime(datetime.now(), "%H:%M:%S")
                content_dict['tweet_json'] = tweets[tweet]
                tweet_contents.append(content_dict)
            except Exception as ex:
                print(message_head(1) + "无法正确提取tweet数据！")
                print(message_head(1), ex)
                print(message_head(1), tweet)
                return None
        return tweet_contents

    @staticmethod
    def extract_searched_user_info(data):
        """Extract user records from a search response's globalObjects.users.

        data: decoded search-response JSON.
        return: list of user-info dicts, or None when parsing fails.
        """
        user_list = []
        try:
            users = data['globalObjects']['users']
            for user in users:
                user_info = {}
                user_info['uid'] = users[user]['id_str']  # numeric ID, never changes
                hl = hashlib.md5()
                hl.update(str(user_info['uid']).encode(encoding='utf-8'))
                user_info['_id'] = hl.hexdigest()
                user_info['screen_name'] = users[user]['screen_name']  # handle (after @), mutable
                user_info['name'] = users[user]['name']  # display name, mutable
                user_info['created_date'], user_info['created_time'] = TweetContentExtractor.process_date(
                    users[user]['created_at'])  # account creation time
                user_info['description'] = users[user]['description']  # self-written bio
                user_info['location'] = users[user]['location']  # user-declared location
                user_info['favourites_count'] = users[user]['favourites_count']  # likes given to others
                user_info['followers_count'] = users[user]['followers_count']  # follower count
                user_info['friends_count'] = users[user]['friends_count']  # accounts followed
                user_info['tweet_count'] = users[user]['statuses_count']  # number of tweets
                user_info['data'] = users[user]  # full raw user JSON
                user_info['update_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")  # refresh date
                user_list.append(user_info)
            return user_list
        except Exception as ex:
            print(message_head(1) + "无法正确提取user数据")
            print(message_head(1), ex)
            print(message_head(1), data)
            return None

    @staticmethod
    def extract_timeline_user_info(data: dict):
        """Extract the user info carried by a timeline response.

        data: decoded timeline JSON.
        return: user-info dict, or None when parsing fails.
        """
        try:
            legacy = data['data']['user']['result']['legacy']
            user_info = {}
            user_info['uid'] = data['data']['user']['result']['rest_id']  # numeric ID, never changes
            hl = hashlib.md5()
            hl.update(str(user_info['uid']).encode(encoding='utf-8'))
            user_info['_id'] = hl.hexdigest()
            user_info['screen_name'] = legacy['screen_name']  # handle (after @), mutable
            user_info['name'] = legacy['name']  # display name, mutable
            user_info['created_date'], user_info['created_time'] = TweetContentExtractor.process_date(legacy['created_at'])
            user_info['description'] = legacy['description']  # self-written bio
            user_info['location'] = legacy['location']  # user-declared location
            user_info['favourites_count'] = legacy['favourites_count']  # likes given to others
            user_info['followers_count'] = legacy['followers_count']  # follower count
            user_info['friends_count'] = legacy['friends_count']  # accounts followed
            user_info['tweet_count'] = legacy['statuses_count']  # number of tweets
            user_info['data'] = data  # full raw response JSON
            user_info['update_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")  # refresh date
            return user_info
        except Exception as e:
            print(message_head(1) + "提取userinfot出错")
            print(message_head(1), e)
            print(message_head(1), data)
            return None

    @staticmethod
    def extract_timeline_tweet_content(tweet, start_date, start_time):
        """Parse one timeline entry into a content dict.

        tweet: one timeline entry JSON.
        start_date/start_time: lower boundary; entries older than this
        date+time set the 'reach_start' flag on the result.
        return: dict with keys
            processed   - True when a content dict was produced,
            data        - the content dict (or the raw entry on failure),
            reach_start - True when the entry predates start_date/start_time.
        """
        result_dict = {'processed': False, 'data': None, 'reach_start': False}
        content_dict = {}
        try:
            # Tweet deleted by its author.
            if 'TweetTombstone' == tweet['content']['itemContent']['tweet_results']['result']['__typename']:
                return result_dict
            # Tweet hidden for violating Twitter rules.
            if 'TimelineTombstone' == tweet['content']['itemContent']['itemType']:
                return result_dict
            # Author restricted visibility; payload shows TweetUnavailable.
            if 'TweetUnavailable' == tweet['content']['itemContent']['tweet_results']['result']['__typename']:
                return result_dict
            core = tweet['content']['itemContent']['tweet_results']['result']['core']
            legacy = tweet['content']['itemContent']['tweet_results']['result']['legacy']
            content_dict['user_id'] = core['user_results']['result']['rest_id']  # numeric ID, immutable
            content_dict['user_screen_name'] = core['user_results']['result']['legacy']['screen_name']  # handle (after @)
            content_dict['user_name'] = core['user_results']['result']['legacy']['name']  # display name
        except Exception as ex:
            print(message_head(1) + "extract_timeline_tweet_content出错")
            print(message_head(1), ex)
            print(message_head(1), tweet)
            result_dict['data'] = tweet
            return result_dict

        # A retweet stores the original tweet under a 'retweeted_status*' key.
        retweeted_status_result = ''
        for key in legacy.keys():
            if 'retweeted_status' in key:
                retweeted_status_result = key

        if len(retweeted_status_result) > 0:
            content_dict['src_user_id'] = legacy[retweeted_status_result]['result']['core']['user_results']['result'][
                'rest_id']  # original author's numeric ID
            content_dict['src_user_screen_name'] = \
                legacy[retweeted_status_result]['result']['core']['user_results']['result']['legacy'][
                    'screen_name']  # original author's handle (after @)
            content_dict['src_user_name'] = \
                legacy[retweeted_status_result]['result']['core']['user_results']['result']['legacy']['name']  # original author's display name
            content_dict['tweet_id'] = content_dict['src_user_id'] + '/status/' + \
                                       legacy[retweeted_status_result]['result']['rest_id']
            hl = hashlib.md5()
            hl.update(str(content_dict['tweet_id']).encode(encoding='utf-8'))
            content_dict['_id'] = hl.hexdigest()
            content_dict['pinned'] = '0'
            content_dict['full_text'] = legacy[retweeted_status_result]['result']['legacy']['full_text']
            content_dict['reply_count'] = legacy[retweeted_status_result]['result']['legacy']['reply_count']
            content_dict['retweet_count'] = legacy[retweeted_status_result]['result']['legacy']['retweet_count']
            content_dict['quote_count'] = legacy[retweeted_status_result]['result']['legacy']['quote_count']
            content_dict['favorite_count'] = legacy[retweeted_status_result]['result']['legacy']['favorite_count']
            content_dict['lang'] = legacy[retweeted_status_result]['result']['legacy']['lang']
        else:
            content_dict['src_user_id'] = content_dict['user_id']
            content_dict['src_user_screen_name'] = content_dict['user_screen_name']  # handle (after @)
            content_dict['src_user_name'] = content_dict['user_name']  # display name
            content_dict['tweet_id'] = content_dict['user_id'] + '/status/' + legacy['id_str']
            hl = hashlib.md5()
            hl.update(str(content_dict['tweet_id']).encode(encoding='utf-8'))
            content_dict['_id'] = hl.hexdigest()
            content_dict['full_text'] = legacy['full_text']
            content_dict['reply_count'] = legacy['reply_count']
            content_dict['retweet_count'] = legacy['retweet_count']
            content_dict['quote_count'] = legacy['quote_count']
            content_dict['favorite_count'] = legacy['favorite_count']
            content_dict['lang'] = legacy['lang']

        content_dict['created_date'], content_dict['created_time'] = TweetContentExtractor.process_date(legacy['created_at'])
        # Flag the entry when it predates the requested date+time boundary.
        if not (content_dict['created_date'] >= start_date and content_dict['created_time'] >= start_time):
            result_dict['reach_start'] = True

        in_reply_to_screen_name = 'in_reply_to_screen_name'
        content_dict['reply_to'] = legacy[in_reply_to_screen_name] if in_reply_to_screen_name in legacy.keys() else ''
        hashtags = []
        for hashtag in legacy['entities']['hashtags']:
            hashtags.append("#" + hashtag['text'])
        content_dict['hashtag'] = ' '.join(hashtags)
        content_dict['extract_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")
        content_dict['extract_time'] = datetime.strftime(datetime.now(), "%H:%M:%S")
        content_dict['tweet_json'] = tweet
        result_dict['processed'] = True
        result_dict['data'] = content_dict
        return result_dict

    @staticmethod
    def extract_timeline_pinned(data):
        """Extract the pinned tweet from a timeline user-info response.

        data: decoded timeline JSON.
        return: result dict of extract_timeline_tweet_content, or None when
        there is no pinned tweet.
        """
        if len(data['data']['user']['result']['timeline']['timeline']['instructions']) > 1:
            tweet = data['data']['user']['result']['timeline']['timeline']['instructions'][1]['entry']
            # Sentinel start date/time far in the past so the pinned tweet is
            # never rejected by the date boundary check.
            result_dict = TweetContentExtractor.extract_timeline_tweet_content(tweet, "1000-01-01", "01:01:01")
            return result_dict
        else:
            return None

    @staticmethod
    def extract_TimelineAddEntries(data):
        """Return the entries of the last 'TimelineAddEntries' instruction in
        a timeline response, or None when the instructions can't be parsed."""
        entries = None
        try:
            instructions = data['data']['user']['result']['timeline']['timeline']['instructions']
            for instruction in instructions:
                if instruction['type'] == 'TimelineAddEntries':
                    entries = instruction['entries']
            return entries
        except Exception as ex:
            print(message_head(1) + "解析instructions失败！")
            print(message_head(1), ex)
            return None

    @staticmethod
    def extract_timeline_tweets(data, start_date, start_time):
        """Parse every tweet entry of one timeline page.

        data: decoded timeline JSON.
        start_date/start_time: boundary forwarded to extract_timeline_tweet_content.
        return: dict with 'content' (parsed tweets), 'unprocessed' (raw
        entries that failed to parse) and 'reach_start' (True when the
        boundary was reached inside this page).
        """
        entries = TweetContentExtractor.extract_TimelineAddEntries(data)
        tw_content_dict = {'content': [], 'unprocessed': [], 'reach_start': False}
        if entries is None:
            print(message_head(1) + "获取entries失败！")
            return tw_content_dict

        # Exactly two entries means no new data: only the top/bottom cursors.
        if len(entries) == 2:
            return tw_content_dict
        # Extract tweet information from every non-cursor entry.
        for tweet in entries:
            if 'cursor-' in tweet['entryId']:
                continue
            result_dict = TweetContentExtractor.extract_timeline_tweet_content(tweet, start_date, start_time)
            if not result_dict['processed']:
                tw_content_dict['unprocessed'].append(tweet)
            else:
                tw_content_dict['content'].append(result_dict['data'])
            # Boundary reached: return what has been collected so far.
            if result_dict['reach_start']:
                tw_content_dict['reach_start'] = True
                return tw_content_dict
        return tw_content_dict

    @staticmethod
    def pack_processed_data(db, collection, data):
        """Wrap parsed data with its destination db/collection for queueing."""
        tweets_data = {}
        tweets_data['db'] = db
        tweets_data['collection'] = collection
        tweets_data['data'] = data
        return tweets_data

    @staticmethod
    def pack_unprocessed_data(command: dict, data, data_db, data_collection, error):
        """Wrap data that failed to parse, together with its intended
        destination and an error tag, for the unprocessed-data queue."""
        unprocessed_data = {}
        unprocessed_data['db'] = command['unprocessed_db']
        unprocessed_data['collection'] = command['unprocessed_collection']
        unprocessed_data['data'] = {'date': datetime.now().strftime("%Y-%m-%d"), 'time': datetime.now().strftime("%H:%M:%S"),
                                    'db': data_db, 'collection': data_collection, 'data': data, 'error': error}
        return unprocessed_data


class TwitterScraper:
    def __init__(self, twitter_config):
        """Bind config, redis connection and token producer for one scraper.

        twitter_config: settings object providing header, redis host/port,
        chromedriver paths and queue names used throughout the scraper.
        """
        self._command = None
        self._token = None
        self._twitter_config = twitter_config
        # Private copy so per-request header mutations never leak into config.
        self._header = dict(twitter_config.header)
        self._redis_client = RedisHelper(host=twitter_config.redis_host, port=twitter_config.redis_port)
        self._token_producer = twitter_tokens.TokenProducer(twitter_config.chromedriver_path,
                                                            twitter_config.chromedriver_url,
                                                            twitter_config.zip_file_name)

    def _update_guest_token_in_header(self):
        """Obtain a guest token and install it as the x-guest-token header.

        Tries the guest-token endpoint up to six times, sleeping 120 s
        between attempts; if every attempt fails, falls back to the
        browser-driven token producer, polling it every 300 s until it
        yields a token.
        """
        response = None
        self._token = None
        for attempt in range(6):
            try:
                response = requests.post(self._twitter_config.guest_token_url, headers=self._twitter_config.header).text
                self._token = json.loads(response)['guest_token']
                break
            except Exception as e:
                self._token = None
                print(message_head(1) + "guest_token_url获取guest_token失败！")
                print(message_head(1), e)
                print(message_head(1), response)
                # No sleep after the final attempt — fall through to the
                # producer fallback immediately.
                if attempt < 5:
                    time.sleep(120)

        if self._token is None:
            # Endpoint exhausted: poll the token producer until it delivers.
            while True:
                token = self._token_producer.get_token()
                if token is not None:
                    self._token = token
                    break
                time.sleep(300)
        print(message_head(0) + "获取token", self._token)
        self._header['x-guest-token'] = self._token

    def _complete_header(self, path_key):
        """Fill the per-request header fields: 'path', 'referer' and (via a
        refresh when needed) the x-guest-token.

        path_key: key in self._command holding the URL whose host-stripped
        form becomes the 'path' header.

        NOTE(review): on failure this returns None but leaves self._header
        intact; callers guard with `if self._header is None`, a condition
        this method never produces — confirm whether failure was meant to
        reset the header instead.
        """
        try:
            self._header['path'] = str(self._command[path_key]).replace('https://twitter.com', '')
            self._header['referer'] = str(self._command['referer'])
            # Reuse a live token when present; a token found invalid elsewhere
            # is reset to None, which triggers a refresh here.
            if self._token is None:
                self._update_guest_token_in_header()
        except Exception as ex:
            print(message_head(1) + "补全header失败！")
            print(message_head(1) + "header:", str(self._header))
            print(message_head(1) + "command:", str(self._command))
            print(message_head(1), ex)
            return None

    def _get_response(self, url):
        """GET *url* with the prepared header, retrying through failures.

        After each successful request, sleeps 5-10 s to pace the crawl.
        On failure: waits out full network outages (neither reference host
        answers ping), refreshes the guest token on every 5th consecutive
        failure, and gives up with None once more than 20 retries fail.
        """
        url_error_count = 0
        while True:
            try:
                response = requests.get(url, headers=self._header, timeout=20)
            except Exception as ex:
                # os.system returns 0 when ping succeeds; if neither 8.8.8.8
                # nor 114.114.114.114 is reachable, the network is almost
                # certainly down — probe again every 300 seconds.
                while os.system(u"ping 8.8.8.8") and os.system(u"ping 114.114.114.114"):
                    time.sleep(300)
                url_error_count += 1
                time.sleep(30)
                # Every 5th consecutive failure, assume a stale guest token.
                if url_error_count % 5 == 0:
                    self._update_guest_token_in_header()
                # More than 20 consecutive failures: report and give up.
                if url_error_count > 20:
                    print(message_head(1) + "打开url失败！")
                    print(url)
                    print(ex)
                    return None
            else:
                time.sleep(5 + random.random() * 5)  # pacing between requests
                return response

    def _scrape_timeline(self):
        """Scrape one user's timeline as described by self._command.

        Phase 1 fetches the user-info page (retrying through Forbidden /
        Over-capacity / malformed responses) and queues the parsed user info.
        Phase 2 pages through the timeline via cursors, queueing parsed
        tweets until the command's start date/time is reached or the
        timeline is exhausted.  Failed commands go back to the error task
        queue; unparseable payloads are queued as unprocessed data.
        """
        forbidden_count = 0
        data_error_count = 0

        while True:
            self._complete_header('userinfo_url')
            # An empty header means the incoming command misses required
            # fields; abandon the command without executing it.
            # NOTE(review): _complete_header never assigns self._header = None,
            # so this guard looks unreachable — confirm the intended contract.
            if self._header is None:
                return

            response = self._get_response(self._command['userinfo_url'])
            if response is None:
                self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                print(message_head(1) + "命令返回到错误队列................")
                print(message_head(1), self._command)
                return

            data = json.loads(response.text)
            # A top-level 'data' key marks a well-formed payload.
            if 'data' not in data.keys():
                # Access forbidden by the server: refresh the guest token.
                if 'Forbidden' in str(data):
                    forbidden_count += 1
                    data_error_count = 0
                    # Five token refreshes did not help: give up on this
                    # command and move on to the next one.
                    if forbidden_count > 5:
                        print(message_head(1) + "连续获取token超过5次，退出循环")
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        return
                    # print(message_head(0) + "获取新token")
                    self._update_guest_token_in_header()
                # Twitter server over capacity: just wait and retry.
                elif 'Over capacity' in str(data):
                    data_error_count = 0
                    forbidden_count = 0
                    print(message_head(1) + "Over capacity，请求超出服务器容量.............")
                    time.sleep(120)
                else:
                    forbidden_count = 0
                    data_error_count += 1
                    print(message_head(1) + "timeline response数据错误")
                    print(message_head(1), data)
                    time.sleep(30)
                    # After max_error consecutive bad payloads the format has
                    # probably changed: return the task and exit the process.
                    max_error = 10
                    if data_error_count > max_error:
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        print(message_head(1) + "timeline response连续 " + str(max_error) + " 次数据错误")
                        sys.exit()
            else:
                data_error_count = 0
                user_info = TweetContentExtractor.extract_timeline_user_info(data)
                # Empty user_info means the response is malformed, most likely
                # a format change: stop scraping this command.
                if user_info is None:
                    unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['userinfo_collection'], 'timeline_user_info')
                    self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                    print(message_head(1) + "提取user_info出错，命令返回到错误队列................")
                    print(message_head(1), self._command)
                    return
                else:
                    processed_data = TweetContentExtractor.pack_processed_data(self._command['db'], self._command['userinfo_collection'], user_info)
                    self._redis_client.write_list(self._twitter_config.response_queue, processed_data)
                    break

        # The timeline response contains the pinned tweet and the next cursor.
        # Grab the user id and start paging with an empty cursor.
        userId = data['data']['user']['result']['rest_id']
        cursor = ""
        tweets_empty_count = 0
        forbidden_count = 0
        scrape_count = 0
        while True:
            new_url = str.format(self._command['timeline_url'], userId, cursor)
            self._header['path'] = new_url.replace('https://twitter.com', '')
            response = self._get_response(new_url)
            # No response means the fetch failed: push the command to the
            # error queue.
            if response is None:
                self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                return

            data = json.loads(response.text)
            # Malformed payload.
            if 'data' not in data.keys():
                # Access forbidden by the server: refresh the guest token.
                if 'Forbidden' in str(data):
                    forbidden_count += 1
                    data_error_count = 0
                    # Five token refreshes did not help: give up on this
                    # command and move on to the next one.
                    if forbidden_count > 5:
                        print(message_head(1) + "连续获取token超过5次，退出循环")
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        return
                    # print(message_head(0) + "获取新token")
                    self._update_guest_token_in_header()
                # Twitter server over capacity: just wait and retry.
                elif 'Over capacity' in str(data):
                    forbidden_count = 0
                    data_error_count = 0
                    print(message_head(1) + "Over capacity，请求超出服务器容量.............")
                    time.sleep(120)
                else:
                    data_error_count += 1
                    forbidden_count = 0
                    print(message_head(1) + "timeline response数据错误")
                    print(message_head(1), data)
                    time.sleep(30)
                    # After max_error consecutive bad payloads the format has
                    # probably changed: queue the payload as unprocessed and
                    # return the task.
                    max_error = 10
                    if data_error_count > max_error:
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        print(message_head(1) + "response连续 " + str(max_error) + " 次数据错误")
                        unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'timeline_tweet')
                        self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                        return
            else:
                forbidden_count = 0
                data_error_count = 0
                # The first timeline entry may be the pinned tweet.
                # if scrape_count <= 1:
                #     pinned = TweetContentExtractor.extract_timeline_pinned(data)
                #     if pinned is None:
                #         pass
                #     elif pinned['processed']:
                #         processed_data = TweetContentExtractor.pack_processed_data(self._command['db'], self._command['tweet_collection'], data)
                #         self._redis_client.write_list(self._twitter_config.response_queue, processed_data)
                #     else:
                #         # 增加该推文本应该存储的位置信息
                #         unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'timeline_pinned')
                #         self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                tw_content_dict = TweetContentExtractor.extract_timeline_tweets(data, self._command['start_date'], self._command['start_time'])
                # True: the requested start date has been reached, stop here.
                if tw_content_dict['reach_start']:
                    print(message_head(0) + "已爬取到要求日期终点！共获取【" + str(scrape_count) + "】个请求数据......................")
                    break
                # Empty content means the bottom was reached, no new data.
                if len(tw_content_dict['content']) == 0:
                    tweets_empty_count += 1
                    if tweets_empty_count > 5:
                        print(message_head(0) + "爬取完毕！共获取【" + str(scrape_count) + "】个请求数据......................")
                        break
                else:
                    processed_data = TweetContentExtractor.pack_processed_data(self._command['db'], self._command['tweet_collection'], tw_content_dict['content'])
                    self._redis_client.write_list(self._twitter_config.response_queue, processed_data)
                    scrape_count += 1
                # Queue entries that could not be parsed as unprocessed data.
                if len(tw_content_dict['unprocessed']) > 0:
                    unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'timeline_tweet')
                    self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)

                entries = TweetContentExtractor.extract_TimelineAddEntries(data)
                if entries is None:
                    print(message_head(1) + "获取entries失败！")
                    break
                if 'Bottom' == entries[-1]['content']['cursorType']:
                    scroll = urllib.parse.quote(str(entries[-1]['content']['value']))
                    cursor = 'cursor%22%3A%22' + scroll + "%22%2C%22"
                    print(message_head(0) + "获取第 " + str(scrape_count) + " 个请求数据，本次推文共  " + str(len(tw_content_dict['content'])) + "\t\t获取" + cursor)
                else:
                    print(message_head(1) + "json未找到scroll")
                    print(message_head(1), data)
                    break

    def _scrape_search(self):
        tweets_empty_count = 0
        forbidden_count = 0
        scrape_count = 0
        data_error_count = 0
        replace_str = 'pc=1&spelling_corrections=1'
        scroll = ""
        self._complete_header('url')
        # 如果header为空，表示传过来的命令有缺失的信息，不予执行命令并抛弃
        if self._header is None:
            return
        while True:
            # 每100次输出一次任务信息
            if scrape_count % 100 == 0:
                print(message_head(0) + "当前任务：" + str(self._command))
                # self._update_guest_token_in_header()
            cursor = 'cursor=' + scroll + "&" + replace_str if len(scroll) > 0 else replace_str
            new_url = str(self._command['url']).replace(replace_str, cursor)
            self._header['path'] = new_url.replace('https://twitter.com', '')

            response = self._get_response(new_url)

            # 如果打开url有误，则命令入库
            if response is None:
                self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                print(message_head(1) + "命令返回到错误队列")
                print(message_head(1), self._command)
                return

            data = json.loads(str(response.text))
            # 存在globalObjects则表示存在数据
            if 'globalObjects' not in data.keys():
                # 访问被服务器禁止，更新token
                if 'Forbidden' in str(data):
                    forbidden_count += 1
                    data_error_count = 0
                    # 获取5次token依旧没有解决问题，退出循环，进行下一个命令
                    if forbidden_count > 5:
                        print(message_head(1) + "连续获取token超过5次，退出循环")
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        return
                    print(message_head(0) + "获取新token")
                    self._update_guest_token_in_header()
                # 服务超出twitter服务器容量限制，不停等待并重新获取即可
                elif 'Over capacity' in str(data):
                    forbidden_count = 0
                    data_error_count = 0
                    print(message_head(1) + "Over capacity，请求超出服务器容量.............")
                    time.sleep(120)
                else:
                    data_error_count += 1
                    forbidden_count = 0
                    print(message_head(1) + "search response数据错误")
                    print(message_head(1), data)
                    time.sleep(30)
                    # 连续10次数据错误，可能是twitter数据格式变化，返回任务
                    max_error = 10
                    if data_error_count > max_error:
                        self._redis_client.write_list(self._twitter_config.error_task_queue, self._command)
                        print(message_head(1) + "response连续 " + str(max_error) + " 次数据错误")
                        unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'searched_response')
                        self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                        sys.exit()
            else:
                forbidden_count = 0
                data_error_count = 0
                # 解析数据，返回一个字典，成功了则key：processed为True，不成功则为False。key：data表示数据
                tweets = TweetContentExtractor.extract_searched_tweets(data, self._command['start_date'], self._command['end_date'])
                if tweets is None:
                    unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'searched_tweets')
                    self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                elif len(tweets) == 0:
                    tweets_empty_count += 1
                    if tweets_empty_count > 5:
                        print(message_head(0) + "爬取完毕！共获取【" + str(scrape_count - 1) + "】个请求数据...............")
                        break
                else:
                    scrape_count += 1
                    tweets_empty_count = 0
                    scroll = TweetContentExtractor.extract_searched_scroll(data)
                    # 能获取到scroll，把scroll加入到tweets中存入数据库
                    if scroll is not None:
                        for tweet in tweets:
                            tweet['scroll'] = scroll
                        processed_data = TweetContentExtractor.pack_processed_data(self._command['db'], self._command['tweet_collection'], tweets)
                        self._redis_client.write_list(self._twitter_config.response_queue, processed_data)
                    else:
                        print(message_head(1) + "获取scroll失败")
                        unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['tweet_collection'], 'searched_scroll')
                        self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                        return

                users = TweetContentExtractor.extract_searched_user_info(data)
                if users is None:
                    unprocessed_data = TweetContentExtractor.pack_unprocessed_data(self._command, data, self._command['db'], self._command['userinfo_collection'], 'searched_users')
                    self._redis_client.write_list(self._twitter_config.response_queue, unprocessed_data)
                elif len(users) > 0:
                    processed_data = TweetContentExtractor.pack_processed_data(self._command['db'], self._command['userinfo_collection'], users)
                    self._redis_client.write_list(self._twitter_config.response_queue, processed_data)

                print(message_head(0) + "获取第 " + str(scrape_count) + " 个请求数据，本次推文共 " + str(len(tweets)) + "\t\t获取下一个scroll\t" + scroll)

    def _get_command(self):
        """Fetch the next scraping command from Redis.

        Urgent tasks take priority over the normal task queue. While both
        queues are empty, polls every 30 seconds; gives up after more than
        10 consecutive empty polls.

        Returns:
            dict: the next command to execute, or None when the queues
            stayed empty for the whole polling budget.

        Exits the process (sys.exit(1)) when Redis is unreachable at
        startup or a command arrives with missing/empty fields.
        """
        # Fail fast if the Redis connection itself is broken.
        try:
            self._redis_client.get_list_length(self._twitter_config.task_queue)
        except Exception as ex:
            print(message_head(1) + "redis初始化失败，请检查参数是否正确以及远端是否开启服务！")
            print(message_head(1), ex)
            sys.exit(1)

        idle_polls = 0
        while True:
            try:
                if self._redis_client.get_list_length(self._twitter_config.urgent_task_queue) > 0:
                    # Urgent queue drains first.
                    command = self._redis_client.read_list(self._twitter_config.urgent_task_queue)
                elif self._redis_client.get_list_length(self._twitter_config.task_queue) > 0:
                    command = self._redis_client.read_list(self._twitter_config.task_queue)
                else:
                    print(message_head(0) + "等待任务.........")
                    time.sleep(30)
                    idle_polls += 1
                    if idle_polls > 10:
                        return None
                    continue
            except Exception as e:
                # Transient Redis error: back off and retry.
                print(message_head(1) + "获取任务出错******")
                print(message_head(1), e)
                time.sleep(120)
                continue

            if command is None:
                continue

            # Validate that every field of the command is non-empty; a value
            # without a length lands in the except branch and aborts as well.
            try:
                for value in command.values():
                    if len(value) == 0:
                        print(message_head(1) + "命令内容有缺失，请检查命令内容！")
                        sys.exit(1)
                print(message_head(0) + "获取命令：" + str(command))
                return command
            except Exception as ex:
                print(message_head(1) + "处理命令字段不完整！")
                print(message_head(1), ex)
                sys.exit(1)

    def start(self):
        """Main loop: pull commands from Redis and dispatch them by task type.

        Terminates when _get_command() returns None, i.e. the task queues
        stayed empty for 10 consecutive polls.
        """
        while True:
            self._command = self._get_command()
            if self._command is None:
                print(message_head(0) + "连续10次任务队列为空，即将退出.........")
                break
            task_type = self._command['type']
            if task_type == self._twitter_config.task_type_search:
                self._scrape_search()
            elif task_type == self._twitter_config.task_type_timeline:
                self._scrape_timeline()
            else:
                # Unknown task type: report it and move on to the next command.
                print(message_head(1) + "命令类型未定义！")
                print(message_head(1), self._command)


if __name__ == '__main__':
    # Entry point: build a scraper from the default Twitter config and run
    # its command-processing loop until the task queues dry up.
    scraper = TwitterScraper(general_settings.TwitterConfig())
    scraper.start()
