import hashlib
import requests
from datetime import datetime
import json
import random
import re
import sys
import time
import redis
import os
import urllib.parse


def now_datetime_str():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    fmt = "%Y-%m-%d %H:%M:%S"
    return datetime.now().strftime(fmt)


def read_redis(redis_client, queue_name):
    """Pop one item from the tail of *queue_name*, retrying forever.

    On any Redis error the failure is logged and the call retries after 60s;
    the function only returns once ``rpop`` succeeds.
    """
    while True:
        try:
            item = redis_client.rpop(queue_name)
        except Exception as err:
            stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print(stamp + "\t\tredis读取错误，操作队列名称：" + queue_name)
            print(err)
            time.sleep(60)
        else:
            return item


def write_redis(redis_client, queue_name, data):
    """Serialize *data* as JSON and push it onto the head of *queue_name*.

    Retries forever: on any Redis error the failure and payload are logged
    and the push is attempted again after 60s.
    """
    while True:
        try:
            redis_client.lpush(queue_name, json.dumps(data))
            return
        except Exception as err:
            stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print(stamp + "\t\tredis写入错误:")
            print(err)
            print(data)
            time.sleep(60)


def get_token(redis_client, request_token_queue_name, get_csrf_token, token_queue_name):
    """Obtain an auth token dict from the token queue, requesting one if needed.

    Protocol: if *token_queue_name* already holds a token, pop and return it
    (a dict parsed from JSON — complete_header() expects 'csrf' and 'guest'
    keys). Otherwise push *get_csrf_token* onto *request_token_queue_name* to
    ask the external token producer for one, then poll every 30s. If the
    request has been consumed (request queue empty) and ~5 minutes pass with
    no token, the request is re-sent. Loops forever until a token arrives.
    """
    check_count = 0
    send_request = False
    while True:
        try:
            # A token is already queued: take it directly.
            if redis_client.llen(token_queue_name) > 0:
                token = json.loads(read_redis(redis_client, token_queue_name))
                print(now_datetime_str() + "\t\t获取token：" + str(token))
                return token
            else:
                # No token queued: send a request once, then keep polling the queue.
                if not send_request:
                    write_redis(redis_client, request_token_queue_name, get_csrf_token)
                    print(now_datetime_str() + "\t\t发送token请求")
                    send_request = True
                else:
                    # Request already sent: if the request queue is empty (the
                    # producer has consumed it), bump the check counter.
                    if redis_client.llen(request_token_queue_name) == 0:
                        check_count += 1
                        # 10 empty checks at 30s each (~5 minutes) with no token:
                        # reset the flags so the request is sent again.
                        if check_count > 10:
                            send_request = False
                            check_count = 0
                    time.sleep(30)
        except Exception as ex:
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\ttoken获取失败")
            print(ex)
            time.sleep(60)
            continue


def process_date(datetime_str):
    """Split a Twitter 'created_at' timestamp into ['YYYY-MM-DD', 'HH:MM:SS'].

    Input looks like 'Wed Aug 27 13:08:45 +0000 2008'; the '+0000 ' zone
    marker is stripped before parsing.

    BUG FIX: on a parse failure this used to return an empty list, but every
    caller unpacks the result into two variables, which raised ValueError.
    It now logs the bad value and returns ['', ''] so callers get empty
    date/time fields instead of crashing.
    """
    fmt = "%a %b %d %H:%M:%S %Y"
    try:
        dt = datetime.strptime(str(datetime_str).replace('+0000 ', ''), fmt)
        return dt.strftime("%Y-%m-%d %H:%M:%S").split(' ')
    except Exception as ex:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 日期处理错误")
        print(datetime_str)
        print(ex)
        return ['', '']


def split_content_reply2(txt):
    """Return the leading run of '@mention' words in *txt*, joined by spaces.

    Words are split on single spaces; mention collection stops at the first
    word that does not start with '@'. Returns '' when *txt* does not begin
    with a mention.

    Cleanup: the original version also accumulated the remaining words into
    a `content` list that was never returned — that dead code (and the
    `reply_end` flag it required) has been removed; behavior is unchanged.
    """
    mentions = []
    for word in str(txt).split(' '):
        if not word.startswith('@'):
            break
        mentions.append(word)
    return ' '.join(mentions)


def extract_searched_tweets(tweets_json):
    """Flatten the tweets of a search-API response into a list of dicts.

    Returns (tweet_dicts, False); the constant False second element mirrors
    the (tweets, reach_start_date) shape used by the timeline extractor.
    """
    results = []
    all_tweets = tweets_json['globalObjects']['tweets']
    all_users = tweets_json['globalObjects']['users']

    for tweet_key in all_tweets:
        tw = all_tweets[tweet_key]
        author = all_users[tw['user_id_str']]
        item = {}
        item['user_id'] = tw['user_id_str']
        item['user_screen_name'] = author['screen_name']
        item['user_name'] = author['name']
        # Search results carry no retweet indirection, so source == author.
        item['src_user_id'] = tw['user_id_str']
        item['src_user_screen_name'] = author['screen_name']
        item['src_user_name'] = author['name']
        item['tweet_id'] = item['user_id'] + '/status/' + tw['id_str']
        # Stable document key: md5 of the "user/status/id" path.
        digest = hashlib.md5()
        digest.update(str(item['tweet_id']).encode(encoding='utf-8'))
        item['_id'] = digest.hexdigest()
        item['full_text'] = tw['full_text']
        item['reply_to'] = tw['in_reply_to_screen_name']
        item['reply_count'] = tw['reply_count']
        item['retweet_count'] = tw['retweet_count']
        item['quote_count'] = tw['quote_count']
        item['favorite_count'] = tw['favorite_count']
        item['lang'] = tw['lang']
        item['created_date'], item['created_time'] = process_date(tw['created_at'])
        item['hashtag'] = ' '.join(re.findall(r"#[^ ]+", item['full_text']))
        item['tweet_json'] = tw
        # content_dict['user_json'] = user[tweets[tweet]['user_id_str']]
        results.append(item)
    return results, False


def extract_searched_user_info(data):
    """Build per-user summary dicts from the 'users' map of a search response."""
    results = []
    all_users = data['globalObjects']['users']
    for user_key in all_users:
        raw = all_users[user_key]
        info = {}
        info['uid'] = raw['id_str']  # numeric user ID, immutable
        # Stable document key: md5 of the numeric ID.
        digest = hashlib.md5()
        digest.update(str(info['uid']).encode(encoding='utf-8'))
        info['_id'] = digest.hexdigest()
        info['screen_name'] = raw['screen_name']  # handle (after '@'), user-changeable
        info['name'] = raw['name']  # display name, user-changeable
        info['created_date'], info['created_time'] = process_date(raw['created_at'])  # account creation time
        info['description'] = raw['description']  # self-written bio
        info['location'] = raw['location']  # user-declared location
        info['favourites_count'] = raw['favourites_count']  # likes given to others
        info['followers_count'] = raw['followers_count']  # follower count
        info['friends_count'] = raw['friends_count']  # following count
        info['tweet_count'] = raw['statuses_count']  # number of tweets
        info['data'] = raw  # full raw user JSON
        info['update_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")  # date this record was refreshed
        results.append(info)
    return results


def extract_timeline_user_info(data):
    """Build one user summary dict from a timeline-API user response."""
    legacy = data['data']['user']['legacy']
    info = {}
    info['uid'] = data['data']['user']['rest_id']  # numeric user ID, immutable
    # Stable document key: md5 of the numeric ID.
    digest = hashlib.md5()
    digest.update(str(info['uid']).encode(encoding='utf-8'))
    info['_id'] = digest.hexdigest()
    info['screen_name'] = legacy['screen_name']  # handle (after '@'), user-changeable
    info['name'] = legacy['name']  # display name, user-changeable
    info['created_date'], info['created_time'] = process_date(legacy['created_at'])  # account creation time
    info['description'] = legacy['description']  # self-written bio
    info['location'] = legacy['location']  # user-declared location
    info['favourites_count'] = legacy['favourites_count']  # likes given to others
    info['followers_count'] = legacy['followers_count']  # follower count
    info['friends_count'] = legacy['friends_count']  # following count
    info['tweet_count'] = legacy['statuses_count']  # number of tweets
    info['data'] = data  # full raw response JSON
    info['update_date'] = datetime.strftime(datetime.now(), "%Y-%m-%d")  # date this record was refreshed
    return info


# Extract the content of a single timeline tweet JSON entry.
# Returns (content_dict, False) normally; (None, True) once a tweet at or
# before the configured start date/time is reached (the scrape should stop);
# (None, False) when the entry cannot be parsed.
def extract_timeline_tweet_content(tweet, start_date, start_time):
    content_dict = {}
    try:
        core = tweet['content']['itemContent']['tweet']['core']
        legacy = tweet['content']['itemContent']['tweet']['legacy']

        content_dict['user_id'] = core['user']['rest_id']  # numeric user ID, immutable
        content_dict['user_screen_name'] = core['user']['legacy']['screen_name']  # handle (after '@')
        content_dict['user_name'] = core['user']['legacy']['name']  # display name

        if 'retweeted_status' in legacy.keys():
            # Retweet: credit the source author and take text/counts from the source tweet.
            src = legacy['retweeted_status']
            content_dict['src_user_id'] = src['core']['user']['rest_id']  # source user ID
            content_dict['src_user_screen_name'] = src['core']['user']['legacy']['screen_name']  # source handle
            content_dict['src_user_name'] = src['core']['user']['legacy']['name']  # source display name
            content_dict['tweet_id'] = content_dict['src_user_id'] + '/status/' + src['rest_id']
            hl = hashlib.md5()
            hl.update(str(content_dict['tweet_id']).encode(encoding='utf-8'))
            content_dict['_id'] = hl.hexdigest()
            content_dict['pinned'] = '0'
            content_dict['full_text'] = src['legacy']['full_text']
            content_dict['reply_count'] = src['legacy']['reply_count']
            content_dict['retweet_count'] = src['legacy']['retweet_count']
            content_dict['quote_count'] = src['legacy']['quote_count']
            content_dict['favorite_count'] = src['legacy']['favorite_count']
            content_dict['lang'] = src['legacy']['lang']
        else:
            # Original tweet: the author is also the source user.
            # NOTE(review): 'pinned' is only set on the retweet branch above;
            # original tweets carry no 'pinned' key — confirm downstream
            # consumers tolerate the missing key.
            content_dict['src_user_id'] = content_dict['user_id']
            content_dict['src_user_screen_name'] = content_dict['user_screen_name']
            content_dict['src_user_name'] = content_dict['user_name']
            content_dict['tweet_id'] = content_dict['user_id'] + '/status/' + legacy['id_str']
            hl = hashlib.md5()
            hl.update(str(content_dict['tweet_id']).encode(encoding='utf-8'))
            content_dict['_id'] = hl.hexdigest()
            content_dict['full_text'] = legacy['full_text']
            content_dict['reply_count'] = legacy['reply_count']
            content_dict['retweet_count'] = legacy['retweet_count']
            content_dict['quote_count'] = legacy['quote_count']
            content_dict['favorite_count'] = legacy['favorite_count']
            content_dict['lang'] = legacy['lang']

        content_dict['created_date'], content_dict['created_time'] = process_date(legacy['created_at'])
        created_date = content_dict['created_date']
        created_time = content_dict['created_time']
        # BUG FIX: the old stop test (date <= start_date AND time <= start_time)
        # failed to stop on tweets from an earlier day whose time-of-day was
        # later than start_time. Stop when the tweet is strictly before the
        # start date, or on the start date at/before the start time.
        # Lexicographic comparison is valid for the "YYYY-MM-DD"/"HH:MM:SS"
        # strings produced by process_date; the empty-string guard skips the
        # check when date parsing failed.
        if created_date and (created_date < start_date or
                             (created_date == start_date and created_time <= start_time)):
            return None, True
        content_dict['reply_to'] = legacy['in_reply_to_screen_name']
        content_dict['hashtag'] = ' '.join(re.findall(r"#[^ ]+", content_dict['full_text']))
        content_dict['tweet_json'] = tweet
        return content_dict, False
    except Exception as ex:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "   ERROR: " + str(tweet))
        print(ex)
        return None, False


# Extract the pinned tweet from a timeline response.
def extract_timeline_pinned(data):
    """Return the pinned tweet's content dict, or None when absent or unparsable."""
    instructions = data['data']['user']['result']['timeline']['timeline']['instructions']
    # A single instruction means there is no pinned-tweet entry.
    if len(instructions) <= 1:
        return None
    entry = instructions[1]['entry']
    # Sentinel date/time far in the past so the stop condition never triggers;
    # the extractor already returns None on parse failure.
    content_dict, _ = extract_timeline_tweet_content(entry, "1000-01-01", "01:01:01")
    # content_dict['pinned'] = '1'
    return content_dict


def extract_timeline_tweets(data, start_date, start_time):
    """Collect the tweets from one timeline page.

    Returns (tweets, reach_start_date): reach_start_date becomes True when a
    tweet at/before the start boundary was encountered and paging should stop.
    """
    entries = data['data']['user']['result']['timeline']['timeline']['instructions'][0]['entries']
    collected = []

    # Exactly two entries means only the top/bottom cursors remain: no new data.
    if len(entries) == 2:
        return collected, False
    for entry in entries:
        # Skip cursor entries; they are pagination markers, not tweets.
        if 'cursor-' in entry['entryId']:
            continue
        content_dict, reached = extract_timeline_tweet_content(entry, start_date, start_time)
        if reached:
            return collected, True
        if content_dict is not None:
            collected.append(content_dict)
    return collected, False


def complete_header(header, command, redis_client, request_token_queue_name, request_token, token_queue_name):
    """Fill the request *header* in place from *command* and the shared token.

    Sets 'path', 'referer', 'x-csrf-token' and 'x-guest-token' on *header* and
    returns it. Returns None when *command* lacks a required field (any
    exception lands in the handler below). Reads — and refreshes when missing
    or empty — the module-level ``token`` dict, which must provide 'csrf' and
    'guest' keys.
    """
    try:
        header['path'] = str(command['url']).replace('https://twitter.com', '')
        header['referer'] = str(command['referer'])
        global token
        # Fetch a fresh token when the cached one is absent, non-dict, or empty.
        if isinstance(token, dict):
            if len(token.keys()) == 0:
                token = get_token(redis_client, request_token_queue_name, request_token, token_queue_name)
        else:
            token = get_token(redis_client, request_token_queue_name, request_token, token_queue_name)
        header['x-csrf-token'] = str(token['csrf'])
        header['x-guest-token'] = str(token['guest'])
        return header
    except Exception as ex:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 command命令缺失必要内容！")
        print(command)
        print(ex)
        return None


def get_response(redis_client, header, url):
    """GET *url* with *header*, retrying on failure; return the response or None.

    A successful request is followed by a 5-10s pacing sleep. On failure the
    call sleeps 30s and retries; every 5th retry it refreshes the auth token,
    and after more than 20 retries it gives up and returns None.
    """
    url_error_count = 0
    while True:
        try:
            response = requests.get(url, headers=header, timeout=15)
            print("get")
            time.sleep(5 + random.random() * 5)  # pacing delay between requests
            return response
        except Exception as ex:
            url_error_count += 1
            print(now_datetime_str() + "\t\t重新打开url")
            time.sleep(30)
            # Every 5th retry, refresh the token and patch it into the header.
            if url_error_count % 5 == 0:
                # BUG FIX: the queue names below are free variables that only
                # exist when this file runs as a script (__main__). A NameError
                # raised here previously escaped this except handler and
                # crashed the caller; degrade to "skip refresh" instead.
                try:
                    token = get_token(redis_client, request_token_queue_name, request_token, token_queue_name)
                    header['x-csrf-token'] = str(token['csrf'])
                    header['x-guest-token'] = str(token['guest'])
                except NameError as name_err:
                    print(now_datetime_str() + "\t\ttoken refresh skipped: " + str(name_err))
            # Give up after 20 consecutive failures and let the caller re-queue.
            if url_error_count > 20:
                print(now_datetime_str() + "\t\t进程错误：【" + str(os.getpid()) + "】 打开url失败！")
                print(url)
                print(ex)
                return None


def scrape_timeline(header, command, redis_client, request_token_queue_name, request_token,
                    token_queue_name, error_task_queue_name, response_queue_name):
    """Execute one TIMELINE command: scrape a user's timeline into Redis.

    Flow: complete the header, fetch the user-info URL, push the user record
    to *response_queue_name*, then page through the timeline with cursors
    until no new data arrives, the start date is reached, or errors
    accumulate. Failed commands are pushed to *error_task_queue_name*.
    """
    # Fetch the user info; on a bad URL the command goes to the error queue.
    try:
        processed_header = complete_header(header, command, redis_client, request_token_queue_name
                                           , request_token, token_queue_name)
        # A None header means the command is missing required fields: drop it.
        if processed_header is None:
            return
        response = get_response(redis_client, processed_header, command['url'])
        if response is None:
            write_redis(redis_client, error_task_queue_name, command)
            return
    except Exception as ex:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 打开url出错！scrape_timeline head")
        print(command)
        print(ex)
        write_redis(redis_client, error_task_queue_name, command)
        return

    data = json.loads(response.text)
    if 'data' not in data.keys():
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 response数据错误")
        print(data)
        print(command)
        return

    user_info = {}
    user_info['db'] = command['db']
    user_info['collection'] = command['user_info']
    user_info['data'] = extract_timeline_user_info(data)
    write_redis(redis_client, response_queue_name, user_info)

    # The timeline response contains the pinned tweet and the next cursor.
    # Grab the user id and start with an empty cursor (first page).
    userId = data['data']['user']['rest_id']
    cursor = ""
    tweets_empty_count = 0
    forbidden_count = 0
    scrape_count = 1
    while True:
        new_url = str.format(command['timeline_url'], userId, cursor)
        processed_header['path'] = new_url.replace('https://twitter.com', '')
        response = get_response(redis_client, processed_header, new_url)
        # Fetch failed: return the command to the error queue.
        if response is None:
            write_redis(redis_client, error_task_queue_name, command)
            return

        data = json.loads(response.text)
        # Payload is abnormal
        if 'data' not in data.keys():
            # Forbidden by the server: refresh the token and retry.
            if 'Forbidden' in str(data):
                forbidden_count += 1
                if forbidden_count > 5:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t连续获取token超过5次，退出循环")
                    write_redis(redis_client, error_task_queue_name, command)
                    return
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 获取新token")
                token = get_token(redis_client, request_token_queue_name, request_token, token_queue_name)
                processed_header['x-csrf-token'] = str(token['csrf'])
                processed_header['x-guest-token'] = str(token['guest'])
            else:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 response数据错误")
                print(data)
                write_redis(redis_client, error_task_queue_name, command)
                return
        else:
            forbidden_count = 0
            # The first timeline entry may be a pinned tweet.
            # NOTE(review): extract_timeline_pinned may return None, which is
            # still written to the response queue as 'data' — confirm the
            # consumer tolerates a null payload.
            if scrape_count <= 1:
                pinned = extract_timeline_pinned(data)
                tweets_data = {}
                tweets_data['db'] = command['db']
                tweets_data['collection'] = command['collection']
                tweets_data['data'] = pinned
                write_redis(redis_client, response_queue_name, tweets_data)
            tweets, reach_start_date = extract_timeline_tweets(data, command['start_date'], command['start_time'])

            # Zero tweets means we have reached the bottom: no new data.
            if len(tweets) == 0:
                tweets_empty_count += 1
                if tweets_empty_count > 5:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 爬取完毕！共获取【" + str(scrape_count) + "】个请求数据......................")
                    break
            else:
                tweets_data = {}
                tweets_data['db'] = command['db']
                tweets_data['collection'] = command['collection']
                tweets_data['data'] = tweets
                write_redis(redis_client, response_queue_name, tweets_data)
                scrape_count += 1
                if scrape_count % 10 == 1:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t获取第 " + str(scrape_count) + " 个请求数据，本次推文共  " + str(len(tweets)))
            entries = data['data']['user']['result']['timeline']['timeline']['instructions'][0]['entries']
            if 'Bottom' == entries[-1]['content']['cursorType']:
                scroll = urllib.parse.quote(str(entries[-1]['content']['value']))
                cursor = 'cursor%22%3A%22' + scroll + "%22%2C%22"
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "   进程：【" + str(os.getpid()) + "】获取" + scroll)
            else:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 json未找到scroll")
                print(data)
                break


def scrape_search(header, command, redis_client, request_token_queue_name, request_token, token_queue_name, error_task_queue_name, response_queue_name):
    """Execute one SEARCH command: page through search results into Redis.

    Loops with a pagination cursor, writing tweet batches and user batches to
    *response_queue_name* (tagged with the command's db/collection). Stops
    after 5+ consecutive empty pages; repeated Forbidden responses or
    malformed payloads return the command to *error_task_queue_name*.
    """
    tweets_empty_count = 0
    forbidden_count = 0
    scrape_count = 0
    data_error_count = 0
    replace_str = 'pc=1&spelling_corrections=1'
    scroll = ""
    processed_header = complete_header(header, command, redis_client, request_token_queue_name, request_token, token_queue_name)
    # A None header means the command is missing required fields: drop it.
    if processed_header is None:
        return
    while True:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t当前任务：\t\t" + str(command))
        # Splice the pagination cursor into the search URL's query string.
        cursor = 'cursor=' + scroll + "&" + replace_str if len(scroll) > 0 else replace_str
        new_url = str(command['url']).replace(replace_str, cursor)
        processed_header['path'] = new_url.replace('https://twitter.com', '')
        response = get_response(redis_client, processed_header, new_url)
        # Fetch failed: return the command to the error queue.
        if response is None:
            write_redis(redis_client, error_task_queue_name, command)
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t命令返回到错误队列................")
            return

        data = json.loads(str(response.text))
        # A 'globalObjects' key indicates a payload that contains data.
        if 'globalObjects' not in data.keys():
            # Forbidden by the server: refresh the token.
            if 'Forbidden' in str(data):
                forbidden_count += 1
                data_error_count = 0
                # Five token refreshes in a row did not help: give up on this command.
                if forbidden_count > 5:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t连续获取token超过5次，退出循环")
                    write_redis(redis_client, error_task_queue_name, command)
                    return
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 获取新token")
                token = get_token(redis_client, request_token_queue_name, request_token, token_queue_name)
                processed_header['x-csrf-token'] = str(token['csrf'])
                processed_header['x-guest-token'] = str(token['guest'])
            # Twitter is over capacity: just wait and retry.
            elif 'Over capacity' in str(data):
                forbidden_count = 0
                data_error_count = 0
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 Over capacity，请求超出服务器容量.............")
                time.sleep(120)
            else:
                data_error_count += 1
                forbidden_count = 0
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 response数据错误")
                print(data)
                time.sleep(30)
                # More than max_error consecutive malformed responses: the JSON
                # format may have changed, so return the task to the error queue.
                max_error = 10
                if data_error_count > max_error:
                    write_redis(redis_client, error_task_queue_name, command)
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 response连续 " + str(max_error) + " 次数据错误")
                    return
        else:
            forbidden_count = 0
            data_error_count = 0
            tweets, reach_start_date = extract_searched_tweets(data)
            users = extract_searched_user_info(data)
            if len(tweets) == 0:
                tweets_empty_count += 1
                if tweets_empty_count > 5:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 爬取完毕！共获取【" + str(scrape_count) + "】个请求数据...............")
                    break
            else:
                tweets_empty_count = 0
                tweets_data = {}
                tweets_data['db'] = command['db']
                tweets_data['collection'] = command['collection']
                tweets_data['data'] = tweets
                write_redis(redis_client, response_queue_name, tweets_data)

                users_data = {}
                users_data['db'] = command['db']
                users_data['collection'] = command['user_info']
                users_data['data'] = users
                write_redis(redis_client, response_queue_name, users_data)

            # Locate the next-page cursor in the instructions payload; its
            # position depends on whether this is a replaceEntry or addEntries page.
            if len(data['timeline']['instructions']) > 1:
                entries = data['timeline']['instructions'][-1]['replaceEntry']['entry']
                if 'operation' in entries['content'].keys():
                    content = entries['content']
                else:
                    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t获取的json数据格式不正确，请检查！")
                    print(data)
                    break
            else:
                entries = data['timeline']['instructions'][-1]['addEntries']['entries']
                content = entries[len(entries) - 1]['content']
            scroll = urllib.parse.quote(str(content['operation']['cursor']['value']))
            scrape_count += 1
            # if scrape_count % 5 == 1:
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t获取第 " + str(scrape_count) + " 个请求数据，本次推文共 " + str(len(tweets)) + "\t\t获取" + scroll)


def worker(header, redis_host, redis_port, urgent_task_queue_name, task_queue_name, request_token_queue_name
           , request_token, token_queue_name, error_task_queue_name, response_queue_name):
    """Main worker loop: pull JSON commands from Redis queues and dispatch them.

    The urgent queue is drained before the normal task queue. The loop exits
    after more than 10 idle 30s polls; it calls sys.exit(1) when Redis is
    unreachable at startup or when a command contains an empty field.
    """
    redis_client = redis.Redis(host=redis_host, port=redis_port)
    # Sanity-check the Redis connection before entering the loop.
    try:
        redis_client.llen(task_queue_name)
    except Exception as ex:
        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(
            os.getpid()) + "】 redis初始化失败，请检查参数是否正确以及远端是否开启服务！")
        print(ex)
        sys.exit(1)
    sleep_count = 0
    # NOTE(review): sleep_count is never reset after a task is processed, so
    # the worker stops after 10 idle polls in total, not 10 consecutive ones
    # — confirm this is the intended lifetime.
    while True:
        try:
            # Urgent queue first, then the normal task queue.
            if redis_client.llen(urgent_task_queue_name) > 0:
                command = json.loads(read_redis(redis_client, urgent_task_queue_name))
            elif redis_client.llen(task_queue_name) > 0:
                command = json.loads(read_redis(redis_client, task_queue_name))
            else:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 等待任务.........")
                time.sleep(30)
                sleep_count += 1
                if sleep_count > 10:
                    break
                continue

            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 获取命令：" + str(
                command))
            # Validate that every command field is non-empty.
            # NOTE(review): len() on a non-sized value (e.g. an int field)
            # raises TypeError, which lands in the except below and exits the
            # whole worker — confirm all command fields are strings/containers.
            try:
                for key in command.keys():
                    if len(command[key]) > 0:
                        continue
                    else:
                        print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(
                            os.getpid()) + "】 命令内容有缺失，请检查命令内容！")
                        sys.exit(1)
            except Exception as ex:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程错误：【" + str(os.getpid()) + "】 处理命令字段不完整！")
                print(ex)
                sys.exit(1)
            # Dispatch on the command type.
            if 'SEARCH' == command['type']:
                scrape_search(header, command, redis_client, request_token_queue_name, request_token,
                              token_queue_name, error_task_queue_name, response_queue_name)
            elif 'TIMELINE' == command['type']:
                scrape_timeline(header, command, redis_client, request_token_queue_name, request_token,
                                token_queue_name, error_task_queue_name, response_queue_name)
            else:
                print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t命令类型未定义！")
                print(command)

        except Exception as e:
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\t\t进程：【" + str(os.getpid()) + "】 获取任务出错******")
            print(e)
            time.sleep(120)


# Module-level auth token cache; complete_header() reads and refreshes it
# via `global token`.
token = {}
if __name__ == '__main__':
    # Redis connection settings and the queue names shared with the other
    # components (token producer, result persister).
    redis_host = '10.10.69.30'
    redis_port = 6379
    urgent_task_queue_name = 'urgent_task'
    task_queue_name = 'task'
    request_token_queue_name = 'request_token'
    request_token = 'csrf_token'
    token_queue_name = 'token'
    error_task_queue_name = 'error_task'
    response_queue_name = 'response'

    # Browser-like request header template; the per-request fields ('path',
    # 'referer', 'x-csrf-token', 'x-guest-token') are filled in later by
    # complete_header().
    # NOTE(review): the bearer token and cookie below are credentials
    # hardcoded in source — consider moving them to configuration/secrets.
    processed_header = {'authority': 'twitter.com',
                        'method': 'GET',
                        'path': '',
                        'scheme': 'https',
                        'accept': '*/*',
                        'accept-encoding': 'gzip, deflate, br',
                        'accept-language': 'zh-CN,zh;q=0.9',
                        'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
                        'cookie': 'personalization_id="v1_1By4WUgabU+79Sr4bKsP5w=="; guest_id=v1:162154103032126079; _ga=GA1.2.1080343483.1621604100; eu_cn=1; G_ENABLED_IDPS=google; _gid=GA1.2.987455938.1630073075; external_referer=padhuUp37zjgzgv1mFWxJ5Xq0CLV+bpWuS41v6lN3QU=|0|8e8t2xd8A2w=; '
                                  'ct0=274b24020c92ca3d44b24db9770fbbd9; gt=1433881876201152516',
                        'referer': '',
                        'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
                        'sec-ch-ua-mobile': '?0',
                        'sec-fetch-dest': 'empty',
                        'sec-fetch-mode': 'cors',
                        'sec-fetch-site': 'same-origin',
                        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
                        'x-csrf-token': '',
                        'x-guest-token': '',
                        'x-twitter-active-user': 'yes',
                        'x-twitter-client-language': 'zh-cn',
                        }
    worker(processed_header, redis_host, redis_port, urgent_task_queue_name, task_queue_name, request_token_queue_name
           , request_token, token_queue_name, error_task_queue_name, response_queue_name)
