import re, json, logging
import time
from urllib.parse import quote

from scrapy import http
from scrapy.spiders import CrawlSpider
# from scrapy.shell import inspect_response
from scrapy.core.downloader.middleware import DownloaderMiddlewareManager
from scrapy_selenium import SeleniumRequest, SeleniumMiddleware
from scrapy.utils.project import get_project_settings

# from TweetScraper.items import Tweet, User
from TweetScraper.items import Tweet
import random
import pymysql

# Project-wide Scrapy settings (MySQL credentials, log path, table names).
SETTINGS = get_project_settings()
logger = logging.getLogger(__name__)

# Create a handler that writes this module's log records to the configured log file.
file_handler = logging.FileHandler(filename=SETTINGS['LOG_PATH'])
logger.addHandler(file_handler)

class TweetScraper(CrawlSpider):
    """Crawl tweets for a set of Twitter user ids stored in MySQL.

    The users to crawl (``user_id``, ``max_page``, ``real_name``) are read
    from the table named by ``MYSQL_TABLE_NAME_USER`` where ``status`` is
    true.  Tweets are fetched through Twitter's (undocumented) profile
    timeline JSON API using a guest token obtained from a Selenium-rendered
    landing page, and emitted as ``Tweet`` items.  A user's ``status`` flag
    is cleared once its timeline is exhausted or its page limit is reached.
    """

    name = 'TweetScraperByUserId'
    allowed_domains = ['twitter.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'TweetScraper.pipelines.SaveToFilePipeline': 100,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_selenium.SeleniumMiddleware': 800,
        }
    }

    def __init__(self):
        # Connection and cursor live for the spider's lifetime; the cursor
        # is reused later to clear each user's status flag when finished.
        self.conn = pymysql.connect(host=SETTINGS['MYSQL_HOST'],
                                    port=SETTINGS['MYSQL_PORT'],
                                    user=SETTINGS['MYSQL_USER'],
                                    password=SETTINGS['MYSQL_PASS'],
                                    db=SETTINGS['MYSQL_DB'])
        self.cursor = self.conn.cursor()
        # Table names cannot be bound as query parameters; the name comes
        # from trusted project settings, not user input.
        sql = "SELECT user_id, max_page, real_name FROM {} where status = true ".format(
            SETTINGS['MYSQL_TABLE_NAME_USER'])
        try:
            self.cursor.execute(sql)
        except Exception as e:
            print('Fail to get user_ids from database!', e)
            exit()
        self.user_list = set(self.cursor.fetchall())
        if len(self.user_list) == 0:
            logger.info("no user infos are provided to search, make sure user_list not null")
            exit()

        # Base URL of the profile-timeline endpoint plus the long, constant
        # query string that Twitter's own web client sends.
        url_base = 'https://api.twitter.com/2/timeline/profile/{userId}.json?'
        url_param = (
            'include_profile_interstitial_type=1'
            '&include_blocking=1'
            '&include_blocked_by=1'
            '&include_followed_by=1'
            '&include_want_retweets=1'
            '&include_mute_edge=1'
            '&include_can_dm=1'
            '&include_can_media_tag=1'
            '&skip_status=1'
            '&cards_platform=Web-12'
            '&include_cards=1'
            '&include_ext_alt_text=true'
            '&include_quote_count=true'
            '&include_reply_count=1'
            '&tweet_mode=extended'
            '&include_entities=true'
            '&include_user_entities=true'
            '&include_ext_media_color=true'
            '&include_ext_media_availability=true'
            '&send_error_codes=true'
            '&simple_quoted_tweet=true'
            '&include_tweet_replies=false'
            '&count=20'
            '&pc=1'
            '&spelling_corrections=1'
            '&ext=mediaStats%2ChighlightedLabel'
        )
        self.url = url_base + url_param + '&userId={userId}'

        # State for the user currently being crawled.
        self.user_id, self.max_page, self.real_name = self.user_list.pop()
        self.num_search_issued = 0        # total timeline requests issued
        self.page_index_per_keyword = 0   # pages fetched for the current user
        self.cursor_temp = ''             # last pagination cursor seen
        self.source = "twitter"

    def start_requests(self):
        """Visit the landing page first to obtain cookies and a guest token."""
        yield SeleniumRequest(url="https://twitter.com/explore", callback=self.parse_home_page)

    def parse_home_page(self, response):
        """Capture cookies from the rendered landing page, then start querying."""
        self.update_cookies(response)
        yield from self.start_query_request()

    def update_cookies(self, response):
        """Refresh ``self.cookies``/``self.headers`` from the Selenium driver.

        The 'gt' cookie is the guest token Twitter's API requires alongside
        the (public, hard-coded) web-client bearer token.
        """
        driver = response.meta['driver']
        try:
            self.cookies = driver.get_cookies()
            self.x_guest_token = driver.get_cookie('gt')['value']
            # self.x_csrf_token = driver.get_cookie('ct0')['value']
        except Exception:
            # Missing driver cookie ('gt' absent) — keep the previous token.
            logger.info('cookies are not updated!')

        self.headers = {
            'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
            'x-guest-token': self.x_guest_token,
            # 'x-csrf-token': self.x_csrf_token,
        }
        print('headers:\n--------------------------\n')
        print(self.headers)
        print('\n--------------------------\n')

    def start_query_request(self, cursor=None):
        """Yield a timeline API request for the current user.

        :param cursor: pagination cursor from the previous page, or ``None``
            for the first page of the user's timeline.
        """
        if cursor:
            url = self.url + '&cursor={cursor}'
            url = url.format(userId=quote(self.user_id), cursor=quote(cursor))
        else:
            logger.info('Start to query keyword: %s' % self.user_id)
            url = self.url.format(userId=quote(self.user_id))

        request = http.Request(url, callback=self.parse_result_page, cookies=self.cookies, headers=self.headers)
        yield request

        self.num_search_issued += 1
        self.page_index_per_keyword += 1
        if self.num_search_issued % 100 == 0:
            # Every 100 requests, recycle the Selenium middleware (fresh
            # browser session) and fetch a new guest token.
            for m in self.crawler.engine.downloader.middleware.middlewares:
                if isinstance(m, SeleniumMiddleware):
                    m.spider_closed()
            self.crawler.engine.downloader.middleware = DownloaderMiddlewareManager.from_crawler(self.crawler)
            yield SeleniumRequest(url="https://twitter.com/explore", callback=self.update_cookies, dont_filter=True)

    def _next_user_requests(self, min_sleep, max_sleep):
        """Pop the next user from the queue and start crawling its timeline.

        NOTE(fix): one call site originally unpacked FOUR values here, but
        the SELECT in ``__init__`` yields three columns — that branch always
        raised ``ValueError``.  All call sites now unpack three.
        """
        self.page_index_per_keyword = 0
        self.user_id, self.max_page, self.real_name = self.user_list.pop()
        # Small random pause between users to look less like a bot.
        time.sleep(random.randint(min_sleep, max_sleep))
        yield from self.start_query_request()

    def parse_result_page(self, response):
        """Parse one timeline page: emit tweets and schedule the next request."""
        if not response.url or 'exception' in response.url:  # received an empty/exception URL
            # This user's requests are failing — give up and move on.
            if len(self.user_list):
                yield from self._next_user_requests(5, 10)
        else:
            data = json.loads(response.text)
            yield from self.parse_tweet_item(data['globalObjects']['tweets'])

            # Dig the next-page cursor out of the timeline instructions;
            # fall back to the previous cursor if the structure is missing.
            try:
                cursor = data['timeline']['instructions'][0]['addEntries']['entries'][-1]['content']['operation']['cursor']['value']
            except (KeyError, IndexError, TypeError):
                cursor = self.cursor_temp
            # Continue only if this page had tweets, the cursor advanced, and
            # the per-user page limit (negative = unlimited) is not exhausted.
            if (len(data['globalObjects']['tweets']) > 1
                    and self.cursor_temp != cursor
                    and (self.max_page < 0 or self.page_index_per_keyword < self.max_page)):
                self.cursor_temp = cursor
                yield from self.start_query_request(cursor=cursor)
            else:
                # Done with this user: clear its status flag so it is not
                # selected again.  Uses the same configured table as the
                # SELECT in __init__ (originally hard-coded 'following_user')
                # and binds user_id as a parameter instead of string-building.
                sql = "update {} set status = 0 where user_id = %s ".format(
                    SETTINGS['MYSQL_TABLE_NAME_USER'])
                try:
                    self.cursor.execute(sql, (self.user_id,))
                    self.conn.commit()
                except Exception as e:
                    print('Fail to update user_id!', e)
                if len(self.user_list):
                    yield from self._next_user_requests(10, 20)

    def parse_tweet_item(self, items):
        """Yield a ``Tweet`` item per entry of the API's tweets mapping.

        :param items: dict mapping tweet id (str) to the raw tweet payload.
        """
        for k, v in items.items():
            tweet = Tweet()
            tweet['id_'] = k
            tweet['keyword'] = self.user_id
            tweet['raw_data'] = v
            tweet['real_name'] = self.real_name
            tweet['source'] = self.source
            yield tweet

    # def parse_user_item(self, items):
    #     for k,v in items.items():
    #         # assert k == v['id_str'], (k,v)
    #         user = User()
    #         user['id_'] = k
    #         user['raw_data'] = v
    #         yield user