import scrapy
import logging, json
from urllib.parse import quote
from scrapy_selenium import SeleniumRequest
from scrapy import http
import time
from TweetScraper.items import Following
import os

# Module-level logger, named after this module so log lines can be traced
# back to this spider.
logger = logging.getLogger(__name__)
class FollowingspiderSpider(scrapy.Spider):
    """Crawl the "Following" list of a fixed set of Twitter users.

    Uses Twitter's private GraphQL ``Following`` endpoint (paginated with an
    opaque cursor) and yields one :class:`Following` item per followed
    account.  Authentication relies on cookies injected by
    ``LoadCookiesMiddleware`` plus the hard-coded bearer/CSRF headers below.
    """
    name = 'FollowingSpider'
    allowed_domains = ['twitter.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'TweetScraper.pipelines.SaveUserToDBPipelne': 300,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'TweetScraper.middlewares.LoadCookiesMiddleware': 299,
        }
    }

    def __init__(self):
        # BUG FIX: chain to scrapy.Spider so base-class state is initialized.
        super().__init__()
        # Queue of (user_id, display_name) pairs still waiting to be crawled.
        self.user_infos = [("1071037504076173313", "陈柏惟"), ("883578527240511489", "王定宇")]
        # GraphQL "variables" JSON template.  ``{{``/``}}`` are literal braces
        # for str.format; the single ``{}`` is later filled with the user id.
        # Note the closing brace is appended in start_query_request().
        self.variables_base = '{{"userId":"{}","count":20,"withHighlightedLabel":false,"withTweetQuoteCount":false,' \
                    '"includePromotedContent":false,"withTweetResult":false,"withUserResult":false'
        self.url_base = 'https://twitter.com/i/api/graphql/8jxlfJOiZQxVr9Pd_fafLw/Following?variables='
        # NOTE(review): bearer token and CSRF token are hard-coded; they must
        # match the session cookies supplied by LoadCookiesMiddleware.
        self.headers = {
            "TE": "Trailers",
            'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
            "x-twitter-auth-type": "OAuth2Session",
            "x-twitter-active-user": "yes",
            "x-csrf-token": "cc0e27ebcfe047d7443186a920bd9aafc9e20b0a9726cddab0206d55c44337ea1bcd5c021873c944083e46daba62666fa72dac7bd0361fce9307ddae10f3f285d74d7d199d2746890ccb1915802b8a2f"
        }
        # Current user being crawled; advanced in parse_result_page() once
        # this user's pagination is exhausted.
        self.userId, self.username = self.user_infos.pop()
        logger.info("Search following user of user: %s", self.userId)

    def start_requests(self):
        """Kick off the crawl with the first page of the first user."""
        yield self.start_query_request()

        # yield SeleniumRequest(url="https://twitter.com/explore", callback=self.parse_home_page)

    def start_query_request(self, cursor=None):
        """Build a GraphQL Following request for the *current* user.

        :param cursor: opaque pagination cursor from a previous page, or
            ``None`` for the first page.
        :returns: a ``scrapy.http.Request`` routed to :meth:`parse_result_page`.
        """
        if cursor:
            variables = self.variables_base + ',"cursor": "{}"'.format(cursor) + '}}'
        else:
            variables = self.variables_base + '}}'
        # Fill in the user id, then URL-encode the whole JSON payload.
        url = self.url_base + quote(variables.format(self.userId))
        return http.Request(url, callback=self.parse_result_page, headers=self.headers)

    def parse_result_page(self, response):
        """Parse one page of a user's Following timeline.

        Yields one item per followed account, then either requests the next
        page of the same user or moves on to the next queued user.
        """
        data = json.loads(response.text)
        item_lists = data['data']['user']['following_timeline']['timeline']['instructions'][-1]['entries']
        # The final two entries are cursor entries, not user entries.
        for item in item_lists[:-2]:
            if 'user-' not in item['entryId']:
                continue
            yield self.parse_following_item(item)

        # Bottom cursor; a value starting with "0|" marks the last page.
        cursor = item_lists[-2]['content']['value']
        if cursor.split('|')[0] != '0':
            # More pages remain for the current user.
            logger.info("next page")
            yield self.start_query_request(cursor=cursor)
        elif self.user_infos:
            # BUG FIX: only advance to the next user once the current user's
            # pagination is finished.  Previously this ran on every page,
            # clobbering self.userId while cursor requests for the old user
            # were still in flight.
            self.userId, self.username = self.user_infos.pop()
            logger.info("Search following user of user: %s", self.userId)
            yield self.start_query_request()
        # NOTE(review): crude rate limit kept for behavioral parity, but this
        # blocks the Twisted reactor — prefer the DOWNLOAD_DELAY setting.
        time.sleep(3)

    def parse_following_item(self, item):
        """Convert one timeline ``user-*`` entry into a :class:`Following` item.

        The follower field records the display name of the user whose
        Following list is currently being crawled.
        """
        user = Following()
        user['user_id'] = item['entryId'].replace('user-', '')
        user['real_name'] = item['content']['itemContent']['user']['legacy']['name']
        user['description'] = item['content']['itemContent']['user']['legacy']['description']
        user['follower'] = self.username
        return user


