import json
import re
from urllib.parse import quote, urlencode

import scrapy

from twitter_graphql_crawler.items import TweetItem


class TwitterGraphQLSpider(scrapy.Spider):
    """Crawl a Twitter user's timeline through the private GraphQL API.

    Flow: open the login page, authenticate via the form endpoint, visit
    the target profile to harvest the numeric user id and a guest token,
    then page through the ``UserTweets`` GraphQL endpoint with cursor
    pagination, yielding one ``TweetItem`` per tweet.
    """

    name = "twitter_graphql"
    start_urls = ["https://twitter.com/i/flow/login"]
    # Target account; a leading "@" is tolerated and stripped when URLs
    # are built (see _handle).
    target_user = "@Kitti3Miti"  # replace with the account to crawl
    # queryId/operationName pair; Twitter rotates these periodically —
    # update this string when requests start failing.
    graphql_operation = "VlVdox2qNp9uFZ7wXKJ7dw/UserTweets"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # SECURITY NOTE(review): credentials are hard-coded; they should
        # be supplied via spider arguments, settings, or environment
        # variables rather than committed to source control.
        self.credentials = {
            "username": "@Otto_Apocalyp__",  # replace with actual username
            "password": "951753lailai"  # replace with actual password
        }

    @property
    def _handle(self):
        """Target screen name without a leading '@' (the URL-safe form)."""
        return self.target_user.lstrip('@')

    def parse(self, response):
        """Extract the CSRF token from the login page and submit credentials."""
        csrf_token = self.extract_csrf(response.text)
        if not csrf_token:
            self.logger.error("CSRF token not found in login page")
            return

        return scrapy.FormRequest(
            url="https://twitter.com/i/api/2/authenticate",
            formdata={
                "username": self.credentials["username"],
                "password": self.credentials["password"],
                # Bugfix: use the bare handle — "twitter.com/@name" is
                # not the canonical profile URL.
                "redirect_after_login": f"https://twitter.com/{self._handle}"
            },
            callback=self.after_login,
            headers={
                'x-csrf-token': csrf_token,
                'content-type': 'application/x-www-form-urlencoded'
            },
            # Carry the token forward so every later request can reuse it.
            meta={'csrf_token': csrf_token}
        )

    def after_login(self, response):
        """After authenticating, fetch the target user's profile page."""
        if response.status != 200:
            self.logger.error(f"Login failed with status {response.status}")
            return

        csrf_token = response.meta['csrf_token']
        yield scrapy.Request(
            url=f"https://twitter.com/{self._handle}",
            callback=self.get_guest_token,
            meta={'csrf_token': csrf_token},
            headers={'x-csrf-token': csrf_token}
        )

    def get_guest_token(self, response):
        """Scrape the guest token and user id from the profile HTML, then
        issue the first GraphQL timeline request."""
        csrf_token = response.meta['csrf_token']

        # Numeric rest_id of the account, embedded in the page's JSON blob.
        user_id = self.get_user_id(response.text)
        if not user_id:
            self.logger.error("User ID not found")
            return

        # The guest token lives inside an inline <script> element.
        script_content = response.xpath('//script[contains(., "guest_token")]/text()').get()
        if not script_content:
            self.logger.error("Guest token script not found")
            return

        match = re.search(r'"guest_token":"(\w+)"', script_content)
        guest_token = match.group(1) if match else None

        if not guest_token:
            self.logger.error("Guest token not found")
            return

        # Kick off the timeline crawl (no cursor => first page).
        graphql_url = self.construct_graphql_url(user_id=user_id)
        yield scrapy.Request(
            url=graphql_url,
            callback=self.parse_tweets,
            headers=self.build_headers(csrf_token, guest_token),
            meta={
                'user_id': user_id,
                'csrf_token': csrf_token,
                'guest_token': guest_token
            }
        )

    def construct_graphql_url(self, user_id, cursor=None):
        """Build a ``UserTweets`` GraphQL request URL.

        Args:
            user_id: numeric rest_id of the target account.
            cursor: pagination cursor from the previous page, or ``None``
                for the first page.

        Returns:
            Fully percent-encoded request URL.
        """
        variables = {
            "userId": user_id,
            "count": 100,
            "includePromotedContent": False,
            "withCommunity": False,
            "withVoice": True,
            "withBirdwatchPivots": False
        }
        # Bugfix: omit the key entirely on the first page instead of
        # serializing "cursor": null into the payload.
        if cursor is not None:
            variables["cursor"] = cursor

        # Server-side feature flags the endpoint requires; an incomplete
        # set produces a GraphQL error response.
        features = {
            "responsive_web_graphql_exclude_directive_enabled": True,
            "verified_phone_label_enabled": False,
            "creator_subscriptions_tweet_preview_api_enabled": True,
            "responsive_web_graphql_timeline_navigation_enabled": True,
            "responsive_web_graphql_skip_user_profile_image_extensions_enabled": False,
            "c9s_tweet_anatomy_moderator_badge_enabled": True,
            "tweetypie_unmention_optimization_enabled": True,
            "responsive_web_edit_tweet_api_enabled": True,
            "graphql_is_translatable_rweb_tweet_is_translatable_enabled": True,
            "view_counts_everywhere_api_enabled": True,
            "longform_notetweets_consumption_enabled": True,
            "responsive_web_twitter_article_tweet_consumption_enabled": False,
            "tweet_awards_web_tipping_enabled": False,
            "freedom_of_speech_not_reach_fetch_enabled": True,
            "standardized_nudges_misinfo": True,
            "tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled": True,
            "longform_notetweets_rich_text_read_enabled": True,
            "longform_notetweets_inline_media_enabled": True,
            "responsive_web_media_download_video_enabled": False,
            "responsive_web_enhance_cards_enabled": False
        }

        # Compact JSON (no separator spaces) keeps the URL short, and
        # urlencode handles the percent-escaping. Bugfix: the previous
        # manual quote()+join left '/' characters unescaped (quote's
        # default safe='/').
        query = urlencode({
            "variables": json.dumps(variables, separators=(",", ":")),
            "features": json.dumps(features, separators=(",", ":"))
        })
        return f"https://twitter.com/i/api/graphql/{self.graphql_operation}?{query}"

    def parse_tweets(self, response):
        """Parse one GraphQL timeline page: yield tweet items, then
        follow the bottom cursor to the next page."""
        try:
            data = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.error(f"Invalid JSON response: {response.text[:500]}")
            return

        # A top-level "errors" array means the request itself failed.
        if 'errors' in data:
            self.logger.error(f"GraphQL error: {data['errors']}")
            return

        # Guard each level with "or {}": the API returns explicit null
        # (not a missing key) for suspended/deleted accounts, and
        # None.get(...) would raise AttributeError.
        # NOTE(review): newer API revisions nest this under
        # 'timeline_v2' — confirm against a live response.
        user_result = ((data.get('data') or {}).get('user') or {}).get('result') or {}
        timeline = (user_result.get('timeline') or {}).get('timeline') or {}
        instructions = timeline.get('instructions') or []

        tweet_entries = []
        for instr in instructions:
            if instr.get('type') == 'TimelineAddEntries':
                tweet_entries = [e for e in instr.get('entries', []) if e.get('entryId', '').startswith('tweet-')]
                break

        if not tweet_entries:
            self.logger.warning("No tweet entries found in response")

        for entry in tweet_entries:
            tweet_data = entry.get('content', {}).get('itemContent', {}).get('tweet_results', {}).get('result', {})
            if not tweet_data:
                continue

            item = self.parse_tweet_item(tweet_data)
            if item:
                yield item

        # Pagination: re-request with the bottom cursor until none remains.
        next_cursor = self.extract_next_cursor(instructions)
        if next_cursor:
            next_url = self.construct_graphql_url(
                user_id=response.meta['user_id'],
                cursor=next_cursor
            )
            yield scrapy.Request(
                url=next_url,
                callback=self.parse_tweets,
                headers=self.build_headers(
                    response.meta['csrf_token'],
                    response.meta['guest_token']
                ),
                meta=response.meta
            )

    def parse_tweet_item(self, tweet_data):
        """Map one GraphQL tweet result onto a TweetItem.

        Returns ``None`` when the entry has no ``legacy`` payload (e.g.
        tombstoned or withheld tweets).
        """
        # TweetWithVisibilityResults wraps the real tweet one level deeper.
        if 'tweet' in tweet_data:
            tweet_data = tweet_data['tweet']

        core_data = tweet_data.get('core', {}).get('user_results', {}).get('result', {})
        legacy_data = tweet_data.get('legacy', {})
        views_data = tweet_data.get('views', {})

        if not legacy_data:
            return None

        item = TweetItem()
        item['user_id'] = core_data.get('rest_id')
        item['tweet_id'] = legacy_data.get('id_str')
        item['created_at'] = legacy_data.get('created_at')
        item['content'] = legacy_data.get('full_text')
        item['retweet_count'] = legacy_data.get('retweet_count')
        item['like_count'] = legacy_data.get('favorite_count')
        item['reply_count'] = legacy_data.get('reply_count')
        item['quote_count'] = legacy_data.get('quote_count')
        item['lang'] = legacy_data.get('lang')
        # Native retweets carry the original tweet's id in this field.
        item['is_retweet'] = 'retweeted_status_id_str' in legacy_data
        item['conversation_id'] = legacy_data.get('conversation_id_str')
        item['source'] = legacy_data.get('source')
        item['views'] = views_data.get('count')
        item['bookmarks'] = legacy_data.get('bookmark_count')

        # Collect media: photo URLs directly, videos/GIFs as the
        # highest-bitrate MP4 variant.
        media = []
        for media_item in legacy_data.get('extended_entities', {}).get('media', []):
            if media_item.get('type') == 'photo':
                media.append(media_item.get('media_url_https'))
            elif media_item.get('type') in ['video', 'animated_gif']:
                variants = media_item.get('video_info', {}).get('variants', [])
                best_bitrate = 0
                best_url = ""
                for variant in variants:
                    if variant.get('content_type') == 'video/mp4':
                        bitrate = variant.get('bitrate', 0)
                        if bitrate > best_bitrate:
                            best_bitrate = bitrate
                            best_url = variant.get('url')
                if best_url:
                    media.append(best_url)
        item['media_urls'] = media

        return item

    # ---- helpers -------------------------------------------------------

    def extract_csrf(self, html_body):
        """Return the CSRF token embedded in the page HTML, or ``None``."""
        match = re.search(r'"csrf_token":"(\w+)"', html_body)
        return match.group(1) if match else None

    def get_user_id(self, html_body):
        """Return the numeric user id embedded in the profile HTML, or ``None``."""
        match = re.search(r'"user_id":"(\d+)"', html_body)
        return match.group(1) if match else None

    def build_headers(self, csrf_token, guest_token):
        """Headers for GraphQL requests (public web-client bearer token)."""
        return {
            'x-csrf-token': csrf_token,
            'x-guest-token': guest_token,
            'authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA',
            'x-twitter-active-user': 'yes',
            'x-twitter-auth-type': 'OAuth2Session',
            'x-twitter-client-language': 'en'
        }

    def extract_next_cursor(self, instructions):
        """Return the bottom pagination cursor from a page's instructions,
        or ``None`` when there are no more pages."""
        for instr in instructions:
            if instr.get('type') == 'TimelineAddEntries':
                for entry in instr.get('entries', []):
                    if entry.get('entryId', '').startswith('cursor-bottom-'):
                        return entry.get('content', {}).get('value')
        return None