import json
import logging
import os
import time
import urllib
from datetime import datetime
from urllib.parse import urlparse, urlencode, urlunparse, parse_qsl, parse_qs

from modules.browser_simulate.selenium_simulate import SeleniumSimulate
from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from scrapy_main.opera.scrapy_data_saver import ScrapyDataSaver
from utils.common import obj_to_query_string, safe_file_name
from utils.format import format_url_by_template
from utils.os_main import create_directory, does_file_exist, get_abs_file_path

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class TwitterScraper(BlogMiddle):
    """Feed scraper built on top of the BlogMiddle base class.

    NOTE(review): despite the class name, the payload shapes handled here
    ('items', 'carousel_media', 'image_versions2', 'video_versions',
    'taken_at') match the Instagram web API — confirm which service the
    configured URL templates actually target.

    Configuration (headers, target_dir, search URL templates, and flags such
    as enable_multi_process / enable_new_only) is loaded by the base class;
    this class adds only the fetch/parse/download logic.
    """
    def __init__(self, json_file_path=None, params=None):
        super().__init__(json_file_path, params)


    def get_post_items(self, posts):
        """
        Extract and normalise per-post information from an API response.

        :param posts: dict containing post data (expects an 'items' list)
        :return: list of dicts with 'caption', 'media' and 'taken_at' keys
        """
        items = []

        # Walk every post in the response page.
        for item in posts.get('items', []):
            # Grab the caption text if present; 'caption' can be an explicit
            # null in the payload, hence the is-not-None check.
            if item.get('caption', {}) is not None:
                caption = item.get('caption', {}).get('text', '')
            else:
                caption = ''

            media = []  # Collected media descriptors for this post.

            # Video post: take the first available rendition.
            if 'video_versions' in item:
                video = item['video_versions'][0]
                if video:
                    media.append({
                        'id': video['id'],
                        'code': item['code'],
                        'video': True,
                        'width': video['width'],
                        'height': video['height'],
                        'url': video['url']
                    })
            # Carousel post: one descriptor per contained media item.
            elif 'carousel_media' in item:
                for m in item['carousel_media']:
                    mi = self.get_media_info(m)
                    if mi:
                        mi['code'] = item['code']
                        media.append(mi)
            # Plain single-image post.
            else:
                mi = self.get_media_info(item)
                if mi:
                    mi['code'] = item['code']
                    media.append(mi)

            # Convert the publish timestamp (epoch seconds) when present.
            taken_at = item.get('taken_at')
            if taken_at:
                taken_at = datetime.fromtimestamp(taken_at)
            else:
                taken_at = None

            # Append the normalised post record.
            items.append({
                'caption': caption,
                'media': media,
                'taken_at': taken_at
            })

        return items

    def get_posts(self, max_id=None):
        """
        Fetch one page of the user's posts.

        :param max_id: optional pagination cursor (max post ID) for the next page
        :return: decoded JSON dict containing the page of posts
        """
        params = {'count': 12}

        # Pass the pagination cursor through when requesting a follow-up page.
        if max_id:
            params['max_id'] = max_id

        # Prefer the user-id endpoint once the id is known; otherwise fall
        # back to the username-based endpoint.
        if self.user_id:
            self.search_url = format_url_by_template(self.search_by_userid, self.__dict__)
        else:
            self.search_url = format_url_by_template(self.search_by_username, self.__dict__)

        # Execute the API request and return the parsed response.
        return self._execute_api(url=self.search_url, params=params)

    def _execute_api(self, method='GET', url='', params=None, data=None):
        # Append query parameters manually; assumes `url` carries no query yet.
        if params:
            url += '?' + obj_to_query_string(params)

        response = req.request(method, url, headers=self.headers, data=data, verify=True)
        response.raise_for_status()
        return response.json()

    def scrape_whole_page(self, search_key, counter=None):
        """
        Crawl every page of a user's posts and save the media to disk.

        :param search_key: search key, usually a user id or username
        :param counter: multiprocessing shared counter; only used (and only
            required) when enable_multi_process is set
        """
        # Remember the search key and start without a pagination cursor.
        self.search_key = search_key
        next_max_id = None

        # Create the directory that will hold the downloaded media.
        create_directory(f'{self.target_dir}/{self.search_key}')

        # Keep fetching pages until the API reports nothing more.
        while True:
            # Fetch the current page and remember its pagination cursor.
            posts = self.get_posts(next_max_id)
            next_max_id = posts.get('next_max_id')
            self.user_id = posts.get('user').get('pk')

            # Normalise the media items contained in the page.
            items = self.get_post_items(posts)

            # Download and save every media file in every post.
            for url_cap in items:
                count = 0
                for url_item in url_cap['media']:
                    count += 1
                    file_type = 'jpg' if not url_item['video'] else 'mp4'
                    file_name = safe_file_name(url_cap['caption']) + str(count)
                    print(f'{file_name}.{file_type}')
                    file_path = get_abs_file_path(
                        f'{self.target_dir}/{self.search_key}/{file_name}.{file_type}')

                    # Only download files we do not already have.
                    if not does_file_exist(file_path):
                        time.sleep(5)  # throttle between downloads
                        self.saver.save_to_media(url_item['url'], file_path)
                        if self.enable_multi_process:
                            # Update the shared cross-process counter under its lock.
                            with counter.get_lock():
                                counter.value += 1
                        else:
                            self.resulter.count += 1

                    # File already present and "new only" mode is on:
                    # skip the rest of this post's media.
                    elif self.enable_new_only:
                        break

            # Stop when the API says nothing more is available or there is
            # no cursor for a next page.
            if not posts.get('more_available') or not posts.get('next_max_id'):
                print('finished')
                self.user_id = None
                break

    def get_media_info(self, m):
        """
        Extract media information from a single media item.

        :param m: media item dict
        :return: dict with media info, or None if no usable URL was found
        """
        # Pull the basic attributes off the media item.
        id = m.get('id')
        original_height = m.get('original_height')
        original_width = m.get('original_width')

        original_media_url = None
        media_candidates = m.get('image_versions2', {}).get('candidates', [])

        # Look for the candidate whose dimensions match the original size.
        for c in media_candidates:
            if c.get('width') == original_width and c.get('height') == original_height:
                original_media_url = c.get('url')
                break

        # No exact match: fall back to the first candidate, if any.
        if not original_media_url and media_candidates:
            original_media_url = media_candidates[0].get('url')

        # Still nothing usable: give up on this media item.
        if not original_media_url:
            return None

        # Return the media descriptor.
        return {
            'id': id,
            'video': False,  # image_versions2 candidates are stills, not video
            'width': original_width,
            'height': original_height,
            'url': original_media_url
        }


def parse_tweet_json(json_data, tweet_id, verbose=False):
    """
    Locate a single tweet inside a TweetDetail GraphQL response.

    :param json_data: response payload as a JSON string or already-decoded dict
    :param tweet_id: tweet id (string) to look up; matched against the
        "tweet-<id>" entry id
    :param verbose: when True, dump intermediate structures for debugging.
        (Previously this flag was hard-coded to True, spamming stdout with
        full payload dumps on every call.)
    :return: dict with 'tweet_result', 'tweet_legacy' and 'tweet_user' keys,
        or None when the tweet entry cannot be found
    """
    if isinstance(json_data, str):
        json_data = json.loads(json_data)

    threaded_conversation = json_data.get("data", {}).get("threaded_conversation_with_injections_v2", {})
    instructions = threaded_conversation.get("instructions", [])

    # Only the TimelineAddEntries instruction carries the tweet entries.
    instruction = next((ins for ins in instructions if ins.get("type") == "TimelineAddEntries"), None)
    if instruction is None:
        return None

    entries = instruction.get("entries", [])
    tweet_entry = next((e for e in entries if e.get("entryId") == "tweet-" + tweet_id), None)
    if tweet_entry is None:
        return None

    content = tweet_entry.get("content", {}).get("itemContent", {}).get("tweet_results", {}).get("result", {})

    # Visibility-limited tweets wrap the real result one level deeper.
    if "tweet" in content:
        tweet_result = content["tweet"]
    else:
        tweet_result = content

    tweet_user = tweet_result.get("core", {}).get("user_results", {}).get("result", {})
    tweet_legacy = tweet_result.get("legacy", {})

    if verbose:
        print("[ujs][parse_tweet_json] tweet_result", tweet_result, json.dumps(tweet_result))
        print("[ujs][parse_tweet_json] tweet_legacy", tweet_legacy, json.dumps(tweet_legacy))
        print("[ujs][parse_tweet_json] tweet_user", tweet_user, json.dumps(tweet_user))

    return {"tweet_result": tweet_result, "tweet_legacy": tweet_legacy, "tweet_user": tweet_user}


def parse_tweet_legacy_medias(tweet_result, tweet_legacy, tweet_user, verbose=False):
    """
    Build a list of downloadable media descriptors from a tweet's legacy data.

    :param tweet_result: "result" node of the tweet (used for rest_id)
    :param tweet_legacy: tweet's "legacy" node (extended_entities, full_text, ...)
    :param tweet_user: user "result" node (used for legacy.screen_name)
    :param verbose: when True, print the parsed media list for debugging.
        (Previously hard-coded to True.)
    :return: list of media dicts; empty when the tweet carries no media
    """
    if "extended_entities" not in tweet_legacy or "media" not in tweet_legacy["extended_entities"]:
        return []

    medias = []
    type_index = {}  # per-type running counters: "photo", "video", "animated_gif"
    index = -1

    for media in tweet_legacy["extended_entities"]["media"]:
        index += 1
        type_ = media["type"]
        type_original = media["type"]
        type_index[type_] = type_index.get(type_, -1) + 1
        # Animated GIFs are served/downloaded as videos, so they also
        # advance the video counter.
        if type_ == "animated_gif":
            type_ = "video"
            type_index[type_] = type_index.get(type_, -1) + 1

        download_url = ""
        if "video_info" in media:
            # Highest-bitrate variant wins; fall back to the first variant
            # when none advertises a bitrate (previously max() over an empty
            # sequence raised ValueError for e.g. HLS-only payloads).
            variants = media["video_info"]["variants"]
            rated = [v for v in variants if "bitrate" in v]
            best = max(rated, key=lambda el: el["bitrate"]) if rated else variants[0]
            download_url = best["url"]
        else:
            # Stills: request the original-resolution rendition.
            if "?format=" in media["media_url_https"]:
                download_url = media["media_url_https"]
            else:
                parts = media["media_url_https"].split(".")
                ext = parts[-1]
                url_part = ".".join(parts[:-1])
                download_url = f"{url_part}?format={ext}&name=orig"

        screen_name = tweet_user["legacy"]["screen_name"]
        tweet_id = tweet_result.get("rest_id") or tweet_legacy["id_str"]

        type_index_ = type_index[type_]
        type_index_original = type_index.get(type_original, -1)

        preview_url = media["media_url_https"]
        media_id = media["id_str"]
        media_key = media["media_key"]

        expanded_url = media.get("expanded_url", "")
        short_expanded_url = media.get("display_url", "")
        short_tweet_url = media.get("url", "")
        # Strip the trailing short media link from the tweet text.
        tweet_text = tweet_legacy.get("full_text", "").replace(f" {media['url']}", "")

        media_entry = {
            "screen_name": screen_name,
            "tweet_id": tweet_id,
            "download_url": download_url,
            "type": type_,
            "type_original": type_original,
            "index": index,
            "type_index": type_index_,
            "type_index_original": type_index_original,
            "preview_url": preview_url,
            "media_id": media_id,
            "media_key": media_key,
            "expanded_url": expanded_url,
            "short_expanded_url": short_expanded_url,
            "short_tweet_url": short_tweet_url,
            "tweet_text": tweet_text,
        }
        medias.append(media_entry)

    if verbose:
        print("[ujs][parse_tweet_legacy_medias] medias", medias)

    return medias


def get_tweet_medias(tweet_json, tweet_id):
    """
    Collect all media descriptors for a tweet, including its quoted tweet.

    :param tweet_json: TweetDetail response (JSON string or decoded dict)
    :param tweet_id: id of the focal tweet, as a string
    :return: list of media dicts; empty when the tweet cannot be found
    """
    parse_result = parse_tweet_json(tweet_json, tweet_id)
    # The tweet may be absent from the response (deleted/protected);
    # previously this fell through and crashed with a TypeError on None.
    if parse_result is None:
        return []

    tweet_result = parse_result['tweet_result']
    tweet_legacy = parse_result['tweet_legacy']
    tweet_user = parse_result['tweet_user']

    result = parse_tweet_legacy_medias(tweet_result, tweet_legacy, tweet_user)

    # A quoted tweet carries its own media; harvest those too. Use .get so a
    # withheld/unavailable quoted tweet (missing legacy/core) is skipped
    # instead of raising KeyError.
    if tweet_result.get("quoted_status_result"):
        tweet_result_quoted = tweet_result["quoted_status_result"]["result"]
        tweet_legacy_quoted = tweet_result_quoted.get("legacy", {})
        tweet_user_quoted = tweet_result_quoted.get("core", {}).get("user_results", {}).get("result", {})
        result.extend(parse_tweet_legacy_medias(tweet_result_quoted, tweet_legacy_quoted, tweet_user_quoted))

    return result

def get_all_posts(user_id):
    """
    Build the UserTweets GraphQL URL that lists a user's timeline posts.

    :param user_id: numeric user id (int or str) whose timeline to fetch
    :return: fully-encoded request URL
    """
    # GraphQL "variables" payload; count is the page size.
    variables = {
        "userId": user_id,
        "count": 20,
        "includePromotedContent": True,
        "withQuickPromoteEligibilityTweetFields": True,
        "withVoice": True,
        "withV2Timeline": True,
    }

    # Feature flags expected by the UserTweets endpoint.
    features = {
        'responsive_web_graphql_exclude_directive_enabled': True,
        'verified_phone_label_enabled': False,
        'creator_subscriptions_tweet_preview_api_enabled': True,
        'responsive_web_graphql_timeline_navigation_enabled': True,
        'responsive_web_graphql_skip_user_profile_image_extensions_enabled': False,
        'c9s_tweet_anatomy_moderator_badge_enabled': True,
        'tweetypie_unmention_optimization_enabled': True,
        'responsive_web_edit_tweet_api_enabled': True,
        'graphql_is_translatable_rweb_tweet_is_translatable_enabled': True,
        'view_counts_everywhere_api_enabled': True,
        'longform_notetweets_consumption_enabled': True,
        'responsive_web_twitter_article_tweet_consumption_enabled': False,
        'tweet_awards_web_tipping_enabled': False,
        'freedom_of_speech_not_reach_fetch_enabled': True,
        'standardized_nudges_misinfo': True,
        'tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled': True,
        'rweb_video_timestamps_enabled': True,
        'longform_notetweets_rich_text_read_enabled': True,
        'longform_notetweets_inline_media_enabled': True,
        'responsive_web_media_download_video_enabled': False,
        'responsive_web_enhance_cards_enabled': False
    }

    url_base = 'https://twitter.com/i/api/graphql/V1ze5q3ijDS1VeLwLY0m7g/UserTweets'

    # The base URL carries no query string, so these parameters ARE the
    # whole query — no merging with parse_qs of an empty query is needed.
    query = urlencode({
        "variables": json.dumps(variables),
        "features": json.dumps(features),
    })
    url = urlunparse(urlparse(url_base)._replace(query=query))
    print(url)

    return url


def get_twitter_url_by_id(focal_tweet_id):
    """
    Build the TweetDetail GraphQL URL for a single tweet.

    :param focal_tweet_id: id of the focal tweet (int or str)
    :return: fully-encoded request URL
    """
    TweetDetailQueryId = "xOhkmRac04YFZmOzU9PJHg"

    # GraphQL "variables" payload for the TweetDetail query.
    variables = {
        "focalTweetId": focal_tweet_id,
        "with_rux_injections": False,
        "includePromotedContent": True,
        "withCommunity": True,
        "withQuickPromoteEligibilityTweetFields": True,
        "withBirdwatchNotes": True,
        "withVoice": True,
        "withV2Timeline": True
    }

    # Feature flags expected by the TweetDetail endpoint.
    features = {
        "rweb_lists_timeline_redesign_enabled": True,
        "responsive_web_graphql_exclude_directive_enabled": True,
        "verified_phone_label_enabled": False,
        "creator_subscriptions_tweet_preview_api_enabled": True,
        "responsive_web_graphql_timeline_navigation_enabled": True,
        "responsive_web_graphql_skip_user_profile_image_extensions_enabled": False,
        "tweetypie_unmention_optimization_enabled": True,
        "responsive_web_edit_tweet_api_enabled": True,
        "graphql_is_translatable_rweb_tweet_is_translatable_enabled": True,
        "view_counts_everywhere_api_enabled": True,
        "longform_notetweets_consumption_enabled": True,
        "responsive_web_twitter_article_tweet_consumption_enabled": False,
        "tweet_awards_web_tipping_enabled": False,
        "freedom_of_speech_not_reach_fetch_enabled": True,
        "standardized_nudges_misinfo": True,
        "tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled": True,
        "longform_notetweets_rich_text_read_enabled": True,
        "longform_notetweets_inline_media_enabled": True,
        "responsive_web_media_download_video_enabled": False,
        "responsive_web_enhance_cards_enabled": False
    }

    fieldToggles = {
        "withArticleRichContentState": False
    }

    url_base = f'https://twitter.com/i/api/graphql/{TweetDetailQueryId}/TweetDetail'

    # The base URL carries no query string, so these parameters ARE the
    # whole query — no merging with parse_qs of an empty query is needed.
    query = urlencode({
        "variables": json.dumps(variables),
        "features": json.dumps(features),
        "fieldToggles": json.dumps(fieldToggles)
    })
    url = urlunparse(urlparse(url_base)._replace(query=query))
    print(url)

    return url

def get_twitter_content(twitter_url, focal_tweet_id):
    """
    Fetch a TweetDetail GraphQL response and extract its media descriptors.

    :param twitter_url: fully-built TweetDetail URL (see get_twitter_url_by_id)
    :param focal_tweet_id: id of the tweet to extract; coerced to str for
        entry-id matching inside get_tweet_medias
    :return: list of media dicts produced by get_tweet_medias
    """
    # SECURITY NOTE(review): hard-coded, account-specific credentials below
    # (bearer token, CSRF token, full session cookies). These belong in
    # configuration/secret storage, not in source control — anyone with this
    # file can act as this account.
    headers = {
        "Authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA",
        "X-Csrf-Token": "35c960cea14ee3b0c9c8097156274c58c4346ff015eb5a4095acf89bfbfe8eaba145372b9abf91fbb6fbbdc25da2b7e83234c617855a51ffc7699dd37f5d8558b81ff196c351b0b224c2ccaa1463dd54",
        "X-Twitter-Active-User": "yes",
        "X-Twitter-Client-Language": "en",
        "X-Twitter-Auth-Type": "OAuth2Session",
        "referer": "https://twitter.com/t_oo_r_oo",
        "x-client-uuid": "d1a25439-5986-4d78-8de9-a2f6a09cfe5c",
        "x-client-transaction-id": "DQRqI4SGm0DuZiX0RG7a7BShagdxTkOz9BhlCzJmx0vSJvNSfXyw0CLjwuFaGAQ52gbXNwxMff+61Gh2wlItow+sMxU6DA",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Cookie": '_ga=GA1.2.1367570655.1697763549; g_state={"i_l":0}; kdt=O69Q8mx0P3VRoKGqQM1WdwHCTgqbxG4aTfNSEeSi; dnt=1; guest_id=v1%3A170322831438300768; guest_id_marketing=v1%3A170322831438300768; guest_id_ads=v1%3A170322831438300768; _gid=GA1.2.844043204.1703468742; auth_token=2a37239250cd16246d524e7f06958aad429bccc2; ct0=35c960cea14ee3b0c9c8097156274c58c4346ff015eb5a4095acf89bfbfe8eaba145372b9abf91fbb6fbbdc25da2b7e83234c617855a51ffc7699dd37f5d8558b81ff196c351b0b224c2ccaa1463dd54; twid=u%3D1715171194520162304; att=1-6DWMfcDTclXM4hY39ZkaLBvp0FIekvKNcpb5vMGd; lang=en; personalization_id="v1_//i6bdWpsOcNlBohQZNqjA=="'
    }

    response = req.request('get', twitter_url, headers=headers, verify=True)
    # Debug output: dumps status and the entire raw response body to stdout.
    print(response.status_code)
    print(response.text)

    content = get_tweet_medias(response.text, str(focal_tweet_id))

    return content

def download_media(content):
    """
    Download every media entry to the local 'files/' directory.

    :param content: list of media dicts as produced by get_tweet_medias;
        each entry must carry 'download_url' and 'tweet_text'
    :return: None
    """
    # One saver serves all downloads; create it lazily so an empty list does
    # no work at all. (Previously a new ScrapyDataSaver was constructed for
    # every entry, and the filename was computed twice per entry.)
    scrapy_saver = None

    for entry in content:
        download_url = entry["download_url"]
        tweet_text = entry["tweet_text"]

        parsed = urlparse(download_url)
        # File name from the URL path, e.g. "abc.jpg".
        filename = os.path.basename(parsed.path)
        # Prefer the explicit ?format= query parameter for the extension.
        query_params = parse_qs(parsed.query)
        file_format = query_params.get('format', [''])[0]
        save_name = safe_file_name(tweet_text)
        if file_format:
            source_file_path = get_abs_file_path(f'files/{save_name}.{file_format}')
        else:
            # No format parameter: fall back to the path's own extension.
            file_extension = os.path.splitext(filename)[1]
            source_file_path = get_abs_file_path(f'files/{save_name}{file_extension}')

        if scrapy_saver is None:
            scrapy_saver = ScrapyDataSaver()
        scrapy_saver.save_to_media(download_url, source_file_path)



if __name__ == '__main__':
    # Example: fetch a single tweet's media by id.
    # twitter_url = get_twitter_url_by_id(1738565434050515329)
    # content = get_twitter_content(twitter_url, 1738565434050515329)
    # download_media(content)

    # Ad-hoc smoke test: build the timeline URL for a hard-coded user id and
    # dump the raw response to stdout.
    twitter_url = get_all_posts(1303687630299643904)
    # SECURITY NOTE(review): hard-coded, account-specific credentials below
    # (bearer token, CSRF token, full session cookies) — duplicated from
    # get_twitter_content. They belong in configuration/secret storage, not
    # in source control.
    headers = {
        "Authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA",
        "X-Csrf-Token": "35c960cea14ee3b0c9c8097156274c58c4346ff015eb5a4095acf89bfbfe8eaba145372b9abf91fbb6fbbdc25da2b7e83234c617855a51ffc7699dd37f5d8558b81ff196c351b0b224c2ccaa1463dd54",
        "X-Twitter-Active-User": "yes",
        "X-Twitter-Client-Language": "en",
        "X-Twitter-Auth-Type": "OAuth2Session",
        "referer": "https://twitter.com/t_oo_r_oo",
        "x-client-uuid": "d1a25439-5986-4d78-8de9-a2f6a09cfe5c",
        "x-client-transaction-id": "DQRqI4SGm0DuZiX0RG7a7BShagdxTkOz9BhlCzJmx0vSJvNSfXyw0CLjwuFaGAQ52gbXNwxMff+61Gh2wlItow+sMxU6DA",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Cookie": '_ga=GA1.2.1367570655.1697763549; g_state={"i_l":0}; kdt=O69Q8mx0P3VRoKGqQM1WdwHCTgqbxG4aTfNSEeSi; dnt=1; guest_id=v1%3A170322831438300768; guest_id_marketing=v1%3A170322831438300768; guest_id_ads=v1%3A170322831438300768; _gid=GA1.2.844043204.1703468742; auth_token=2a37239250cd16246d524e7f06958aad429bccc2; ct0=35c960cea14ee3b0c9c8097156274c58c4346ff015eb5a4095acf89bfbfe8eaba145372b9abf91fbb6fbbdc25da2b7e83234c617855a51ffc7699dd37f5d8558b81ff196c351b0b224c2ccaa1463dd54; twid=u%3D1715171194520162304; att=1-6DWMfcDTclXM4hY39ZkaLBvp0FIekvKNcpb5vMGd; lang=en; personalization_id="v1_//i6bdWpsOcNlBohQZNqjA=="'
    }
    response = req.request('get', twitter_url, headers=headers, verify=True)
    print(response.status_code)
    print(response.text)






