import logging
from datetime import datetime

from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.common import obj_to_query_string, safe_file_name
from utils.format import format_url_by_template
from utils.os_main import create_directory, get_abs_file_path

# Configure the root logger once at import time (timestamp - level - message).
# NOTE(review): basicConfig here affects the whole process, not just this module —
# confirm this is intended rather than a module-level logging.getLogger(__name__).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class InsScraper(BlogMiddle):
    """Scraper for a single Instagram user's feed, built on the shared BlogMiddle base.

    Pages through the user feed API, extracts per-post captions and media URLs,
    and (optionally) uploads the media files to a configured server.
    """

    def __init__(self, json_file_path=None, params=None):
        super().__init__(json_file_path, params)

    def get_post_items(self, posts):
        """
        Extract and normalize post information from an Instagram feed API response.

        :param posts: dict as returned by the feed API; posts live under 'items'
        :return: list of dicts, each with 'caption' (str), 'media' (list of media
                 descriptors) and 'taken_at' (datetime or None)
        """
        items = []

        for item in posts.get('items', []):
            # 'caption' can be missing OR present-but-None, so a plain .get()
            # default is not enough — coalesce None to an empty mapping.
            caption = (item.get('caption') or {}).get('text', '')

            media = []  # media descriptors collected for this post

            if 'video_versions' in item:
                # Video post: use the first listed rendition.
                video = item['video_versions'][0]
                if video:
                    media.append({
                        'id': video['id'],
                        'code': item['code'],
                        'video': True,
                        'width': video['width'],
                        'height': video['height'],
                        'url': video['url']
                    })
            elif 'carousel_media' in item:
                # Carousel post: one media entry per slide.
                for slide in item['carousel_media']:
                    info = self.get_media_info(slide)
                    if info:
                        info['code'] = item['code']
                        media.append(info)
            else:
                # Single-image post.
                info = self.get_media_info(item)
                if info:
                    info['code'] = item['code']
                    media.append(info)

            # 'taken_at' is a unix timestamp; convert to a (naive, local-time)
            # datetime when present.
            taken_at = item.get('taken_at')
            taken_at = datetime.fromtimestamp(taken_at) if taken_at else None

            items.append({
                'caption': caption,
                'media': media,
                'taken_at': taken_at
            })

        return items

    def get_posts(self, max_id=None):
        """
        Fetch one page of the user's posts.

        :param max_id: optional paging cursor (largest post id for the next page)
        :return: dict with the raw feed API response
        """
        params = {'count': 12}
        if max_id:
            params['max_id'] = max_id

        # Prefer the by-user-id endpoint once the numeric id is known
        # (scrape_whole_page fills it in after the first response);
        # otherwise fall back to the by-username endpoint.
        template = self.search_by_userid if self.user_id else self.search_by_username
        self.search_url = format_url_by_template(template, self.__dict__)

        return self._execute_api(url=self.search_url, params=params)

    def _execute_api(self, method='GET', url='', params=None, data=None):
        """Issue an HTTP request and return the parsed JSON body.

        :param method: HTTP method name, default 'GET'
        :param url: request URL; params (if any) are appended as a query string
        :param params: optional dict serialized into the query string
        :param data: optional request body
        :raises: whatever the underlying client raises for non-2xx responses
                 (via raise_for_status)
        """
        if params:
            url += '?' + obj_to_query_string(params)

        response = req.request(method, url, headers=self.headers, data=data, verify=True)
        response.raise_for_status()
        return response.json()

    def scrape_whole_page(self, search_key, counter=None):
        """
        Scrape every post of one user and save the media files.

        :param search_key: search keyword, normally the user id or username
        :param counter: shared multiprocessing counter; required when
                        self.enable_multi_process is truthy
        """
        self.search_key = search_key
        next_max_id = None

        # Local directory for downloaded media files.
        create_directory(get_abs_file_path(f'{self.target_dir}/{self.search_key}'))

        # Page through the feed until the API reports no more posts.
        while True:
            posts = self.get_posts(next_max_id)
            next_max_id = posts.get('next_max_id')
            # Remember the numeric user id so subsequent pages can use the
            # by-user-id endpoint; tolerate a missing/None 'user' object
            # (the original crashed with AttributeError in that case).
            self.user_id = (posts.get('user') or {}).get('pk')

            items = self.get_post_items(posts)

            # Download and save every media entry of every post on this page.
            for post in items:
                count = 0
                for media_item in post['media']:
                    count += 1
                    file_type = 'jpg' if not media_item['video'] else 'mp4'
                    caption_name = safe_file_name(post['caption']) + str(count)

                    if self.enable_save_database:
                        server_save_path = self.database_config_param['server_save_path']
                        file_name = f'{caption_name}.{file_type}'
                        save_path = f'{server_save_path}/{self.script_name}/{search_key}'
                        file_path = f'{save_path}/{file_name}'

                        if not self.saver.check_file_exists(file_path):
                            self.saver.save_file_to_server(media_item['url'], file_name, save_path)

                            if self.enable_multi_process:
                                # Update the shared cross-process counter atomically.
                                with counter.get_lock():
                                    counter.value += 1
                            else:
                                self.resulter.count += 1

                        # File already exists: with "new only" enabled, assume the
                        # rest of this post's media exists too and stop early.
                        elif self.enable_new_only:
                            break

            # Stop once no more posts are available or there is no next-page cursor.
            if not posts.get('more_available') or not posts.get('next_max_id'):
                print('finished')
                self.user_id = None
                break

    def get_media_info(self, m):
        """
        Extract image information from a single media item.

        :param m: media item dict (a whole post or one carousel slide)
        :return: dict with 'id', 'video', 'width', 'height', 'url',
                 or None when no usable URL can be found
        """
        media_id = m.get('id')  # local renamed so it no longer shadows builtin id()
        original_height = m.get('original_height')
        original_width = m.get('original_width')

        original_media_url = None
        candidates = m.get('image_versions2', {}).get('candidates', [])

        # Prefer the candidate whose dimensions match the original exactly.
        for candidate in candidates:
            if candidate.get('width') == original_width and candidate.get('height') == original_height:
                original_media_url = candidate.get('url')
                break

        # Otherwise fall back to the first candidate, if any exist.
        if not original_media_url and candidates:
            original_media_url = candidates[0].get('url')

        # No usable URL at all — caller skips this media item.
        if not original_media_url:
            return None

        return {
            'id': media_id,
            'video': False,  # only image candidates reach here; videos are handled upstream
            'width': original_width,
            'height': original_height,
            'url': original_media_url
        }


if __name__ == '__main__':
    # Ad-hoc manual experiments kept from development; the normal entry point
    # would be the two lines below.
    # scraper = InsScraper('scrapy_main/scripts/instagram.json')
    # scraper.start_scraper()

    # (kept for reference) raw Instagram feed request with browser-like headers:
    # result = req.request('get',
    #                      "https://i.instagram.com/api/v1/feed/user/mirei_kiritani_/username/?count=12",
    #                      headers={
    #                          "origin": "https://www.instagram.com",
    #                          "referer": "https://www.instagram.com/",
    #                          "x-asbd-id": "198387",
    #                          "x-csrftoken": "undefined",
    #                          "x-ig-app-id": "936619743392459",
    #                          "x-ig-www-claim": "0",
    #                          "x-instagram-ajax": "1009996214",
    #                          "x-requested-with": "XMLHttpRequest"
    #                      })
    # print(result.code)
    # print(result.text)
    # (kept for reference) pixiv profile-illusts AJAX request:
    # result = req.request('get',
    #                      "https://www.pixiv.net/ajax/user/1980643/profile/illusts?ids[]=106524316&ids[]=104735978&ids[]=100440006&ids[]=99328441&ids[]=98322260&ids[]=98158966&ids[]=95252574&ids[]=94219701&ids[]=91953835&ids[]=90831251&ids[]=88940194&ids[]=86379945&ids[]=84026087&ids[]=83736962&ids[]=83355065&ids[]=82581918&ids[]=82028470&ids[]=81648738&ids[]=79386917&ids[]=78491726&ids[]=74279906&ids[]=74187223&ids[]=71982822&ids[]=71962138&ids[]=71907145&ids[]=71169136&ids[]=67994735&ids[]=66889092&ids[]=65500049&ids[]=65453159&ids[]=65165749&ids[]=64758565&ids[]=64374689&ids[]=63134826&ids[]=61845269&ids[]=60515982&ids[]=60224597&ids[]=59282386&ids[]=59035424&ids[]=58784917&ids[]=58272941&ids[]=58039737&ids[]=57081324&ids[]=56137393&ids[]=55314483&ids[]=53868327&ids[]=53457601&ids[]=52520072&work_category=illust&is_first_page=0&lang=ja",
    #                      )
    # Live test: fetch a pixiv thumbnail (pixiv requires a matching Referer header).
    result = req.request('get',
                         "https://i.pximg.net/c/250x250_80_a2/img-master/img/2023/03/25/00/01/14/106524316_p0_square1200.jpg"
                         , headers={
            'referer': 'https://www.pixiv.net'
        })
    # NOTE(review): 'code' is presumably the status code exposed by the custom req
    # wrapper; a plain requests.Response uses 'status_code' — confirm against the
    # wrapper before relying on this.
    print(result.code)
    print(result.text)
