import json
import logging

from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.common import safe_file_name
from utils.format import format_url_by_template
from utils.os_main import create_directory, get_abs_file_path, get_base_name

# Configure root logging once at import time; all scraper output inherits
# this level/format. NOTE(review): basicConfig affects the whole process —
# confirm no other entry point configures logging first (later calls are no-ops).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class PixivScraper(BlogMiddle):
    """Scraper for Pixiv artist pages, built on the BlogMiddle pipeline.

    Resolves an artist from a search key, enumerates their illustrations via
    Pixiv's ajax endpoints, and saves each original-resolution image through
    the configured saver.
    """

    def __init__(self, json_file_path=None, params=None):
        # Delegates all configuration loading to BlogMiddle.
        super().__init__(json_file_path, params)

    def get_person_info(self):
        """Resolve the artist's display name from ``self.search_key``.

        Sets ``self.person_name``. Falls back to the raw search key when the
        request fails, the payload cannot be parsed, or no name is present.
        """
        # Fill the search URL template from this instance's attributes.
        search_url = format_url_by_template(self.search_url, self.__dict__)

        person_info = req.request('get', search_url, headers=self.headers, verify=True)

        # Default to the search key; overwritten on a successful lookup.
        self.person_name = self.search_key

        if person_info.status_code == 200:
            try:
                body = json.loads(person_info.text).get('body')
                name = body.get('name')
                # Only overwrite the fallback when a real name was returned,
                # so a missing name never puts 'None' into save paths.
                if name:
                    self.person_name = name
            except (json.JSONDecodeError, AttributeError):
                # Unparseable payload or missing 'body': keep the fallback.
                pass

    def get_posts(self):
        """Fetch the artist's works listing and return the decoded JSON."""
        paging_url = format_url_by_template(self.paging_url, self.__dict__)
        return self._execute_api(url=paging_url)

    def _execute_api(self, method='GET', url=''):
        """Issue an HTTP request and return the decoded JSON payload.

        Raises the client's HTTP error (via ``raise_for_status``) on a
        non-success status code.
        """
        response = req.request(method, url, headers=self.headers, verify=True)
        response.raise_for_status()
        return response.json()

    def scrape_whole_page(self, search_key, counter=None):
        """Scrape every illustration of the artist matching *search_key*.

        Args:
            search_key: Pixiv user identifier / keyword used to resolve
                the artist and fill the URL templates.
            counter: optional shared multiprocessing counter, incremented
                once per saved file when ``enable_multi_process`` is set.
        """
        self.search_key = search_key
        self.get_person_info()

        # Ensure the local target directory exists before any downloads.
        create_directory(get_abs_file_path(f'{self.target_dir}/{self.person_name}'))

        # Guard the chained lookups so a bad/empty response yields an empty
        # mapping instead of raising AttributeError on None.
        body = self.get_posts().get('body') or {}
        illusts = body.get('illusts') or {}

        # Only the illustration ids are needed; the mapping values are unused.
        for illust_id in illusts:
            if not illust_id:
                continue

            # The detail endpoints are templated on this key.
            self.detail_key = illust_id

            img_title = self._get_image_title()
            urls = self._get_image_urls()

            for original_img in urls:
                safe_name = safe_file_name(get_base_name(original_img))
                file_name = f'{img_title}_{safe_name}'

                if not self.enable_save_database:
                    continue

                server_save_path = self.database_config_param['server_save_path']
                save_path = f'{server_save_path}/{self.script_name}/{self.person_name}'
                file_path = f'{save_path}/{file_name}'

                if not self.saver.check_file_exists(file_path):
                    self.saver.save_file_to_server(original_img, file_name, save_path, headers=self.headers)

                    # Track progress: shared counter across processes, or the
                    # in-process result counter otherwise.
                    if self.enable_multi_process:
                        with counter.get_lock():
                            counter.value += 1
                    else:
                        self.resulter.count += 1

                # File already saved and incremental mode is on: stop this work.
                elif self.enable_new_only:
                    break

    def _get_image_title(self):
        """Return the title of the work identified by ``self.detail_key``."""
        detail_info_url = format_url_by_template(self.detail_info_url, self.__dict__)
        results = self._execute_api(url=detail_info_url)
        return results.get('body').get('works').get(self.detail_key).get('title')

    def _get_image_urls(self):
        """Return the original-resolution image URLs for ``self.detail_key``."""
        detail_url = format_url_by_template(self.detail_url, self.__dict__)
        results = self._execute_api(url=detail_url)
        return [item.get('urls').get('original') for item in results.get('body')]


if __name__ == '__main__':
    # Run the scraper with the bundled Pixiv script configuration.
    scraper = PixivScraper('scrapy_main/scripts/pixiv.json')
    scraper.start_scraper()
