# -*- coding: UTF-8 -*-
# 2023/11/26 23:31
import os
from concurrent.futures import ThreadPoolExecutor

import requests
from retry import retry

from tools.get_configer import my_configer
from tools.get_logger import get_logger

# Module-level singletons created at import time: the project config accessor
# (not referenced in this chunk — presumably used elsewhere or by importers;
# TODO confirm) and the logger used by everything below.
# NOTE(review): names "configer"/"loger" are misspellings of "configurer"/"logger"
# but are part of the module's public surface, so they are kept as-is.
configer = my_configer()
loger = get_logger()


class computer_tool:
    """Fetch the 360 wallpaper categories and download one cover image per category.

    After construction, ``self.class_id_cover_url_name_save_path_list`` holds
    one ``[class_id, cover_img_url, class_name, cover_save_path]`` entry per
    category.
    """

    def __init__(self):
        self.get_class()

    def get_class(self):
        """
        Fetch all categories, then fetch the first image of each category page
        as its cover (8 worker threads). Results are accumulated in
        self.class_id_cover_url_name_save_path_list.
        """
        class_url = "http://wallpaper.apc.360.cn/index.php?c=WallPaper&a=getAllCategoriesV2&from=360chrome&adult=false&first=1&order=hot"
        # Timeout so a stalled server cannot hang start-up forever.
        response = requests.get(class_url, timeout=30).json()
        loger.info(f"电脑壁纸请求分类结果:{response}")
        # Keep only the fields we need: [id, name] per category.
        self.class_id_name_list = [[class_info["id"], class_info["name"]] for class_info in response["data"]]

        # Filled concurrently by the worker threads below.
        # list.append is atomic in CPython, so no explicit lock is needed.
        self.class_id_cover_url_name_save_path_list = list()

        with ThreadPoolExecutor(max_workers=8) as computer_cover_pool:
            for class_id_name in self.class_id_name_list:
                get_classinfo_future = computer_cover_pool.submit(self.get_class_info, class_id_name)
                # Attach the category info BEFORE registering the callback:
                # a fast-finishing task may run the callback immediately, and
                # the callback reads this attribute.
                setattr(get_classinfo_future, "class_id_name", class_id_name)
                get_classinfo_future.add_done_callback(self.get_class_info_callback)
            # Leaving the `with` block already performs shutdown(wait=True),
            # so no explicit shutdown call is needed.

    def get_class_info(self, class_id_name):
        """
        Fetch the first wallpaper of a category and cache it as the cover.

        :param class_id_name: [category_id, category_name] pair
        Appends [id, cover_url, name, save_path] to the shared result list.
        """
        class_page_url = f"http://wallpaper.apc.360.cn/index.php?c=WallPaper&a=getAppsByCategory&cid={class_id_name[0]}&start=0&count=1&from=360chrome"
        class_page_response = requests.get(class_page_url, timeout=30).json()

        class_cover_img_save_path = os.path.abspath(f"./cache/computer_cover/{class_id_name[0]}.jpg")
        class_cover_img_url = class_page_response["data"][0]["img_1024_768"]

        # Ensure the cache directory exists before the first download.
        os.makedirs(os.path.dirname(class_cover_img_save_path), exist_ok=True)

        # Download each cover only once; reuse the on-disk copy afterwards.
        if not os.path.isfile(class_cover_img_save_path):
            self.download_img(class_cover_img_url, class_cover_img_save_path)
            loger.info(f"{class_id_name}下载封面,保存位置:{class_cover_img_save_path},并将封面信息添加到总分类信息列表")
        else:
            loger.info(f"{class_id_name}已下载,下载忽略.保存位置:{class_cover_img_save_path},并将封面信息添加到总分类信息列表")

        self.class_id_cover_url_name_save_path_list.append([class_id_name[0], class_cover_img_url, class_id_name[1], class_cover_img_save_path])

    def get_class_info_callback(self, task_future):
        """
        Done-callback for get_class_info futures: log success or the worker's error.

        Fix: result() must be called INSIDE the try — it re-raises any exception
        the worker raised. Previously it was called before the try, so failures
        escaped and the except branch was unreachable.
        """
        class_id_name = getattr(task_future, "class_id_name")
        try:
            task_future_result = task_future.result()  # re-raises the worker's exception, if any
            # get_class_info returns None on success (cover downloaded or already cached)
            if task_future_result is None:
                loger.info(f"获取分类信息线程回调id_name:{class_id_name}, 线程无错误,封面下载成功或已下载")
        except Exception:
            task_future_exception = task_future.exception()
            loger.error(f"获取分类信息以及下载线程遇到错误,报错信息为:{type(task_future_exception)}-id_name:{class_id_name}")

    @retry(tries=5)
    def download_img(self, url: str, save_path: str) -> None:
        """
        Download the image at `url` and write it to `save_path`.

        raise_for_status() prevents HTTP error pages from being saved to disk
        as "images"; the raised exception also triggers the @retry decorator
        (up to 5 attempts), which previously never fired on HTTP errors.

        :param url: image url
        :param save_path: destination file path
        :return: None
        """
        with requests.get(url, timeout=30) as r:
            r.raise_for_status()
            with open(save_path, "wb") as f:
                f.write(r.content)

        loger.info(f"图片保存成功,保存位置:{save_path},图片url:{url}")


def get_page_img(class_id: str, pageNo: int, page_sum: int):
    """
    Fetch one page of wallpaper info for a category.

    :param class_id: category id
    :param pageNo: page number (0-based; request offset is page_sum * pageNo)
    :param page_sum: number of images per page
    :return: list of per-image dicts (see _extract_img_info for the shape)
    """
    class_page_url = f"http://wallpaper.apc.360.cn/index.php?c=WallPaper&a=getAppsByCategory&cid={class_id}&start={page_sum * pageNo}&count={page_sum}&from=360chrome"
    # Timeout so a stalled server cannot hang the caller forever.
    response = requests.get(class_page_url, timeout=30).json()
    loger.info(f"分类ID:{class_id},每页{page_sum}张,第{pageNo}页请求结果:{response}")

    img_list = [_extract_img_info(img_info) for img_info in response["data"]]

    loger.info(f"截取分类ID:{class_id},每页{page_sum}张,第{pageNo}页结果:{img_list}")
    return img_list


def _extract_img_info(img_info: dict) -> dict:
    """
    Normalize one raw wallpaper record into the dict shape used downstream:
    {img_tag, class_id, img_id, url_key_list, img_url} where img_url maps
    resolution keys to URLs and url_key_list lists them in priority order.
    """
    img_dict = dict()
    url_dict = dict()
    url_key_list = list()

    # Clean the tag: drop the "category"/"全部" markers, turn spaces into
    # underscores, then collapse ANY run of underscores (the original chained
    # four .replace("__", "_") calls, which only handled runs up to ~16 chars).
    tag = img_info["tag"].replace("category", "").replace("全部", "").replace(" ", "_")
    while "__" in tag:
        tag = tag.replace("__", "_")
    img_dict["img_tag"] = tag.strip("_")

    img_dict["class_id"] = img_info["class_id"]
    img_dict["img_id"] = f"{img_info['class_id']}_{img_info['id']}"
    img_dict["url_key_list"] = url_key_list
    img_dict["img_url"] = url_dict

    # Native (largest) resolution: key name depends on the record's own
    # "resolution" field. NOTE(review): if 'resolution' itself is missing,
    # the warning below would also raise — assumed always present; confirm.
    try:
        url_dict[f"img_{img_info['resolution']}"] = img_info["url"]
        url_key_list.append(f"img_{img_info['resolution']}")
    except KeyError:
        loger.warning(f"未匹配最大尺寸,最大尺寸为{img_info['resolution']}, json对应img_info:{img_info}")

    # Fixed fallback resolutions, highest first. One loop replaces six
    # copy-pasted try/except blocks; log messages are unchanged.
    for size_key in ("img_1600_900", "img_1440_900", "img_1366_768",
                     "img_1280_800", "img_1280_1024", "img_1024_768"):
        try:
            url_dict[size_key] = img_info[size_key]
            url_key_list.append(size_key)
        except KeyError:
            loger.warning(f"未匹配到尺寸{size_key}, json对应img_info:{img_info}")

    return img_dict
