import sys
import json
from time import sleep
import requests
import re
import os
from bs4 import BeautifulSoup
from selenium.webdriver.edge.service import Service as EdgeService
from selenium.webdriver.chrome.service import Service as ChromeService
from tqdm.auto import tqdm
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from loguru import logger
from PIL import Image
import shutil
from concurrent.futures import ThreadPoolExecutor
# GitHub proxy setting (pass "--proxy <url>" on the command line)
GIT_PROXY = ""
if "--proxy" in sys.argv:
    GIT_PROXY = sys.argv[sys.argv.index("--proxy") + 1]

# Picture-ID map (pool id -> picture info); populated by get_picture_id() in main()
PIC_ID = {}
# Directory where downloaded pool pictures are stored
SAVED_DIR = "img/poolPic"
# Directory where compressed pictures are written
COMPRESSED_DIR = "img/poolPicCompressed"
# Target image conversion format
IMAGE_FORMAT = "webp"
# Browser used to drive selenium
# Edge, Chrome
BROWSER = "Chrome"
# Manual renames: pool ids whose banner-file name on the wiki is wrong/ambiguous
PICTURE_ID_FIX = {
    "NORM_0_1_3": "银灰色的荣耀",
    "NORM_1_0_1": "搅动潮汐之剑",
    "NORM_2_0_1": "鞘中赤红",
    "NORM_2_0_3": "龙门特别行动专员寻访",
    "NORM_3_0_1": "深夏的守夜人",
    "NORM_3_0_3": "久铸尘铁",
    "NORM_3_0_5": "火舞之人",
    "NORM_4_0_1": "冰封原野",
    "NORM_4_0_4": "联合行动01",
    "NORM_5_0_1": "锁与匙的守卫者",
    "NORM_5_0_3": "凝电之钻",
    "NORM_6_0_1": "热情，膨胀，爆发！",
    "NORM_6_0_4": "地生五金",
    "NORM_7_0_1": "百种兵器",
    "NORM_9_0_5": "联合行动02",
    "NORM_18_0_5": "联合行动04",
    "NORM_20_0_3": "燃钢之心_暴躁铁皮_复刻",
    "NORM_58_0_3": "联合行动18",
    "LIMITED_16_0_1": "地生五金2021",
    "LIMITED_16_0_4": "月隐晦明",
    "ATTAIN_24_0_3": "跨年欢庆_回首",
    "ATTAIN_34_0_3": "跨年欢庆_相逢",
    "ATTAIN_45_0_5": "跨年欢庆_展望",
    "CLASSIC_48_0_1": "中坚干员轮换卡池21",
    "CLASSIC_54_0_3": "中坚干员轮换卡池33"
}
# Wiki pages listing pool banner pictures
POOL_PICTURE_PAGES = [
    "https://prts.wiki/w/卡池一览/常驻中坚寻访%26中坚甄选/2025",
    "https://prts.wiki/w/卡池一览/限时寻访",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2025",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2024",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2023",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2022",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2021",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2020",
    "https://prts.wiki/w/卡池一览/常驻标准寻访/2019",
    "https://prts.wiki/w/卡池一览/常驻中坚寻访%26中坚甄选/2024",
    "https://prts.wiki/w/卡池一览/常驻中坚寻访%26中坚甄选/2023"
]
# Extra picture URLs not covered by the pages above
ADDITIONAL_PAGES = [
    {"url": "https://media.prts.wiki/3/3d/专属推荐干员寻访.png", "name": f"专属推荐干员寻访.{IMAGE_FORMAT}"},
    {"url": "https://media.prts.wiki/4/4c/专属推荐干员寻访02.png", "name": f"专属推荐干员寻访02.{IMAGE_FORMAT}"},
    {"url": "https://media.prts.wiki/f/fd/活动预告_夏活2024_10.jpg", "name": f"活动预告_夏活2024_10.{IMAGE_FORMAT}"}
]

# Logging configuration: rotating INFO / DEBUG file sinks plus colored stdout
logger.configure(handlers=[
    {
        "sink": "logs/get_resource.log",
        "format": "{time:YYYY-MM-DD HH:mm:ss.SSS} |{level:8}| {name} : {module}:{line:4} | - {message}",
        "level": "INFO",
        "rotation": "1 MB"
    },
    {
        "sink": "logs/get_resource_debug.log",
        "format": "{time:YYYY-MM-DD HH:mm:ss.SSS} |{level:8}| {name} : {module}:{line:4} | - {message}",
        "level": "DEBUG",
        "rotation": "1 MB"
    },
    {
        "sink": sys.stdout,
        "level": "INFO",
        "format": "{time:YYYY-MM-DD HH:mm:ss.SSS} |<lvl>{level:8}</>| {name} : {module}:{line:4} | - <lvl>{message}</>",
        "colorize": True
    }
])


@logger.catch
def get_picture_id():
    """Scrape the pool-id -> banner-picture-name mapping from the prts wiki.

    Drives a real browser (the search results need JS to render), then parses
    every search result for its ``gachaPoolId`` / ``gachaBannerFile`` pair and
    applies the manual renames from ``PICTURE_ID_FIX``.

    :return: dict mapping pool id to ``{"id": pool_id, "name": picture_name}``
    """
    picture_id_url = "https://prts.wiki/index.php?title=特殊:搜索&limit=1000&offset=0&ns0=1&ns6=1&ns3000=1&search=gachaPoolId"
    logger.info("正在初始化浏览器...")
    if BROWSER == "Edge":
        options = webdriver.EdgeOptions()
        # options.add_argument("--headless")
        # options.add_argument("--ignore-certificate-errors")
        service = EdgeService(EdgeChromiumDriverManager().install())
        driver = webdriver.Edge(service=service, options=options)
    elif BROWSER == "Chrome":
        options = webdriver.ChromeOptions()
        # options.add_argument("--headless")
        # options.add_argument("--ignore-certificate-errors")
        service = ChromeService(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=options)
    else:
        exit("指定浏览器错误")
    try:
        logger.info("正在打开页面...")
        driver.get(picture_id_url)
        sleep(10)
        logger.info("返回页面内容...")
        page_source = driver.page_source
    finally:
        logger.info("关闭页面")
        driver.quit()
    logger.info("正在解析页面内容...")
    html = BeautifulSoup(page_source, "html.parser")
    pic_id_map = {}
    for li in html.select("li.mw-search-result"):
        title = li.select_one("div.mw-search-result-heading > a").get("title")
        content = li.select_one("div.searchresult").text
        # skip "extra" pools
        if "extra" in title:
            continue
        # extract gachaPoolId and gachaBannerFile
        pool_id_match = re.search(r"gachaPoolId=([^|]*)", content)
        pic_name_match = re.search(r"gachaBannerFile=([^}]*)", content)
        # BUGFIX: .group(1) used to be called unconditionally, so a single
        # malformed search result aborted the whole parse with AttributeError
        if pool_id_match is None or pic_name_match is None:
            logger.debug(f"跳过无法解析的搜索结果: {title}")
            continue
        pool_id = pool_id_match.group(1)
        pic_name = pic_name_match.group(1)
        # strip the trailing file extension (BUGFIX: only when one exists —
        # rfind() == -1 used to silently chop the last character)
        dot = pic_name.rfind(".")
        if dot != -1:
            pic_name = pic_name[:dot]
        # normal rename first: spaces -> underscores
        pic_name = pic_name.replace(" ", "_")
        # then apply the special renames from the manual fix table
        if pool_id in PICTURE_ID_FIX:
            pic_name = PICTURE_ID_FIX[pool_id]
        logger.debug(f"卡池ID映射: {pool_id} => {pic_name}")
        pic_id_map[pool_id] = {
            "id": pool_id,
            "name": pic_name
        }
    return pic_id_map


@logger.catch
def build_pool_info():
    """Build the merged pool-info map from two gacha_table.json sources.

    Source 1 (weedy.prts.wiki) supplies limited/up operator details; source 2
    (ArknightsGameData) supplies names, open/close times and rule types.

    :return: dict mapping pool id -> merged pool info dict
    """
    logger.info("正在获取卡池信息1...")
    # timeout so a stalled connection cannot hang the whole run
    response = requests.get("https://weedy.prts.wiki/gacha_table.json", timeout=30)
    if response.status_code == 200:
        pool_info = response.json()
    else:
        logger.error("获取 https://weedy.prts.wiki/gacha_table.json 失败")
        exit()
    logger.info("正在获取卡池信息2...")
    response = requests.get(
        f"{GIT_PROXY}https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/zh_CN/gamedata/excel/gacha_table.json",
        timeout=30)
    if response.status_code == 200:
        pool_content = response.json()
    else:
        logger.error(
            "获取 https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/zh_CN/gamedata/excel/gacha_table.json 失败")
        exit()

    pool_info_map = {}
    logger.info("正在构建卡池信息...")
    for item in pool_info["gachaPoolClient"]:
        pool_info_map.setdefault(item["gachaPoolId"], {}).update({
            "id": item["gachaPoolId"],  # pool id
            "limitedChar": item["gachaPoolDetail"]["detailInfo"]["limitedChar"],  # limited operator info
            "upCharInfo": item["gachaPoolDetail"]["detailInfo"]["upCharInfo"]  # rate-up operator info
        })

    for item in pool_content["gachaPoolClient"]:
        entry = pool_info_map.setdefault(item["gachaPoolId"], {})
        entry.update({
            "id": item["gachaPoolId"],  # pool id
            "index": item["gachaIndex"],  # index id
            "openTs": item["openTime"],  # pool open time
            "endTs": item["endTime"],  # pool end time
            "name": item["gachaPoolName"],  # pool name
            "ruleType": item["gachaRuleType"],  # pool type
            "picName": ""  # banner picture name
        })
        # fill in the banner picture name when known
        if item["gachaPoolId"] in PIC_ID:
            entry["picName"] = PIC_ID[item["gachaPoolId"]]["name"]
        # normalize the generic standard-banner name
        if "适合多种场合的强力干员" in item["gachaPoolName"]:
            entry["name"] = "标准寻访"

    for item in pool_content["newbeeGachaPoolClient"]:
        # BUGFIX: guard the PIC_ID lookup (consistent with the loop above) —
        # a missing entry previously raised KeyError and aborted the build.
        pic = PIC_ID.get(item["gachaPoolId"])
        pool_info_map.setdefault(item["gachaPoolId"], {}).update({
            "id": item["gachaPoolId"],  # pool id
            "index": item["gachaIndex"],  # index id
            "name": "新手支援寻访",  # pool name
            "openTs": 0,  # pool open time
            "endTs": 0,  # pool end time
            "ruleType": "BOOT",  # pool type
            "picName": pic["name"] if pic else ""  # banner picture name
        })
    return pool_info_map


@logger.catch
def get_pictures(mode=False):
    """Scrape pool banner picture URLs from the wiki pages and download them.

    :param mode: when True, only the first three (most recent) pages of
                 POOL_PICTURE_PAGES are scraped.
    :return: None
    """
    # BUGFIX: copy ADDITIONAL_PAGES instead of aliasing it — the old code
    # appended every scraped URL into the module-level list, so a second call
    # would accumulate duplicates in the global.
    download_list = list(ADDITIONAL_PAGES)
    # "latest only" mode keeps the first three pages (中坚 / 限时 / 常驻)
    pages = POOL_PICTURE_PAGES[:3] if mode else POOL_PICTURE_PAGES
    for page_url in pages:
        logger.info(f"正在获取页面 {page_url} 的图片...")
        response = requests.get(page_url, timeout=30)
        if response.status_code != 200:
            logger.error(f"获取 {page_url} 失败，终止进程")
            exit()
        html = BeautifulSoup(response.text, "html.parser")
        tr_list = html.select(".mw-parser-output > table > tbody > tr")
        # the "限时寻访" page has no leading column to skip
        col = 0 if "限时寻访" in page_url else 1
        for tr in tr_list:
            tds = tr.find_all("td")
            # skip header rows (no <td> cells)
            if not tds:
                continue
            # the <td> holding the picture
            picture_td = tds[col]
            # picture name comes from the link title
            picture_name = picture_td.find("a").attrs["title"].replace(" ", "_")
            if "/" in picture_name:
                picture_name = picture_name.split("/")[1]
            # prefer the lazy-load data-src attribute, fall back to src
            img_attrs = picture_td.find("img").attrs
            if "data-src" in img_attrs:
                picture_url = img_attrs["data-src"]
            else:
                picture_url = img_attrs["src"]
            picture_url = picture_url[:picture_url.rfind("/")].replace("thumb/", "").replace("·", "_")
            # append the extension taken from the URL
            picture_name = f"{picture_name}.{picture_url.split('.')[-1]}"
            download_list.append({
                "url": picture_url,
                "name": picture_name
            })
    # download everything
    for item in tqdm(download_list, desc="下载图片中", unit="image"):
        # one picture is misnamed on the wiki
        if item["name"] == "干员轮换卡池72.png":
            item["name"] = "干员轮换卡池73.png"
        download_picture(item["url"], item["name"], SAVED_DIR)


@logger.catch
def download_picture(url, name, saved_dir):
    """
    Download one picture to disk, skipping files that already exist.

    :param url: picture download URL
    :param name: file name to save as
    :param saved_dir: directory to save into
    :return: None
    """
    # target file path
    picture_path = os.path.join(saved_dir, name)

    # make sure the target directory exists (exist_ok avoids the
    # check-then-create race of the old exists()/makedirs() pair)
    os.makedirs(saved_dir, exist_ok=True)

    # skip files that were already downloaded
    if os.path.exists(picture_path):
        return logger.debug(f"文件 {picture_path} 已存在，跳过下载")

    logger.opt(colors=True).info(f"<light-blue>正在下载 {name}...</>")
    # timeout so a stalled connection cannot hang the whole run
    response = requests.get(url, stream=True, timeout=30)
    if response.status_code != 200:
        logger.error(f"下载 {url} 失败，终止进程")
        exit()

    # BUGFIX: stream into a temp file and rename at the end — the old code
    # wrote directly to picture_path, so an interrupted download left a
    # partial file that later runs skipped as "already downloaded".
    tmp_path = picture_path + ".part"
    with open(tmp_path, "wb") as __f:
        for chunk in response.iter_content(1024):
            if chunk:  # filter out empty keep-alive chunks
                __f.write(chunk)
    os.replace(tmp_path, picture_path)


@logger.catch
def compress_images(input_dir, output_dir, format='jpg', quality=85, threads=4):
    """
    Compress every image in input_dir and write the results to output_dir.

    :param input_dir: source image directory
    :param output_dir: output directory
    :param format: output format (jpg|webp, default jpg)
    :param quality: image quality (0-100, default 85)
    :param threads: number of worker threads (default 4)
    :return: None
    """
    os.makedirs(output_dir, exist_ok=True)

    # BUGFIX: Pillow's format name is "JPEG", not "JPG" — the old
    # format.upper() crashed with an unknown-format error for format="jpg".
    pil_format = "JPEG" if format.lower() in ("jpg", "jpeg") else format.upper()

    def process_image(file_path):
        # convert a single image; runs on a worker thread
        with Image.open(file_path) as img:
            name, _ext = os.path.splitext(os.path.basename(file_path))
            # JPEG cannot store an alpha channel / palette — convert first
            if pil_format == "JPEG" and img.mode in ("RGBA", "LA", "P"):
                img = img.convert("RGB")
            new_file_path = os.path.join(output_dir, f"{name}.{format}")
            img.save(new_file_path, format=pil_format, quality=quality)

    files = [os.path.join(input_dir, file) for file in os.listdir(input_dir) if
             file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif'))]

    with ThreadPoolExecutor(max_workers=threads) as executor:
        # list() drains the lazy map so worker exceptions surface here
        list(tqdm(executor.map(process_image, files), total=len(files), desc="Compressing images"))


@logger.catch
def pic_name_fix():
    """Interactively verify that every pool's picture exists on disk.

    Reads resource/pool_info.json, checks each pool's (possibly manually
    fixed) picture name against COMPRESSED_DIR, and prompts the operator to
    remap any picture that is missing.

    :return: dict mapping pool id -> manually entered replacement file name
    """
    with open("resource/pool_info.json", "r", encoding="utf-8") as f:
        pool_info = json.load(f)
    # create the "null" placeholder picture file
    os.makedirs(COMPRESSED_DIR, exist_ok=True)
    null_path = os.path.join(COMPRESSED_DIR, "null")
    if not os.path.exists(null_path):
        with open(null_path, "w"):
            pass
    fix_disc = {}
    for pool_id, pool in pool_info.items():
        pic_name = f"{pool['picName']}.{IMAGE_FORMAT}"
        if pool["id"] in PICTURE_ID_FIX:
            pic_name = f"{PICTURE_ID_FIX[pool['id']]}.{IMAGE_FORMAT}"
        path = os.path.join(COMPRESSED_DIR, pic_name)
        if not os.path.exists(path) or pic_name == "" or pic_name == f".{IMAGE_FORMAT}":
            print(f"ID为 {pool['id']} 的图片 {pic_name} 不存在，请重新映射\r")
            while True:
                new_name = input("请输入新的图片名，或者输入skip跳过这个文件: ")
                if new_name == "skip":
                    break
                new_path = os.path.join(COMPRESSED_DIR, new_name)
                if not os.path.exists(new_path):
                    print(f"新指定的图片{new_name}不存在，请重新映射。ID为{pool['id']}\r")
                else:
                    break
            # BUGFIX: the old code recorded the literal string "skip" as a
            # picture name; only record genuine remappings.
            if new_name != "skip":
                fix_disc[pool["id"]] = new_name

    return fix_disc


@logger.catch
def pool_name_fix(pool_info):
    """Normalize pool display names based on each pool's banner picture name.

    :param pool_info: dict mapping pool id -> pool info dict (mutated in place)
    :return: the same pool_info dict
    """
    for pool_id, pool in pool_info.items():
        # rerun banners get a "·复刻" suffix
        if "复刻" in pool["picName"]:
            pool["name"] = pool["name"] + "·复刻"
        # returning banners get a "·返场" suffix
        if "返场" in pool["picName"]:
            pool["name"] = pool["name"] + "·返场"
        # joint-operation / kernel-selection banners use a fixed name
        # (BUGFIX: the old code checked "联合行动" twice; the duplicate is removed)
        if "联合行动" in pool["picName"]:
            pool["name"] = "联合行动"
        if "中坚甄选" in pool["picName"]:
            pool["name"] = "中坚甄选"
    return pool_info


@logger.catch
def build_char_info():
    """Download the character table and extract the fields this tool needs.

    :return: dict mapping character id -> selected character info
    """
    # download the latest character table
    char_info_url = f"{GIT_PROXY}https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/zh_CN/gamedata/excel/character_table.json"
    logger.info("正在获取角色信息...")
    # timeout so a stalled connection cannot hang the whole run
    response = requests.get(char_info_url, timeout=30)
    if response.status_code == 200:
        char_info_table = response.json()
        logger.info(f"获取角色信息成功")
    else:
        logger.error(f"获取{char_info_url}角色信息失败")
        exit()

    char_info = {}

    # keep only entries whose key contains "char" (skips tokens/traps etc.)
    for key, value in char_info_table.items():
        if "char" in key:
            char_info[key] = {
                "id": key,  # character id
                "name": value["name"],  # codename
                "appellation": value["appellation"],  # appellation (presumably the romanized codename — verify)
                "mainPower": value["mainPower"],  # main attribute
                "subPower": value["subPower"],  # sub attribute
                "displayNumber": value["displayNumber"],  # operator number
                "itemObtainApproach": value["itemObtainApproach"],  # how the operator is obtained
                "isSpChar": value["isSpChar"],  # whether this is an alter operator
                "rarity": value["rarity"],  # rarity
                "profession": value["profession"]  # class
            }
    return char_info


@logger.catch
def get_operator_pictures():
    """Download operator avatar pictures from the prts wiki operator list page."""
    page_url = "https://prts.wiki/w/CHAR#?rarity=1-6%3B5&_d=2"
    logger.info("正在初始化无头浏览器...")
    if BROWSER == "Edge":
        options = webdriver.EdgeOptions()
        options.add_argument("--headless")
        options.add_argument("--ignore-certificate-errors")
        service = EdgeService(EdgeChromiumDriverManager().install())
        driver = webdriver.Edge(service=service, options=options)
    elif BROWSER == "Chrome":
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        options.add_argument("--ignore-certificate-errors")
        service = ChromeService(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=options)
    else:
        exit("指定浏览器错误")
    try:
        logger.info("正在打开页面...")
        driver.get(page_url)
        sleep(5)
        # number of pagination checkboxes = number of result pages
        select_length = driver.execute_script(
            'return document.querySelectorAll("#pagination div.checkbox-container > div.checkbox-container").length')
        # collected picture download links
        pictures_list = []
        # click through every page
        for i in range(select_length):
            driver.execute_script(
                f'document.querySelectorAll("#pagination div.checkbox-container > div.checkbox-container")[{i}].click()')
            # wait for the page to render
            sleep(1)
            # parse this page's source
            page_html = BeautifulSoup(driver.page_source, 'html.parser')
            # every avatar <img> on the page
            for picture_dom in page_html.select("#filter-result a > img"):
                # BUGFIX: `"data-src" in tag` tests the tag's *contents*, not
                # its attributes, so the lazy-load URL was never picked up;
                # has_attr() checks the attribute as intended.
                if picture_dom.has_attr("data-src"):
                    src = picture_dom["data-src"]
                else:
                    src = picture_dom["src"]
                pictures_list.append({
                    "name": src.split("/")[-1],
                    "url": src
                })
    finally:
        driver.quit()
    # download all collected pictures
    for pic in pictures_list:
        download_picture(pic["url"], pic["name"], "img/tx")


@logger.catch
def main():
    """Entry point: scrape, build, download, compress and verify all resources."""
    logger.opt(colors=True).info("<yellow>Tips: 可以使用 --proxy [ghproxy代理地址]的方式来添加github代理</>")

    # Step 1: scrape the pool-id -> picture-name mapping from the wiki
    logger.opt(colors=True).info("<green>Step1. 开始获取资源</>")
    global PIC_ID
    PIC_ID = get_picture_id()
    logger.debug(json.dumps(PIC_ID, ensure_ascii=False))
    logger.info("获取资源完成")

    # Step 2: merge the gacha tables, normalize pool names, persist to disk
    logger.opt(colors=True).info("<green>Step2. 开始构建卡池信息</>")
    pool_info = build_pool_info()
    logger.info("pool_info构建完成")
    logger.info("开始修复卡池名")
    pool_info = pool_name_fix(pool_info)
    with open("resource/pool_info.json", "w", encoding="utf-8") as pool_file:
        json.dump(pool_info, pool_file, ensure_ascii=False, indent=4)
        logger.info("pool_info.json已写入到本地")

    # Step 3: download the banner pictures
    logger.opt(colors=True).info("<green>Step3. 开始下载卡池图片</>")
    get_pictures()
    logger.info("图片下载完成")

    # Step 4: convert/compress them to webp
    logger.opt(colors=True).info("<green>Step4. 开始压缩图片</>")
    compress_images(SAVED_DIR, COMPRESSED_DIR, "webp", 70, 6)
    logger.info("图片压缩完成")

    # Step 5: interactively verify every picture name resolves to a file
    logger.opt(colors=True).info("<green>Step5. 检查图片名是否有误</>")
    pic_disc = pic_name_fix()
    if pic_disc:
        logger.warning(f"手动修正的图片名：{json.dumps(pic_disc, ensure_ascii=False)}")
    else:
        logger.info("图片名无误")

    # Step 6: build and persist the character table extract
    logger.opt(colors=True).info("<green>Step6. 开始构建角色信息</>")
    char_info = build_char_info()
    with open("resource/char_info.json", "w", encoding="utf-8") as char_file:
        json.dump(char_info, char_file, ensure_ascii=False, indent=4)
        logger.info("char_info.json已写入到本地")

    # Step 7: download operator avatar pictures
    logger.opt(colors=True).info("<green>Step7. 获取干员大头图</>")
    get_operator_pictures()
    logger.info("获取干员大头图完成")


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # exit quietly on Ctrl-C
        sys.exit(1)
    except Exception:
        # BUGFIX: the old bare `except: exit()` silently discarded the
        # traceback of any crash; log it before exiting (sys.exit is also
        # preferred over the site-injected exit() builtin in scripts)
        logger.exception("未处理的异常，程序终止")
        sys.exit(1)

