import asyncio
import shutil

#
import pydash

#
from common import *
from metadbconn import *

_semaphore = asyncio.Semaphore(CONFIG["async_coroutines"])

GHS_LOGGER = logging.getLogger("ModeGHS")


def fetch_video_files(
    media_dir: Path,
    video_infos: list[tuple[tuple[str], Path]],
    list_episodes_data: list[dict],
):
    """Rename each source video file into *media_dir* using scraped episode metadata.

    Args:
        media_dir: Root directory of the show (contains "Season 01" / "Specials").
        video_infos: Pairs of (csv row, real path); each row is
            (bgmtv_ID, key, month, sort_in_month, SP_index, raw_name).
        list_episodes_data: Episode metadata indexed by season number
            (0 = Specials, 1 = Season 01); index 0 may be None when the show
            has no specials.

    If a rename target already exists, the whole loop stops (remaining files
    are left untouched) to avoid silently overwriting and losing a file.
    """
    for row, real_path in video_infos:
        # bgmtv_ID and key are not needed in this function; underscore-prefix
        # the unused fields so the unpacking stays self-documenting.
        _bgmtv_ID, _key, month, sort_in_month, SP_index, raw_name = row
        # An empty SP_index means a regular episode (season 1); otherwise a special.
        season_number = 1 if SP_index == "" else 0
        season_dir = media_dir / ("Season 01" if season_number == 1 else "Specials")
        # Match episodes by year-month: "aired" is assumed to be "YYYY-MM-DD",
        # so [:-3] keeps "YYYY-MM" — TODO confirm the date format upstream.
        target_episodes = [i for i in list_episodes_data[season_number] if i["aired"] and i["aired"][:-3] == month]
        # With several episodes in the same month, sort_in_month (1-based) picks one.
        target_index = int(sort_in_month) - 1 if len(target_episodes) > 1 else 0
        if target_index >= len(target_episodes):
            GHS_LOGGER.warning("{}的“{}”找不到对应的元数据，跳过".format(month, raw_name))
            continue
        target_episode = target_episodes[target_index]
        # Build a deterministic, sortable name:
        # <episode>_<month>N<index-in-month>_<purified title><original suffix>
        video_new_name = "{}_{}N{}_{}{}".format(
            (target_episode["episode"]).zfill(2),
            month,
            str(target_index + 1).zfill(2),
            filename_purified(target_episode["originaltitle"]),
            filename_suffix(raw_name),
        )
        video_new_path = season_dir / video_new_name
        if video_new_path.exists():
            GHS_LOGGER.error(
                "{}的“{}”将要重命名为“{}”，但是该文件已存在，为防止文件永远丢失，程序终止".format(
                    month, raw_name, video_new_path
                )
            )
            # NOTE(review): the message says the program terminates, but this
            # only breaks out of this show's loop — confirm intended scope.
            break
        else:
            real_path.rename(video_new_path)


async def construct_media_dir(bgmtv_conn: BgmtvConn, dst_root: Path, bgmtv_ID: str, has_SP: bool):
    """Create the directory tree for one TV show and fetch its episode metadata.

    Returns:
        (media_dir, list_episodes_data) — list_episodes_data is indexed by
        season number; index 0 is None when the show has no specials.
    """
    # ======== show-level metadata ========
    tv_data = await bgmtv_conn.get_season_data_jellyfin(bgmtv_ID, "ja-JP", 1)
    common = tv_data["common"]
    dir_name = filename_purified(
        "{name} ({year}) - [bgmtvid-{id}]".format(name=common["originaltitle"], year=common["year"], id=bgmtv_ID)
    )
    media_dir = dst_root / dir_name
    media_dir.mkdir(parents=True, exist_ok=True)
    # Drop tvshow.nfo so its metadata can be refreshed later.
    (media_dir / "tvshow.nfo").unlink(True)
    # ======== season directories ========
    # S0 (specials, only when present) and S1 (standard episodes).
    if has_SP:
        seasons = ((0, BGMTV_EPISODE_SPECIAL), (1, BGMTV_EPISODE_STANDARD))
    else:
        seasons = ((1, BGMTV_EPISODE_STANDARD),)
    for number, _ in seasons:
        (media_dir / ("Season 01" if number == 1 else "Specials")).mkdir(parents=True, exist_ok=True)
    # ======== episode metadata, fetched concurrently ========
    list_episodes_data = await asyncio.gather(
        *(
            bgmtv_conn.get_episodes_data_jellyfin(bgmtv_ID, "ja-JP", number, etype, tv_data["total_episodes"])
            for number, etype in seasons
        )
    )
    # ======== result ========
    # Align list indices with season numbers so the list can be used like a
    # dict keyed 0/1 by the caller.
    if len(list_episodes_data) == 1:
        list_episodes_data = (None, list_episodes_data[0])
    return media_dir, list_episodes_data


async def work_one_tvshow(
    bgmtv_conn: BgmtvConn,
    dst_root: Path,
    bgmtv_ID: str,
    has_SP: bool,
    video_infos: list[tuple[tuple[str], Path]],
):
    """Build the media directory for one show, then rename its video files.

    A network failure while talking to bgm.tv is logged and aborts this show
    only; other shows keep running.
    """
    try:
        media_dir, list_episodes_data = await construct_media_dir(bgmtv_conn, dst_root, bgmtv_ID, has_SP)
    except httpx.ConnectError:
        # BUGFIX: media_dir is not assigned yet when this fires — logging
        # media_dir.name raised NameError, and execution then fell through to
        # fetch_video_files with undefined locals. Log the ID we do have and
        # bail out instead.
        GHS_LOGGER.error("在bgmtv获取tv“{}”的数据时出现网络错误".format(bgmtv_ID))
        return
    fetch_video_files(media_dir, video_infos, list_episodes_data)


async def _work_one_tvshow_limited(bgmtv_conn, dst_root, bgmtv_ID, has_SP, video_infos):
    """Run work_one_tvshow while holding one permit of the global semaphore."""
    async with _semaphore:
        await work_one_tvshow(bgmtv_conn, dst_root, bgmtv_ID, has_SP, video_infos)


async def work(metadb_conns):
    """GHS-mode entry point: match source videos to bgmtv metadata and rename them.

    Args:
        metadb_conns: Mapping of metadata-db connections; key "bgmtv" must be
            a BgmtvConn.
    """
    source_root = Path(pydash.get(CONFIG, "mode.ghs.source_root")).resolve()
    dst_root = Path(pydash.get(CONFIG, "mode.ghs.dst_root")).resolve()
    # Index every video file under source_root by its file name.
    exist_videos = {i.name: i for i in source_root.rglob("*") if filename_suffix(i.name) in SUFFIXES_VIDEO}
    # Skip the CSV header row; keep only rows that carry a bgmtv ID and whose
    # referenced file actually exists on disk.
    refer_id_csv = read_csv(Path(pydash.get(CONFIG, "mode.ghs.refer_id_csv")))[1:]
    refer_id_csv = [i for i in refer_id_csv if i[0] != "" and i[-1] in exist_videos.keys()]
    if DEBUG:
        refer_id_csv = refer_id_csv[:21]
        # ignore_errors so a missing dst_root doesn't crash a debug run
        shutil.rmtree(dst_root, ignore_errors=True)
    dst_root.mkdir(parents=True, exist_ok=True)
    bgmtv_conn: BgmtvConn = metadb_conns["bgmtv"]
    # Group rows per bgmtv ID; each value is a list of
    # ((bgmtv_ID, key, month, sort_in_month, SP_index, raw_name), real_path of raw_name).
    video_infos_grouped = {}
    for row in refer_id_csv:
        video_infos_grouped.setdefault(row[0], []).append((row, exist_videos[row[-1]]))
    # ================正式开始================
    coroutines = [
        _work_one_tvshow_limited(
            bgmtv_conn, dst_root, bgmtv_ID, any(row[4] != "" for (row, _) in video_infos), video_infos
        )
        for bgmtv_ID, video_infos in video_infos_grouped.items()
    ]
    # BUGFIX: the semaphore used to be acquired ONCE around the whole gather,
    # so the CONFIG["async_coroutines"] limit was never enforced — every show
    # ran concurrently. Each task now takes its own permit (see helper above).
    await asyncio.gather(*coroutines)
