from datetime import datetime
from queue import Queue

#
import httpx
import pydash

#
from common import *
from mydb import CachedbConn

# Shared logger for all metadata connectors in this module.
METADB_LOGGER = logging.getLogger("MetaDB")


def _overview_purified(text: str):
    text = re.sub(r"\r+", "", text)
    text = re.sub(r"[\t 　]+", " ", text)
    text = re.sub(r"\s{2,}", "\n", text)
    return text.strip()


def _chinese_date_purified(text):
    """Convert a Chinese-formatted date like "2023年1月5日" to DATE_FMT_STR.

    Returns "" when *text* is missing or not in the expected format.
    The original bare ``except:`` caught *everything* (including
    KeyboardInterrupt/SystemExit); it is narrowed to the two exceptions
    strptime can actually raise for bad input.
    """
    try:
        return datetime.strptime(text, "%Y年%m月%d日").strftime(DATE_FMT_STR)
    except (ValueError, TypeError):
        # ValueError: malformed date string; TypeError: text is None/non-str.
        return ""


def _date_purified(text):
    """Round-trip *text* through DATE_FMT_STR to validate/normalize it.

    Returns "" when *text* is missing or malformed. The bare ``except:``
    is narrowed to the exceptions strptime raises for bad input so that
    unrelated failures are no longer silently swallowed.
    """
    try:
        return datetime.strptime(text, DATE_FMT_STR).strftime(DATE_FMT_STR)
    except (ValueError, TypeError):
        # ValueError: malformed date string; TypeError: text is None/non-str.
        return ""


def _get_runtime(text: str):
    if re.match(r"^\d+:\d+:\d+$", text):
        return text.split(":")[1]
    if re.match(r"^\d+分$", text):
        return text[:-1]
    return ""


class MetadbConn:
    """Abstract async connector to an external metadata database.

    Concrete subclasses create two ``httpx.AsyncClient`` instances (one
    for the API, one for images) and share a ``CachedbConn`` that
    memoizes remote responses. The base class only tracks the clients so
    that they are closed exactly once, and defines the query interface
    as no-op stubs.
    """

    def __init__(self, baseurl, imgurl, token, cacheDB_conn: CachedbConn):
        # baseurl/imgurl/token are consumed by subclasses when building
        # their HTTP clients; the base class does not retain them.
        self._client = None
        self._client_img = None
        self._cacheDB_conn = cacheDB_conn

    async def aclose(self):
        """Close both HTTP clients; safe to call more than once."""
        client = self._client
        if client is not None:
            await client.aclose()
            self._client = None
        client_img = self._client_img
        if client_img is not None:
            await client_img.aclose()
            self._client_img = None

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()

    async def search(self, media_name: str, media_type: str, year: str, lang: str, unsafe=False):
        """Resolve a (name, type, year) query to a provider ID; stub."""
        return {}

    async def get_movie_data_jellyfin(self, metaDB_ID, lang):
        """Fetch Jellyfin-shaped movie metadata; stub."""
        return {}

    async def get_tv_data_jellyfin(self, metaDB_ID, lang, max_season_count=0):
        """Fetch Jellyfin-shaped TV-show metadata; stub."""
        return {}

    async def get_season_data_jellyfin(self, metaDB_ID, lang, season_number=None):
        """Fetch Jellyfin-shaped season metadata; stub."""
        return {}

    async def get_episodes_data_jellyfin(self, metaDB_ID, lang, season_number=None, episode_type=None, page_limit=None):
        """Fetch Jellyfin-shaped per-episode metadata; stub."""
        return {}


def _query_text(**params):
    return "&".join(("{}={}".format(key, value) for key, value in sorted(params.items())))


class TmdbConn(MetadbConn):
    """MetadbConn implementation backed by The Movie Database (TMDB) v3 API.

    Every JSON response and image is memoized through the shared
    CachedbConn under the "tmdb" namespace, so each remote resource is
    fetched from the network at most once.
    """

    # https://developer.themoviedb.org/reference/
    def __init__(self, baseurl, imgurl, token, cacheDB_conn: CachedbConn):
        super().__init__(baseurl, imgurl, token, cacheDB_conn)
        # NOTE(review): verify=False disables TLS certificate validation —
        # presumably for a local mirror/proxy; confirm this is intentional.
        self._client = httpx.AsyncClient(
            base_url=baseurl, headers={"Authorization": f"Bearer {token}"}, verify=False, follow_redirects=True
        )
        self._client_img = httpx.AsyncClient(
            base_url=imgurl, headers={"Authorization": f"Bearer {token}"}, verify=False, follow_redirects=True
        )

    async def _get_search_results_from_cache(self, media_name: str, media_type: str, year: str, lang: str):
        """Return the raw TMDB search payload, serving from cache when possible.

        The year filter key differs per endpoint: /search/tv expects
        "first_air_date_year" while /search/movie expects "year".
        """
        key_for_year = "first_air_date_year" if media_type == "tv" else "year"
        params = (
            {"query": media_name, key_for_year: year, "language": lang}
            if year
            else {"query": media_name, "language": lang}
        )
        # _query_text sorts keys, so the cache key is deterministic.
        query_text = _query_text(**params)
        cache_key = f"/search/{query_text}"
        rsp_content = self._cacheDB_conn.get_json("tmdb", cache_key)
        if rsp_content is not None:
            return rsp_content
        rsp = await self._client.get(f"search/{media_type}", params=params)
        rsp.raise_for_status()
        rsp_content = rsp.json()
        self._cacheDB_conn.put_json("tmdb", cache_key, rsp_content)
        METADB_LOGGER.info("从TMDB获取数据：搜索项“{}”".format(query_text))
        return rsp_content

    async def search(self, media_name: str, media_type: str, year: str, lang: str, unsafe=False):
        """Resolve a media name to a TMDB ID.

        Returns (id_str, F_OK) only for an unambiguous hit — exactly one
        result, or the first result when *unsafe* is truthy. Returns
        (None, F_SKIP) for no match or an ambiguous match.
        """
        rsp_content = await self._get_search_results_from_cache(media_name, media_type, year, lang)
        query_text = _query_text(query=media_name, year=year, lang=lang)
        if len(rsp_content["results"]) == 0 or media_type is None:
            METADB_LOGGER.warning("搜索项“{}”没有匹配的结果".format(query_text))
            return None, F_SKIP
        elif len(rsp_content["results"]) == 1 or unsafe:
            result_media: dict = pydash.get(rsp_content, "results.0")
            metaDB_ID = str(result_media["id"])
            METADB_LOGGER.info(
                "搜索项“{}”在TMDB中的ID为 {} ，名称为《{}》".format(
                    query_text,
                    metaDB_ID,
                    # Movie results carry "title"; TV results carry "name".
                    result_media.get("title") or result_media.get("name"),
                )
            )
            return metaDB_ID, F_OK
        else:
            METADB_LOGGER.warning("搜索项“{}”有多个匹配的结果".format(query_text))
            return None, F_SKIP

    async def _get_picture(self, pic_path):
        """Fetch an image by its TMDB path; return (pic_bytes, suffix)."""
        # pic_path starts with "/"; strip it for the cache key and logging.
        show_pic_path = pic_path[1:]
        cache_key = "/get/picture/{}".format(show_pic_path)
        rsp_content = self._cacheDB_conn.get("tmdb", cache_key)
        if rsp_content is None:
            rsp = await self._client_img.get(pic_path)
            rsp.raise_for_status()
            rsp_content = rsp.content
            self._cacheDB_conn.put("tmdb", cache_key, rsp_content)
            METADB_LOGGER.info("从TMDB获取图片：“{}”".format(show_pic_path))
        return rsp_content, filename_suffix(show_pic_path)

    async def _get_subject_from_cache(self, metaDB_ID, media_type, lang):
        """Fetch a movie/tv detail payload ("movie" or "tv"), cached per language."""
        cache_key = f"/get/json/{media_type}/{metaDB_ID}/{lang}"
        rsp_content = self._cacheDB_conn.get_json("tmdb", cache_key)
        if rsp_content is None:
            rsp = await self._client.get(f"{media_type}/{metaDB_ID}", params={"language": lang})
            rsp.raise_for_status()
            rsp_content = rsp.json()
            self._cacheDB_conn.put_json("tmdb", cache_key, rsp_content)
            show_name = rsp_content["title"] if media_type == "movie" else rsp_content["name"]
            METADB_LOGGER.info("从TMDB获取数据：《{}》".format(show_name))
        return rsp_content

    async def _get_season_from_cache(self, metaDB_ID, lang):
        """Fetch a season detail payload.

        *metaDB_ID* is the composite "tvID.season_number" produced by
        get_tv_data_jellyfin's season_ids list.
        """
        cache_key = f"/get/json/season/{metaDB_ID}/{lang}"
        rsp_content = self._cacheDB_conn.get_json("tmdb", cache_key)
        if rsp_content is None:
            tv_ID, season_number = metaDB_ID.split(".")
            rsp = await self._client.get(f"tv/{tv_ID}/season/{season_number}", params={"language": lang})
            rsp.raise_for_status()
            rsp_content = rsp.json()
            self._cacheDB_conn.put_json("tmdb", cache_key, rsp_content)
            METADB_LOGGER.info("从TMDB获取数据：ID {} 的第{}季".format(tv_ID, season_number))
        return rsp_content

    async def get_movie_data_jellyfin(self, metaDB_ID, lang):
        """Build a Jellyfin-shaped metadata dict for a movie.

        Includes collection info ("set") when TMDB reports
        belongs_to_collection.
        """
        rsp_content = await self._get_subject_from_cache(metaDB_ID, "movie", lang)
        result = {
            "common": {
                "title": rsp_content["title"],
                "originaltitle": rsp_content["original_title"],
                "plot": _overview_purified(rsp_content["overview"]),
                "year": rsp_content["release_date"].split("-")[0],
                "premiered": rsp_content["release_date"],
                "releasedate": rsp_content["release_date"],
                "runtime": str(rsp_content["runtime"]),
            },
            "genre": [i["name"] for i in rsp_content["genres"]],
            "uniqueid": {"tmdb": metaDB_ID},
            # NOTE(review): TMDB may return null poster_path/backdrop_path for
            # obscure titles; _get_picture would raise on None — confirm callers
            # only pass titles with artwork.
            "pictures": {
                "poster": await self._get_picture(rsp_content["poster_path"]),
                "fanart": await self._get_picture(rsp_content["backdrop_path"]),
            },
        }
        collection = rsp_content.get("belongs_to_collection")
        if collection:
            result["common"].update({"collection_number": str(collection["id"]), "set": collection["name"]})
        return result

    async def get_tv_data_jellyfin(self, metaDB_ID, lang, max_season_count=0):
        """Build a Jellyfin-shaped metadata dict for a TV show.

        ``season_ids`` are composite "tvID.season_number" strings consumed
        by _get_season_from_cache; ``season0`` flags whether the first
        listed season is the specials season (season_number == 0).
        ``max_season_count`` is accepted for interface parity but unused
        here — TMDB returns all seasons in one payload.
        """
        rsp_content = await self._get_subject_from_cache(metaDB_ID, "tv", lang)
        result = {
            "common": {
                "title": rsp_content["name"],
                "originaltitle": rsp_content.get("original_name", rsp_content["name"]),
                "plot": _overview_purified(rsp_content["overview"]),
                "year": rsp_content["first_air_date"].split("-")[0],
                "premiered": rsp_content["first_air_date"],
                "releasedate": rsp_content["first_air_date"],
                "enddate": rsp_content["last_air_date"],
            },
            "genre": [i["name"] for i in rsp_content["genres"]],
            "uniqueid": {"tmdb": metaDB_ID},
            "season_ids": ["{}.{}".format(metaDB_ID, i["season_number"]) for i in rsp_content["seasons"]],
            "season0": pydash.get(rsp_content, "seasons.0.season_number", 1) == 0,
            "pictures": {
                "poster": await self._get_picture(rsp_content["poster_path"]),
                "fanart": await self._get_picture(rsp_content["backdrop_path"]),
            },
        }
        return result

    async def get_season_data_jellyfin(self, metaDB_ID, lang, season_number=None):
        """Build a Jellyfin-shaped metadata dict for one season.

        *metaDB_ID* is the composite "tvID.season_number" string;
        *season_number* is accepted for interface parity but unused —
        the number comes from the payload itself.
        """
        rsp_content = await self._get_season_from_cache(metaDB_ID, lang)
        result = {
            "common": {
                "title": rsp_content["name"],
                "plot": _overview_purified(rsp_content["overview"]),
                "year": rsp_content["air_date"].split("-")[0],
                "premiered": rsp_content["air_date"],
                "releasedate": rsp_content["air_date"],
                # End date = air date of the last listed episode.
                "enddate": rsp_content["episodes"][-1]["air_date"],
                "seasonnumber": str(rsp_content["season_number"]),
            },
            "pictures": {"poster": await self._get_picture(rsp_content["poster_path"])},
            "total_episodes": len(rsp_content["episodes"]),
        }
        return result

    async def get_episodes_data_jellyfin(self, metaDB_ID, lang, season_number=None, episode_type=None, page_limit=None):
        """Build a list of Jellyfin-shaped episode dicts for one season.

        *metaDB_ID* is the composite "tvID.season_number" string. The
        trailing parameters exist for interface parity with BgmtvConn
        and are unused here.
        """
        rsp_content = await self._get_season_from_cache(metaDB_ID, lang)
        episodes = rsp_content["episodes"]
        # NOTE(review): TMDB may return runtime=null per episode; str(None)
        # would yield the literal "None" — confirm downstream handling.
        result = [
            {
                "title": episode["name"],
                "plot": _overview_purified(episode["overview"]),
                "season": str(episode["season_number"]),
                "episode": str(episode["episode_number"]),
                "aired": episode["air_date"],
                "runtime": str(episode["runtime"]),
            }
            for episode in episodes
        ]
        return result


# ================================bgmtv================================
# bgm.tv enum values (see https://bangumi.github.io/api/).
BGMTV_SUBJECT_ANIME = 2  # SubjectType: 2 = anime
BGMTV_EPISODE_STANDARD = 0  # EpType: 0 = regular episode (本篇)
BGMTV_EPISODE_SPECIAL = 1  # EpType: 1 = special (特别篇/SP)


def _get_from_infobox(rsp_content: dict, key):
    return next((i["value"] for i in rsp_content["infobox"] if i["key"] == key), "")


class BgmtvConn(MetadbConn):
    """MetadbConn implementation backed by the bangumi (bgm.tv) API.

    Responses and images are memoized via the shared CachedbConn under
    the "bgmtv" namespace.
    """

    # https://bangumi.github.io/api/
    # SubjectType: 1 = book, 2 = anime, 3 = music, 4 = game, 6 = live-action (三次元)
    # SubjectAnimeCategory: 0 = other, 1 = TV, 2 = OVA, 3 = Movie, 5 = WEB
    def __init__(self, baseurl, imgurl, token, user_agent, cacheDB_conn: CachedbConn):
        super().__init__(baseurl, imgurl, token, cacheDB_conn)
        # NOTE(review): verify=False disables TLS certificate validation —
        # confirm this is intentional.
        self._client = httpx.AsyncClient(
            base_url=baseurl,
            headers={"Authorization": f"Bearer {token}", "User-Agent": user_agent},
            verify=False,
            follow_redirects=True,
        )
        # bgm.tv image URLs are absolute (they already include the host),
        # so the image client is created without a base_url.
        self._client_img = httpx.AsyncClient(
            headers={"Authorization": f"Bearer {token}", "User-Agent": user_agent}, verify=False, follow_redirects=True
        )

    async def _get_picture(self, pic_path):
        """Fetch an image by its absolute URL; return (pic_bytes, suffix)."""
        # Cache key uses only the final path component of the URL.
        show_pic_path = pic_path.split("/")[-1]
        cache_key = "/get/picture/{}/large".format(show_pic_path)
        rsp_content = self._cacheDB_conn.get("bgmtv", cache_key)
        if rsp_content is None:
            rsp = await self._client_img.get(pic_path)
            rsp.raise_for_status()
            rsp_content = rsp.content
            self._cacheDB_conn.put("bgmtv", cache_key, rsp_content)
            METADB_LOGGER.info("从bgmtv获取图片：“{}”".format(show_pic_path))
        return rsp_content, filename_suffix(show_pic_path)

    async def _get_subject_from_cache(self, metaDB_ID, lang):
        """Fetch a subject detail payload, cached.

        NOTE(review): *lang* is accepted for interface parity but never
        sent to the API or used in the cache key. The log message below
        says “相关作品” (related works) even though this fetches the
        subject itself — looks copy-pasted from _get_relations_from_cache;
        confirm before changing the user-visible text.
        """
        cache_key = f"/get/json/subject/{metaDB_ID}"
        rsp_content = self._cacheDB_conn.get_json("bgmtv", cache_key)
        if rsp_content is None:
            rsp = await self._client.get(f"subjects/{metaDB_ID}")
            rsp.raise_for_status()
            rsp_content = rsp.json()
            self._cacheDB_conn.put_json("bgmtv", cache_key, rsp_content)
            show_name = rsp_content["name_cn"] or rsp_content["name"]
            METADB_LOGGER.info("从bgmtv获取数据：《{}》的相关作品".format(show_name))
        return rsp_content

    async def _get_relations_from_cache(self, metaDB_ID):
        """Fetch the list of subjects related to *metaDB_ID*, cached."""
        cache_key = f"/get/json/relations/{metaDB_ID}"
        rsp_content = self._cacheDB_conn.get_json("bgmtv", cache_key)
        if rsp_content is None:
            rsp = await self._client.get(f"subjects/{metaDB_ID}/subjects")
            rsp.raise_for_status()
            rsp_content = rsp.json()
            self._cacheDB_conn.put_json("bgmtv", cache_key, rsp_content)
            METADB_LOGGER.info("从bgmtv获取数据：ID {} 的相关作品".format(metaDB_ID))
        return rsp_content

    async def _get_seasons_by_relations(self, metaDB_ID, max_season_count=0):
        """Collect season IDs by walking the "sequel" (续集) relation chain.

        bgm.tv models seasons as a linked list of subjects: each season's
        relations contain at most one anime tagged “续集” pointing at the
        next season, so the queue never holds more than one pending ID.

        max_season_count == 0 means unbounded: follow the chain to its end.
        max_season_count == N needs only N-1 relation requests in total.
        """
        seasaon_IDs = Queue()
        seasaon_IDs.put(metaDB_ID)
        result = []
        loop_count = 1
        while not seasaon_IDs.empty():
            current_ID = seasaon_IDs.get()
            result.append(str(current_ID))
            # Stop following relations once the requested count is reached.
            if not (max_season_count == 0 or loop_count < max_season_count):
                break
            rsp_content = await self._get_relations_from_cache(current_ID)
            next_season_obj = next(
                filter(
                    lambda i: (i["relation"], i["type"]) == ("续集", BGMTV_SUBJECT_ANIME),
                    rsp_content,
                ),
                None,
            )
            if next_season_obj is not None:
                seasaon_IDs.put(next_season_obj["id"])
            loop_count += 1
        return result

    async def _get_common_data_jellyfin(self, metaDB_ID, lang):
        """Build the fields shared by movie/tv/season results.

        Returns (raw_payload, partial_result) so callers can derive
        extra fields from the raw subject data.
        """
        rsp_content = await self._get_subject_from_cache(metaDB_ID, lang)
        release_date = _date_purified(rsp_content["date"])
        result = {
            "common": {
                # Prefer the Chinese title; fall back to the original name.
                "title": rsp_content["name_cn"] or rsp_content["name"],
                "originaltitle": rsp_content["name"],
                "plot": _overview_purified(rsp_content["summary"]),
                "year": release_date.split("-")[0] if release_date else "",
                "premiered": release_date,
                "releasedate": release_date,
            },
            "uniqueid": {"bgmtv": metaDB_ID},
            "pictures": {"poster": await self._get_picture(pydash.get(rsp_content, "images.large"))},
        }
        return rsp_content, result

    async def get_movie_data_jellyfin(self, metaDB_ID, lang):
        """Build a Jellyfin-shaped metadata dict for a movie subject."""
        rsp_content, result = await self._get_common_data_jellyfin(metaDB_ID, lang)
        # Runtime comes from the "片长" (duration) infobox field.
        result["common"].update({"runtime": _get_runtime(_get_from_infobox(rsp_content, "片长"))})
        result.update({"genre": [i["name"] for i in rsp_content["tags"]]})
        return result

    async def get_tv_data_jellyfin(self, metaDB_ID, lang, max_season_count=0):
        """Build a Jellyfin-shaped metadata dict for a TV subject.

        season_ids are discovered by following the sequel-relation chain;
        bgm.tv has no specials season, hence season0 is always False.
        """
        rsp_content, result = await self._get_common_data_jellyfin(metaDB_ID, lang)
        # End date comes from the "播放结束" (broadcast end) infobox field.
        enddate = _chinese_date_purified(_get_from_infobox(rsp_content, "播放结束"))
        result["common"].update({"enddate": enddate})
        result.update(
            {
                "genre": [i["name"] for i in rsp_content["tags"]],
                "season_ids": await self._get_seasons_by_relations(metaDB_ID, max_season_count),
                "season0": False,
            }
        )
        return result

    async def get_season_data_jellyfin(self, metaDB_ID, lang, season_number):
        """Build a Jellyfin-shaped metadata dict for one season subject."""
        rsp_content, result = await self._get_common_data_jellyfin(metaDB_ID, lang)
        enddate = _chinese_date_purified(_get_from_infobox(rsp_content, "播放结束"))
        result["common"].update({"enddate": enddate, "seasonnumber": str(season_number)})
        # bgm.tv's total_episodes includes specials etc., so it is not the true
        # regular-episode count; it tracks the entry automatically.
        # "eps" only becomes non-zero after the show finishes airing, and then
        # holds the number of regular episodes.
        result.update({"total_episodes": rsp_content["total_episodes"]})
        return result

    async def _get_episodes_from_cache(self, metaDB_ID, episode_type, page_limit):
        """Fetch one page of episodes of a given type, cached."""
        cache_key = f"/get/json/episodes/{metaDB_ID}/limit={page_limit}&type={episode_type}"
        rsp_content = self._cacheDB_conn.get_json("bgmtv", cache_key)
        if rsp_content is None:
            rsp = await self._client.get(
                "episodes", params={"subject_id": metaDB_ID, "type": episode_type, "limit": page_limit, "offset": 0}
            )
            rsp.raise_for_status()
            rsp_content = rsp.json()
            self._cacheDB_conn.put_json("bgmtv", cache_key, rsp_content)
            METADB_LOGGER.info("从bgmtv获取数据：ID {} 的类型为 {} 的单元集信息".format(metaDB_ID, episode_type))
        return rsp_content

    async def get_episodes_data_jellyfin(self, metaDB_ID, lang, season_number, episode_type, page_limit):
        """Build a list of Jellyfin-shaped episode dicts.

        *lang* is accepted for interface parity but unused.
        """
        # EpType: 本篇(regular)=0 特别篇(special)=1 OP=2 ED=3 预告/宣传/广告(trailer/ad)=4 MAD=5 其他(other)=6
        rsp_content = await self._get_episodes_from_cache(metaDB_ID, episode_type, page_limit)
        episodes = rsp_content["data"]
        # A duration of "00:23:59" yields runtime "23" — floored to minutes.
        result = [
            {
                "title": episode["name_cn"] or episode["name"],
                "originaltitle": episode["name"],
                "plot": _overview_purified(episode["desc"]),
                "season": str(season_number),
                # For specials, "sort" may be fractional (e.g. 18.5).
                "episode": str(episode["ep"]) if episode_type == BGMTV_EPISODE_STANDARD else str(episode["sort"]),
                "aired": _date_purified(episode["airdate"]),
                "runtime": _get_runtime(episode["duration"]),
            }
            for episode in episodes
        ]
        return result
