# -*- coding: utf-8 -*-
import os
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from glob import iglob
from pathlib import Path
from queue import Queue
from shutil import rmtree
from urllib.parse import urljoin, urlparse

import requests
from Crypto.Cipher import AES
from dataclasses import dataclass, asdict, field
from m3u8 import M3U8
from m3u8.parser import is_url
from natsort import natsorted

from py3utils.common_util import md5, catch, freeSpace, humanReadbleSize, validateTitle
from py3utils.downloader import DownloadWorker
from downloader.single_downloader import SingleDownloader
from downloader.threadpool_downloader import ThreadPoolDownload
from logger import logger

# from crypto.Cipher import AES
# NOTE: on Python 3 install the AES dependency with:
#   pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple pycryptodome
# Windows: AES requires the pycryptodome module  (pip install pycryptodome)
# Linux:   AES requires the pycrypto module      (pip install pycrypto)
"""
Need python >= 3.6
"""


# def get_natsorted_files(ts_path):
#     ts_path = Path(ts_path).joinpath("*ts").as_posix()
#     return natsorted(iglob(ts_path))

def aes_decode(data, key, iv):
    """Decrypt *data* with AES in CBC mode.

    :param data: ciphertext bytes to decrypt
    :param key: AES key (16/32 bytes — generally a multiple of 16)
    :param iv: initialization vector for CBC mode
    :return: plaintext bytes with trailing NUL padding stripped
    """
    plain = AES.new(key, AES.MODE_CBC, iv=iv).decrypt(data)
    # strip zero-byte padding; decoding to text is left to the caller
    return plain.rstrip(b'\0')


def enough_free_space(disk, fs=1024 * 1024 * 1024 * 6):
    """Return True when *disk* has at least *fs* bytes free (default 6 GiB)."""
    abs_disk = os.path.abspath(disk)
    available = freeSpace(abs_disk)
    print(f"[ Free Space: {abs_disk}]", humanReadbleSize(available))
    return available >= fs


def get_natsorted_files(ts_path):
    """List the files directly under *ts_path*, naturally sorted, as posix strings."""
    file_paths = [entry.as_posix() for entry in Path(ts_path).iterdir() if entry.is_file()]
    return natsorted(file_paths)


def merge_ts(ts_files, dist_path, dist_name, decode=None):
    """Concatenate *ts_files* (in natural order) into dist_path/dist_name.

    :param ts_files: iterable of segment file paths
    :param dist_path: output directory
    :param dist_name: output file name
    :param decode: optional callable applied to each segment's bytes
                   (e.g. an AES decrypt function) before writing
    """
    logger.info(f"start merge.....")
    ordered = natsorted(ts_files)
    if not ordered:
        # nothing to merge — no output file is created
        logger.info(f"merge over.")
        return
    target = Path(dist_path).joinpath(dist_name).as_posix()
    with open(target, 'wb') as out:
        for segment in ordered:
            with open(segment, 'rb') as src:
                chunk = src.read()
            if callable(decode):
                chunk = decode(chunk)
            out.write(chunk)
    logger.info(f"merge over.")


def clear_ts(ts_path):
    """Delete the temporary segment directory tree at *ts_path*."""
    logger.debug(f"rm {ts_path}")
    rmtree(ts_path)


def merge_all_ts(ts_path=None, file_path=None, key=None, get_ts_func=get_natsorted_files):
    """Merge downloaded ts segments into single .ts output files.

    :param ts_path: directory whose sub-directories each hold one video's segments;
                    every sub-directory is merged concurrently
    :param file_path: a single segment directory to merge
    :param key: optional decode callable forwarded to merge_ts (applied per segment)
    :param get_ts_func: callable returning the ordered segment file list for a directory
    :raises Exception: when neither ts_path nor file_path is given
    """
    if not any([file_path, ts_path]):
        raise Exception("require filepath or filename")

    def handle_file_path(f_path):
        # merge one segment directory into "<dirname>.ts" next to it
        f = f_path if isinstance(f_path, Path) else Path(f_path)
        f_name = f.parts[-1] + ".ts"
        logger.info(f"dist_path:{f.parent.as_posix()} file_name: {f_name}")
        ts_files = get_ts_func(f.as_posix())
        # BUG FIX: merge_ts takes ``decode`` (a callable), not ``key`` —
        # the previous ``key=key`` raised TypeError at call time.
        merge_ts(ts_files, f.parent.as_posix(), f_name, decode=key)

    if file_path is not None:
        handle_file_path(file_path)
    if ts_path:
        # BUG FIX: use the executor as a context manager so all submitted
        # merges are waited for before returning (previously never joined).
        with ThreadPoolExecutor(max_workers=10) as threadpool:
            for fp in Path(ts_path).iterdir():
                if fp.is_dir():
                    threadpool.submit(handle_file_path, fp.as_posix())


@dataclass
class DownLoad_M3U8(object):
    """Download an m3u8 playlist: fetch every segment, optionally AES-128
    decrypt, then merge the segments into a single .ts file.

    Requires either ``m3u8_url`` or ``content`` to build the playlist object.
    Master playlists are handled recursively via child ``DownLoad_M3U8``
    instances (see ``get_ts_url``).
    """
    # require m3u8_url or content
    m3u8_url: str
    file_name: str

    # -- download configuration --
    check_exist: bool = False      # skip setup in __post_init__ when output exists
    repeat_download: bool = True   # False: run() returns early when output exists
    max_workers: int = 5
    download_headers: dict = None
    download_proxies: dict = None
    # -- m3u8 object construction (forwarded to m3u8.M3U8) --
    m3u8_obj: M3U8 = None
    content: str = None
    base_path: str = None
    base_uri: str = None
    strict: str = None
    custom_tags_parser: str = None

    # -- internal bookkeeping --
    # NOTE: key / key_aes_decode / total deliberately have NO annotation, so
    # they are plain class attributes, not dataclass fields — they are excluded
    # from asdict()/config and therefore not inherited by child downloads.
    key = None  # AES key bytes fetched in check_encryption; not inherited
    key_aes_decode = None  # partial(aes_decode, key=..., iv=...); not inherited
    total = 0  # number of download uris; not inherited
    child_m3u8: list = field(default_factory=list)
    # NOTE(review): annotated as M3U8 but get_ts_url assigns a DownLoad_M3U8
    # instance here — the annotation looks wrong; confirm intended type.
    father_m3u8: M3U8 = None

    # -- feature switches --
    # when a master playlist is returned, download every variant
    dl_all_playlist: bool = False
    # choose a variant automatically instead of prompting on stdin
    auto_select_playlist: bool = False
    # which variant auto-select picks (first wins when both are set)
    select_first: bool = True
    select_last: bool = False
    # delete the segment directory after the merge
    rm_ts: bool = False
    check_all_ts: bool = True
    merge_ts: bool = True
    ts_path: str = "download"
    out_path: str = "."
    tmp_path: str = ".tmp"
    # evaluated at class-definition time from the tmp_path default -> ".tmp/.cache"
    cache_path: str = Path(tmp_path).joinpath(".cache").as_posix()
    # -- request configuration for playlist/key fetches --
    proxies: dict = None
    headers: dict = None
    timeout: int = 60

    def __post_init__(self):
        """Normalize the file name and work paths, create directories,
        load the playlist object and detect encryption."""
        self.check_headers()
        if not self.file_name:
            self.file_name = md5(self.m3u8_url)
        else:
            self.file_name = validateTitle(self.file_name)
        suffix = Path(self.file_name).suffix
        if not suffix:
            # default to a .ts extension when none was given
            new_suffix = "ts"
            if not self.file_name.endswith("."):
                new_suffix = f".{new_suffix}"
            self.file_name += new_suffix
        self.cache_path = Path(self.cache_path).joinpath(self.file_name).as_posix()

        if not self.base_uri:
            # derive the base uri from the playlist url (strip last path part)
            self.base_uri = self.m3u8_url.rsplit("/", 1)[0]

        if self.check_exist and Path(self.out_path).joinpath(self.file_name).exists():
            logger.info("Already Download!")
            return

        self.ts_path = Path(self.ts_path).joinpath(self.file_name).as_posix()
        Path(self.ts_path).mkdir(parents=True, exist_ok=True)
        Path(self.out_path).mkdir(parents=True, exist_ok=True)
        Path(self.tmp_path).mkdir(parents=True, exist_ok=True)
        Path(self.cache_path).mkdir(parents=True, exist_ok=True)

        logger.info("TS Dir: {}".format(self.ts_path))
        logger.info("File Name: {}".format(self.file_name))
        logger.info("base uri: {}".format(self.base_uri))
        self.m3u8_obj = self.load_m3u8_obj()
        # detect whether the stream is AES encrypted
        self.check_encryption()
        # print(asdict(self))

    def check_headers(self):
        """Add a default User-Agent to self.headers.

        NOTE(review): only applied when a headers dict was supplied — if
        self.headers is None no default UA is set; confirm that is intended.
        """
        default_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36', }
        if self.headers and "User-Agent" not in self.headers:
            self.headers.update(default_headers)

    def _get(self, uri, headers=None, proxies=None, timeout=None):
        """GET *uri* with the instance's defaults.

        :raises Exception: when the response status is not 200.
        NOTE: verify=False disables TLS certificate verification.
        """
        headers = self.headers if headers is None else headers
        proxies = self.proxies if proxies is None else proxies
        timeout = timeout or self.timeout
        response = catch()(requests.get)(uri, headers=headers, proxies=proxies, verify=False, timeout=timeout)
        if response.status_code != 200:
            raise Exception("response not 200 status: {} {}".format(response.status_code, response.text))
        return response

    def _get_content(self, uri, headers=None, proxies=None, timeout=None, use_cache=False, cache_timeout=None):
        """Return the response body for *uri*, optionally via the on-disk cache.

        :param use_cache: read from / write to the file cache under cache_path
        :param cache_timeout: max cache age in seconds (None = never expires)
        :return: raw response bytes
        """
        if use_cache:
            cache = self.get_uri_cache(uri, cache_timeout)
            if cache:
                logger.info(f"get content from cache")
                return cache

        response = self._get(uri, headers=headers, proxies=proxies, timeout=timeout)
        content = response.content
        if use_cache:
            self.save_uri_cache(uri, content)
        return content

    def get_uri_cache_file_path(self, uri):
        """Map *uri* to its cache file: md5(uri) plus the url's file extension
        (or ".cache" when the url path has none)."""
        url_path = urlparse(uri).path
        url_path_split = url_path.split("/")
        last_path_part = url_path_split[-1] if len(url_path_split) > 1 else ""
        suffix = "cache"
        if "." in last_path_part:
            suffix = last_path_part.split(".")[-1]
        return Path(self.cache_path).joinpath(md5(uri)).with_suffix(f".{suffix}")

    def get_uri_cache(self, uri, cache_time_out=None):
        """Return cached bytes for *uri*, or b"" when missing or expired.

        Expiry is based on the cache file's st_ctime plus *cache_time_out*.
        """
        cache_file = self.get_uri_cache_file_path(uri)
        if not cache_file.exists():
            return b""
        if cache_time_out:
            st_ctime = cache_file.stat().st_ctime
            if time.time() > st_ctime + cache_time_out:
                return b""
        return cache_file.read_bytes()

    def save_uri_cache(self, uri, content=b""):
        """Write *content* to the cache file for *uri*, creating parents as needed."""
        cache_file = self.get_uri_cache_file_path(uri)
        if not cache_file.parent.exists():
            cache_file.parent.mkdir(parents=True, exist_ok=True)
        cache_file.write_bytes(content)

    def load_m3u8_obj(self):
        """Build the M3U8 object from self.content, a remote url, or a local file.

        :raises Exception: when neither m3u8_url nor content is set, or when
            m3u8_url is neither a url nor a readable file path.
        """
        if not any([self.m3u8_url, self.content]):
            raise Exception("require m3u8 url or content")
        content = self.content
        if not self.content:
            uri = self.m3u8_url

            if is_url(uri):
                logger.info("url uri:{}".format(uri))
                content = self._get_content(uri, use_cache=True).decode('utf-8')
            else:
                try:
                    content = Path(uri).read_text(encoding="utf-8", errors="ignore")
                except Exception as e:
                    raise Exception(f"uri: {uri} Is not a url and not a file path! Please check.")
                else:
                    logger.info("file uri:{}".format(uri))
        return M3U8(
            content=content,
            base_path=self.base_path,
            base_uri=self.base_uri,
            strict=self.strict,
            custom_tags_parser=self.custom_tags_parser,
        )

    def check_encryption(self):
        """Detect AES-128 encryption, fetch the key and prepare the decoder.

        Sets self.key and self.key_aes_decode on success.
        :return: True when an AES-128 key was configured, else False.
        :raises Exception: for any encryption method other than AES-128.
        """
        if not len(self.m3u8_obj.keys):
            return False
        # fetch the decryption key (only AES-128 is supported); only the
        # first key entry of the playlist is used
        global_key = self.m3u8_obj.keys[0]
        if not global_key:
            return False
        logger.info("Method: {}".format(global_key.method))
        if global_key.method != 'AES-128':
            raise Exception("Un Support Encryption Method: [ {} ]".format(global_key.method))

        uri = global_key.absolute_uri
        logger.info("key uri: {}".format(uri))
        self.key = self._get_content(uri, use_cache=True)
        # when the playlist supplies no iv, the key itself is used as the iv
        real_iv = self.key
        if global_key.iv:
            real_iv = global_key.iv
            if real_iv.startswith("0x"):
                # NOTE(review): this keeps real_iv as a 16-char hex *string*,
                # not 16 raw bytes — AES.new likely expects bytes; confirm.
                real_iv = real_iv.replace("0x", "")[:16]
        logger.info("key iv: {}".format(real_iv))
        self.key_aes_decode = partial(aes_decode, key=self.key, iv=real_iv)
        logger.info("key value: {}".format(self.key))
        return True

    def get_ts_url_v1(self):
        """Legacy variant: yield every segment's absolute uri (no playlist
        handling, does not update self.total)."""
        m3u8_obj = self.m3u8_obj
        # base_uri = m3u8_obj.base_uri
        total = len(m3u8_obj.segments)
        logger.info("Total: {}".format(total))
        for seg in m3u8_obj.segments:
            # uri = urljoin(base_uri, seg.uri)
            uri = seg.absolute_uri
            yield uri

    def playlists_selector(self, playlists):
        """Pick which variant playlists to download.

        Honors dl_all_playlist / auto_select_playlist flags; otherwise prompts
        on stdin for an index.
        :return: list of selected playlist objects.
        """
        message = [f"[{i}] {play.absolute_uri}" for i, play in enumerate(playlists)]
        logger.info('\r\n'.join(message))
        if self.dl_all_playlist:
            return playlists
        if self.auto_select_playlist:
            if self.select_first:
                playlists = playlists[:1]
            elif self.select_last:
                playlists = playlists[-1:]
            else:
                logger.warning(f"Auto Select Not Config, Use First Play.")
                playlists = playlists[:1]
        else:
            # interactive selection; loops until a number is entered
            # NOTE(review): an out-of-range number raises IndexError below
            while 1:
                try:
                    select_nb = int(input("Please Select: \n"))
                except:
                    logger.warning("please input a number.")
                else:
                    break
            playlists = [playlists[select_nb]]
        return playlists

    def get_ts_url(self):
        """Generator of segment uris to download; updates self.total.

        For a master playlist this instead spawns child DownLoad_M3U8 objects
        (one per selected variant), runs them, and yields nothing — note that
        because this is a generator, that work only happens once iteration
        starts.
        :raises Exception: when a media playlist contains no segments.
        """
        m3u8_obj = self.m3u8_obj
        # base_uri = m3u8_obj.base_uri
        # handle master playlists by recursing into child downloads
        if m3u8_obj.playlists:
            playlists = self.playlists_selector(m3u8_obj.playlists)
            for i, play in enumerate(playlists):
                m3u8_url = urljoin(self.base_uri, play.uri)
                # clone this instance's config for the child (key/total are
                # not dataclass fields and thus not copied)
                new_config = self.config
                new_config["file_name"] = f"{i} - " + new_config["file_name"]
                new_config["m3u8_obj"] = None
                new_config["content"] = None
                new_config["m3u8_url"] = m3u8_url
                obj = DownLoad_M3U8(**new_config)
                obj.father_m3u8 = self
                self.child_m3u8.append(obj)
                obj.run()
        else:
            # an initialization section (EXT-X-MAP) is downloaded first
            if m3u8_obj.segment_map:
                segment_map_uri = m3u8_obj.segment_map.get("uri")
                if segment_map_uri:
                    self.total += 1
                    yield urljoin(self.base_uri, segment_map_uri)
            self.total += len(m3u8_obj.segments)
            if self.total == 0:
                raise Exception("No ts")
            logger.info("Total: {}".format(self.total))
            for seg in m3u8_obj.segments:
                uri = seg.absolute_uri
                # url_parse = urlparse(seg.absolute_uri)
                # uri_parse = f"{url_parse.scheme}://{url_parse.netloc}"
                # if uri_parse != self.base_uri:
                #     uri = seg.absolute_uri.replace(uri_parse, self.base_uri)
                yield uri

    def dl_callback(self, filepath):
        """Build the per-file download callback.

        Currently a no-op; the commented code below used to decrypt each
        segment into a "decode" sub-directory as it finished downloading.
        """
        def inner():
            # if self.key:
            #     current_file_path = Path(filepath)
            #     with open(current_file_path, 'rb') as ft:
            #         scline = self.key_aes_decode(ft.read())
            #     decode_path = current_file_path.parent.joinpath("decode")
            #     decode_path.mkdir(parents=True, exist_ok=True)
            #     decode_file_path = decode_path.joinpath(current_file_path.name)
            #     with open(decode_file_path, 'wb') as fn:
            #         fn.write(scline)
            """Download callback (no-op)."""

        return inner

    def all_ts_over(self):
        """Return True when ts_path holds at least self.total files,
        i.e. every expected segment appears to be downloaded."""
        if len([f for f in Path(self.ts_path).iterdir() if f.is_file()]) >= self.total:
            return True
        return False

    def download_all_ts(self):
        """Download every segment with the ThreadPoolDownload backend;
        blocks until the pool drains. Segments are saved as <index>.ts."""
        ts_urls = self.get_ts_url()
        downloader = ThreadPoolDownload(self.max_workers, proxies=self.download_proxies, headers=self.download_headers)
        for index, ts_url in enumerate(ts_urls):
            logger.info("{}.Start: {}".format(index + 1, ts_url))
            filepath = Path(self.ts_path).joinpath(f'{index}.ts')
            downloader.add(ts_url, filepath.as_posix(), self.dl_callback(filepath.as_posix()))
        downloader.join()

    def download_all_ts2(self):
        """Alternative backend: queue + DownloadWorker daemon threads.

        use Self-built downloader
        """
        ts_urls = self.get_ts_url()
        queue = Queue(self.max_workers)
        for _ in range(self.max_workers):
            t = DownloadWorker(queue, use_proxies=True, try_times=1000)
            # NOTE: setDaemon is deprecated in favor of t.daemon = True
            t.setDaemon(True)
            t.start()

        for index, ts_url in enumerate(ts_urls):
            logger.info("{}.Start: {}".format(index + 1, ts_url))
            ta_path = Path(self.ts_path).joinpath(f'{index}.ts').as_posix()
            info = {
                "path": ta_path,
                "url": ts_url,
                "headers": self.download_headers,
                "callback": self.dl_callback(ta_path)
            }
            queue.put(info)
        queue.join()
        logger.info("全部ts下载完毕")

    def download_all_ts3(self):
        """Alternative backend: sequential SingleDownloader.

        use Self-built downloader
        """
        ts_urls = self.get_ts_url()
        downloader = SingleDownloader(headers=self.download_headers)
        for index, ts_url in enumerate(ts_urls):
            filepath = os.path.join(self.ts_path, f'{index}.ts')
            logger.info("{}. {} {}".format(index + 1, ts_url, filepath))
            downloader.add(
                ts_url, filepath,
                callback=self.dl_callback(filepath)
            )
        logger.info("全部ts下载完毕")

    def merge_all_ts(self, need_decode=False):
        """Merge the downloaded segments into out_path/file_name.

        :param need_decode: True — decrypt each segment during the merge;
            False with an encryption key set — merge pre-decoded files from
            the "decode" sub-directory.
            NOTE(review): that sub-directory is only produced by the
            commented-out code in dl_callback; confirm the False path works.
        """
        real_ts_path = self.ts_path
        if self.key and not need_decode:
            real_ts_path = Path(self.ts_path).joinpath("decode")
        merge_ts(
            get_natsorted_files(real_ts_path),
            self.out_path,
            self.file_name,
            decode=self.key_aes_decode if self.key and need_decode else None
        )

    def run(self):
        """Full pipeline: download, verify segment count, merge, clean up.

        :raises Exception: when check_all_ts is set and segments are missing.
        """
        if not self.repeat_download and Path(self.out_path).joinpath(self.file_name).exists():
            logger.info("Already Download!")
            return
        self.download_all_ts()
        if self.check_all_ts and not self.all_ts_over():
            raise Exception("Ts Not Over!")
        if self.merge_ts:
            self.merge_all_ts(need_decode=True)
        if self.rm_ts:
            clear_ts(self.ts_path)

    @property
    def config(self):
        """This instance's dataclass fields as a dict (used to spawn child
        downloads); non-field attributes like key/total are excluded."""
        return asdict(self)


def t_aes_decode():
    """Ad-hoc helper: decrypt every *.ts in the current directory with a
    hard-coded AES-128 key and write the results as decode<i>.ts."""
    # with open("encrypt.key", "rb") as f:
    #     key = f.read()
    key = b'0d620e878cdf9fcc'
    print(key)
    for i, ts in enumerate(natsorted(iglob(r".\*.ts"))):
        print(ts)
        with open(ts, "rb") as f:
            data = f.read()
        # BUG FIX: aes_decode requires an iv argument; as in
        # check_encryption, the key doubles as the iv when none is supplied.
        with open("decode{}.ts".format(i), "wb") as t:
            t.write(aes_decode(data, key, key))


if __name__ == '__main__':
    # BUG FIX: merge_ts has no ``key`` parameter — the decode callable is
    # passed as ``decode``; ``key=None`` raised TypeError before.
    merge_ts(
        get_natsorted_files(r"../tmp/【玩偶姐姐】《森林》第一集深林秘境相遇 - 露天浴池白天干到黑夜.ts"),
        r"C:\download\TapToDual",
        r"【玩偶姐姐】《森林》第一集深林秘境相遇 - 露天浴池白天干到黑夜.ts", decode=None
    )
