import hashlib
import multiprocessing
import os
import re
import sys
import time
from pprint import pprint

from file_idm_cmd import IDMdownload, IDMdownload_start

from concurrent.futures import ThreadPoolExecutor, ALL_COMPLETED, wait
from multiprocessing.pool import ApplyResult
from typing import List, Tuple
from urllib.parse import urlparse, unquote, urljoin

import requests
from requests.adapters import HTTPAdapter
import mylogs

# Module-level logger using the project's short log format.
logger = mylogs.Logger(__name__, logformat=mylogs.LOGFORMAT_SHORT0).getLogger()
# Shared thread pool (12 workers) and the task-handle list that the
# download helpers append to; both are module-level globals.
pool = ThreadPoolExecutor(12)
all_task = []


class Property:
    """
    Minimal ``.properties`` file reader.

    This implementation is not complete; the pyjavaproperties source can
    be used as a drop-in replacement for this class (original author's
    note).
    """
    configfilename = ''

    def __init__(self, configfilename="default.properties"):
        self.configfilename = configfilename

    def load_property_files(self, configfilename):
        """Parse ``key=value`` lines of *configfilename* into a dict.

        Lines without ``=`` (or starting with it) are skipped; only the
        first ``=`` separates key from value, so values may contain
        ``=``.  On failure a diagnostic is printed and the original
        exception is re-raised.
        """
        self.configfilename = configfilename
        properties_dict = {}
        try:
            # ``with`` guarantees the handle is closed even when parsing
            # fails (the original leaked the file on exceptions).
            with open(self.configfilename, mode='rt', encoding="utf-8") as pro_file:
                for line in pro_file:
                    if line.find('=') > 0:
                        key, value = line.replace('\n', '').split(sep='=', maxsplit=1)
                        properties_dict[key] = value
        except Exception:
            print("读取配置文件 {} 异常".format(configfilename))
            # Re-raise the caught exception instead of a bare
            # ``raise Exception`` so callers keep cause and traceback.
            raise
        return properties_dict

def get_win_gbk_filename(utf8str):
    """
    Coerce *utf8str* into a Windows-safe, GBK-encodable file name.

    Characters that cannot be represented in GBK become the codec's
    replacement character (``?``), and every run of characters that are
    illegal in Windows file names (``\\ / : * ? " < > |`` and newlines)
    collapses into a single ``_``.

    Parameters
    ----------
    utf8str : str
        Candidate file name.

    Returns
    -------
    str
        Sanitized file name.  No query-string stripping and no timestamp
        is performed here (the previous docstring claimed both, copied
        from ``get_path_filename``).
    """
    # Round-trip through GBK: any non-GBK character turns into '?',
    # which the pattern below then folds into '_'.
    res = utf8str.encode(encoding='gbk', errors='replace').decode(encoding='gbk', errors='replace')
    pattern1 = re.compile(r'[\\/:*?"<>|\r\n]+', re.M | re.I)
    res = pattern1.sub("_", res)
    return res


def get_path_filename(url):
    """
    Build a file name from a URL.

    Drops the query string, replaces every run of characters that are
    illegal in file names with ``_`` and appends the current Unix
    timestamp.

    Parameters
    ----------
    url : str
        Source URL, e.g.
        ``https://www.cnblogs.com/-pdd/p/15060228.html?test=test``.

    Returns
    -------
    str
        Sanitized name suffixed with ``_<timestamp>``.
    """
    # Everything from the first '?' onward is query string - discard it.
    without_query = re.compile(r'\?.+', re.M | re.DOTALL).sub("", url)
    # Collapse runs of forbidden file-name characters into one '_'.
    sanitized = re.compile(r'[\\/:*?"<>|\r\n]+', re.M | re.I).sub("_", without_query)
    return f"{sanitized}_{int(time.time())}"


def get_query_filename(url, add_time=1, suffix='.jpg'):
    """
    Derive a file name from a URL's query string.

    Scans the ``=``-separated pieces of *url* for an image-style
    reference (``jpg|jpeg|gif|png|webp|ico``) and builds
    ``<host><path>.<ext>`` from the first hit; otherwise falls back to
    the bare host name.  Illegal file-name characters collapse to ``_``.

    Parameters
    ----------
    url : str
        URL to derive the name from.
    add_time : int
        Truthy -> embed a Unix timestamp.  NOTE(review): when an image
        match is found AND add_time is truthy, a timestamp is embedded
        in the matched name and then a second ``_<timestamp><suffix>``
        is appended below — this double-stamping looks unintended but is
        preserved here; confirm before changing.
    suffix : str
        Extension appended together with the trailing timestamp.

    Returns
    -------
    str
        Sanitized file name.
    """
    # url="https://www.cnblogs.com/-pdd/p/15060228.html?test=test"
    # Host part between '//' and the next '/'.  NOTE(review): search()
    # is not None-checked — a URL without '//host/' raises AttributeError.
    pattern0 = re.compile(r'//(.+?)/', re.M | re.DOTALL)
    site = pattern0.search(url).group(1)
    pattern0 = re.compile(r'=', re.M | re.DOTALL)
    querystr = pattern0.split(url)
    myres = []
    for mystr in querystr:

        # Look for '<sep><name>.<image-ext>' inside this '='-piece.
        pattern0 = re.compile(r'(=|//|:)(.+?)\.(jpg|jpeg|gif|png|webp|ico)', re.M | re.DOTALL)
        m = pattern0.search(mystr)
        if m:
            file_name = site + m.group(2) + '.' + m.group(3)
            if add_time:
                file_name = site + m.group(2) + '_' + str(int(time.time())) + '.' + m.group(3)
            # mylogger.info(file_name)
            pattern1 = re.compile(r'[\\/:*?"<>|\r\n]+', re.M | re.I)
            res = pattern1.sub("_", file_name)
            myres.append(res)
            # Only the first matching piece is kept.
            break
    if myres:
        res = myres[0]
    else:
        res = site
    if add_time:
        res += '_' + str(int(time.time())) + suffix
    # Final sanitizing pass (runs a second time for the matched branch).
    pattern1 = re.compile(r'[\\/:*?"<>|\r\n]+', re.M | re.I)
    res = pattern1.sub("_", res)
    
    logger.info(f"rename to {res}")
    return res


def url_is_html(url, timeout=10):
    """
    Probe *url* with a HEAD request and report whether it serves HTML.

    Parameters
    ----------
    url : str
        URL to probe; empty values are rejected immediately.
    timeout : float
        Seconds to wait for the HEAD response.  New defaulted parameter:
        the original call had no timeout and could hang forever.

    Returns
    -------
    bool
        True when the response is 2xx with a ``text/html`` Content-Type;
        False otherwise (non-2xx, missing Content-Type, non-HTML).
    """
    if not url:
        print("没有接收到url地址")
        return False
    resp = requests.head(url, timeout=timeout)
    # Anything whose status does not start with '2' is treated as failure.
    if "2" != str(resp.status_code)[0:1]:
        print(f"found {resp.status_code=} site at {url}")
        return False
    # Single header lookup (the original fetched it twice).
    content_type = resp.headers.get('Content-Type')
    if not content_type:
        return False
    if content_type.startswith('text/html'):
        print(f'{resp.status_code} 可以打开看看 {url}')
        return True
    print(f'{resp.status_code} 这个不是网页 {url}')
    return False


def get_query_filename_as_it_is(url):
    """
    Map a URL onto a relative file path, keeping the URL path unchanged.

    Parameters
    ----------
    url : str
        URL to convert.

    Returns
    -------
    tuple
        ``(file_path, parent_folder)`` — the URL path relative to the
        host root and the folder that contains it.  A URL ending in
        ``/`` gets a synthetic ``index`` file name appended.
    """
    #  Path component relative to the host, e.g. /a/b/c
    file_path = urlparse(url).path
    # Directory-style URLs would otherwise yield an empty file name.
    if file_path.endswith('/'):
        file_path = file_path + 'index'
    # Parent folder = everything before the last '/' ('' when none).
    parent = file_path.rpartition('/')[0]
    logger.info(f"rename to {file_path}")
    #  (file name, folder containing it)
    return file_path, parent

def do_header_str():
    """
    Parse a curl-style ``-H 'Name: value'`` snippet into a header dict.

    Returns
    -------
    dict
        Header name -> header value, whitespace-trimmed.
    """
    header_str = """
      -H 'Accept: */*' \
      -H 'Accept-Language: zh-CN,zh;q=0.9' \
      -H 'Connection: keep-alive' \
      -H 'sec-ch-ua-mobile: ?0' \
      -H 'sec-ch-ua-platform: "Windows"' 
            """
    headers = {}
    # Each '-H' chunk looks like " 'Name: value' "; empty chunks (the
    # leading whitespace before the first -H) are skipped.
    for chunk in header_str.split('-H'):
        cleaned = chunk.strip().strip("'")
        if not cleaned:
            continue
        name, _, value = cleaned.partition(":")
        headers[name] = value.strip()
    return headers

def url_request_all_type(url, sub_folder="", suffix="", use_idm=False, usebinary=True, movetofullpath=None):
    """
    Download *url* into the ``snapshot/`` tree (or hand it off to IDM).

    Parameters
    ----------
    url : str
        Resource to fetch; empty values are rejected.
    sub_folder : str
        Extra folder inserted under ``snapshot/``.
    suffix : str
        Appended to the derived file name (e.g. ``0001.ts``).
    use_idm : bool
        When True, delegate the download to IDM and return immediately.
    usebinary : bool
        When False and the response is ``text/*``, re-encode
        ``resp.text``; otherwise raw ``resp.content`` is written.
    movetofullpath : str | None
        Explicit target path that overrides the derived one.

    Returns
    -------
    tuple
        ``(status, message)``: 0 success, 1 missing url, 2 non-2xx,
        3 already-present / delegated to IDM, 4 request exception.
    """
    if not url:
        return 1, "没有接收到url地址"
    file_name, file_folder = get_query_filename_as_it_is(
        unquote(url, encoding='utf-8', errors='replace'))
    filepath = 'snapshot/' + sub_folder + file_name + suffix
    if movetofullpath:
        filepath = movetofullpath
    # Skip the download entirely when the target already exists.
    if os.path.exists(filepath):
        return 3, f"文件已经在 {filepath}"
    if use_idm:
        IDMdownload(url, os.getcwd() + "/" + filepath)
        return (3, "IDM接管")
    #  Ensure the target folder exists.
    os.makedirs('snapshot/' + sub_folder + file_folder, mode=0o777, exist_ok=True)
    try:
        headers = do_header_str()
        s = requests.Session()
        # Retry twice on connection-level failures.
        s.mount('http://', HTTPAdapter(max_retries=2))
        s.mount('https://', HTTPAdapter(max_retries=2))
        resp = s.get(url, timeout=168, headers=headers)
        s.close()
    except (Exception,) as errmsg:
        return 4, f"found Exception site at {url}: {errmsg}"
    if "2" != str(resp.status_code)[0:1]:
        return 2, f"found {resp.status_code=} site at {url}"
    with open(filepath, 'wb') as f:
        # Guard against a missing Content-Type header: the original
        # sliced None and raised TypeError.  Likewise, resp.encoding can
        # be None, which would crash str.encode.
        content_type = resp.headers.get('Content-Type') or ''
        if not usebinary and 'text' == content_type[:4]:
            #  Content of the response, in unicode.
            f.write(resp.text.encode(encoding=resp.encoding or 'utf-8'))
        else:
            #  Content of the response, in bytes.
            f.write(resp.content)
    return 0, f"下载{url}完毕,文件保存到 {filepath}"


def thread_save_to_m3u8url(url: str):
    """
    Thread-pool based m3u8 downloader — currently a no-op.

    The entire implementation below sits inside a bare triple-quoted
    string literal, so calling this function does nothing and returns
    None.  ``process_save_to_m3u8url`` is the live multiprocessing
    replacement for this code path.

    :param url: m3u8 playlist URL (unused while the body is disabled)
    :return: None
    """
    '''
    res = get_query_filename_as_it_is(url)
    logger.info(str(res))
    # TODO : 如果文件存在则 return
    m3u8_path = 'snapshot/' + res[0]
    if os.path.exists(m3u8_path):
        logger.info(f"文件已经在 {m3u8_path}")
        ans = input("是否退出:")
        if ans == 'y':
            sys.exit(0)
    url_request_all_type(url, '', use_idm=False)
    with open('snapshot/' + res[0] + '.local', mode='wt', encoding="utf-8", newline='\n') as flocal:
        with open('snapshot/' + res[0] + '.net', mode='wt', encoding="utf-8", newline='\n') as fw:
            with open('snapshot/' + res[0], mode='rt', encoding="utf-8") as f:
                suffix_num = 0
                for line in f:
                    if line.startswith('#EXT-X-KEY'):
                        pattern0 = re.compile(r'URI="(.+?)"')
                        m = pattern0.search(line)
                        res_url = urljoin(url, m.group(1))
                        url_request_all_type(res_url, '')
                        # 替换为网络地址
                        line_net = pattern0.sub('URI="{}"'.format(res_url), line)
                        line_local = pattern0.sub('URI="{}"'.format(
                            urlparse(res_url.strip('\n')).path.split('/')[-1]), line)
                        fw.write(line_net)
                        flocal.write(line_local)
                    elif line.startswith('#'):
                        fw.write(line)
                        flocal.write(line)
                    else:
                        # ts文件提交
                        # 多线程
                        # 多线程
                        # 多线程多线程多线程多线程多线程多线程多线程多线程多线程
                        suffix_name = "%04d.ts" % suffix_num
                        suffix_num += 1
                        res_url = urljoin(url, line)
                        fw.write(res_url)
                        flocal.write(urlparse(res_url.strip('\n')).path.split('/')[-1] + suffix_name + '\n')
                        taskid = pool.submit(url_request_all_type, res_url.strip('\n'), '', suffix_name)
                        all_task.append(taskid)
    wait(all_task, return_when=ALL_COMPLETED)
    # shell_cmd = """
    # ffmpeg -protocol_whitelist \"file,crypto,http,https,tcp,tls\"  -allowed_extensions ALL -i {} -c copy {}
    # """.format('snapshot/' + res[0] + '.net', 'snapshot' + res[1])
    # os.system(shell_cmd)
    print('\a')
    '''


def check_dir_file_num(dirname, target_num):
    """
    Poll *dirname* every 6 minutes until it contains at least
    *target_num* files, then terminate the global process pool.

    NOTE(review): ``ppool`` is only created in the ``__main__`` block —
    calling this from an import context raises NameError; confirm this
    is only ever scheduled from the main script.
    """
    while True:
        time.sleep(360)
        filenames = os.listdir(dirname)
        file_num = len(filenames)
        if file_num < target_num:
            logger.info("多进程下载进度{}/{}".format(file_num, target_num))
            continue
        else:
            logger.info("多进程下载进度:time.sleep(360)")
            # One more grace period before killing outstanding workers.
            time.sleep(360)
            ppool.terminate()
            break


def process_save_to_m3u8url(url: str, use_idm):
    """
    Download an m3u8 playlist and submit every segment to the global
    process pool ``ppool``.

    Writes three files under ``snapshot/obj/``: the raw playlist, a
    ``.net`` copy with absolute segment URLs and a ``.local`` copy with
    local relative names.  Returns the module-level ``all_task`` list of
    ``(ApplyResult, url, sub_folder, suffix_name, use_idm)`` tuples.

    NOTE(review, from original comment): m3u8 playlists and their media
    files often live in different locations; this code still needs a
    cleanup for that case.
    """

    # res = get_query_filename_as_it_is(url)
    # logger.info(str(res))
    # Stable target name: sanitized site name + md5 prefix of the path.
    site_name = get_query_filename(url, add_time=0, suffix='_')
    hash_name = hashlib.md5(urlparse(url.strip('\n')).path.encode(encoding='utf-8')).hexdigest()[0:20]
    special_index_m3u8 = 'snapshot/obj/' + site_name + hash_name
    print("special_index_m3u8:",special_index_m3u8)
    # Ask the user how to proceed when the playlist file already exists.
    pattern_manual = None
    pattern_manual_name = ""
    manual_num = 0
    if os.path.exists(special_index_m3u8):
        logger.info(f"文件已经在 {special_index_m3u8}")
        # with open(special_index_m3u8,"r",encoding="utf-8") as f:
            # lall = [line for line in f.readlines()]
            # plist = [line for line in lall if line[:4]== 'http']
            # pattern_manual = re.compile(r'(\d+?)(?=\.ts)')
            # m = pattern_manual.search(plist[0])
            # if m:
                # pattern_manual_name = m.group(1)
                # manual_num = int(pattern_manual_name)
                # manual_num = manual_num if 1 < manual_num < 20 and len(pattern_manual_name)>2 else 0
            # else:
                # manual_num = 0
        # print(f"{manual_num=}")
        ans = input("y:保持文件m3u8不动, n:新建m3u8_path再下载, q:终止程序")
        if ans == 'q':
            sys.exit(0)
        if ans == 'n':
            # Fresh name: the timestamp variant avoids the collision.
            site_name = get_query_filename(url, add_time=1, suffix='_')
            special_index_m3u8 = 'snapshot/obj/' + site_name + hash_name
    url_request_all_type(url, movetofullpath=special_index_m3u8, use_idm=False)
    time.sleep(2)
    if os.path.exists(special_index_m3u8):
        logger.info(f"尝试打开文件 {special_index_m3u8}")
        with open(special_index_m3u8,"r",encoding="utf-8") as f:
            lall = [line for line in f.readlines()]
            # Non-comment lines are the segment references.
            plist = [line for line in lall if line[:1]!= '#']
            pattern_manual = re.compile(r'(\d+?)(?=\.ts)')
            m = pattern_manual.search(plist[0])
            if m:
                pattern_manual_name = m.group(1)
                manual_num = int(pattern_manual_name)
                # NOTE(review): '1 < manual_num < 1' is always False, so
                # manual_num is forced to 0 here.  The disabled block
                # above used '1 < manual_num < 20' — this looks like
                # either a deliberate kill switch or a typo; confirm
                # intent before relying on the manual-segment feature.
                manual_num = manual_num if 1 < manual_num < 1 and len(pattern_manual_name)>2 else 0
            else:
                manual_num = 0
        print(f"{manual_num=}")
    else:
        logger.error("m3u8文件不存在?")
        ans = input("输入manual_num=")
        manual_num = int(ans) or 0
    # os.system("ls resource/hls.min.js resource/index.html "+ 'snapshot/' + res[1]+'/')
    logger.info("解析了url地址{}".format(url))
    ts_url_list = []
    with open(special_index_m3u8 + '.local', mode='wt', encoding="utf-8", newline='\n') as flocal:
        with open(special_index_m3u8 + '.net', mode='wt', encoding="utf-8", newline='\n') as fw:
            with open(special_index_m3u8, mode='rt', encoding="utf-8") as f:
                suffix_num = 0
                for line_index, line in enumerate(f):
                    # Encryption-key line: download the key as well.
                    if line.startswith('#EXT-X-KEY'):
                        pattern0 = re.compile(r'URI="(.+?)"')
                        m = pattern0.search(line)
                        if m:
                            res_url = urljoin(url, m.group(1))
                            ts_url_list.append((res_url, ''))
                            # Rewrite the URI to the absolute network address.
                            line_net = pattern0.sub('URI="{}"'.format(res_url), line)
                            # Rewrite the URI to the local relative path.
                            line_local = pattern0.sub('URI="{}"'.format(urlparse(res_url.strip('\n')).path), line)
                            # line_local = pattern0.sub('URI="{}"'.format(
                            #     urlparse(res_url.strip('\n')).path.split('/')[-1]), line)
                            fw.write(line_net)
                            flocal.write(line_local)
                    elif line.startswith('#'):
                        # Plain comment/tag line: copy through unchanged.
                        fw.write(line)
                        flocal.write(line)
                    else:
                        # Segment line: queue the ts file for download.
                        if not suffix_num:
                            # Manually prepend extra ts files (disabled
                            # while manual_num is forced to 0 above).
                            for iindex in range(manual_num):
                                manual_name = pattern_manual_name[:-2] + "%02d" % iindex
                                suffix_name = "%04d.ts" % suffix_num
                                suffix_num += 1
                                res_url = pattern_manual.sub(manual_name,urljoin(url, line))
                                fw.write(res_url)
                                fw.write("#EXTINF:4.8,\n")
                                # Local relative name for the segment.
                                flocal.write(urlparse(res_url.strip('\n')).path + suffix_name + '\n')
                                flocal.write("#EXTINF:4.8,\n")
                                ts_url_list.append((res_url, suffix_name))
                        suffix_name = "%04d.ts" % suffix_num
                        suffix_num += 1
                        res_url = urljoin(url, line)
                        fw.write(res_url)
                        # Local relative name for the segment.
                        flocal.write(urlparse(res_url.strip('\n')).path + suffix_name + '\n')
                        # flocal.write(urlparse(res_url.strip('\n')).path.split('/')[-1] + suffix_name + '\n')
                        ts_url_list.append((res_url, suffix_name))
    # Submit every collected segment to the process pool.
    # ts_num = len(ts_url_list)
    for ts_index, res_url in enumerate(ts_url_list, 1):
        suffix_name = res_url[1]
        sub_folder = ""
        down_url = res_url[0].strip('\n')
        taskid = ppool.apply_async(url_request_all_type, (down_url, sub_folder, suffix_name, use_idm))
        all_task.append((taskid, down_url, sub_folder, suffix_name, use_idm))
        # Small pause to avoid hammering the pool/server with submissions.
        time.sleep(0.1)
        # logger.info("提交进度 {}/{}".format(ts_index, ts_num))
    # ppool.apply_async(check_dir_file_num, ('snapshot/' + res[1], ts_num - 5))
    return all_task


def sleep_func(i):
    """Sleep ``6*(i+1)`` seconds, then log a numbered marker (pool smoke test)."""
    delay = 6 + 6 * i
    time.sleep(delay)
    logger.info(f"sleep_func{i:04d}")

def check_taskids_useidm(taskids: List[Tuple[ApplyResult, str, str, str]]):
    """
    Poll *taskids* until every pool task has finished, start the queued
    IDM downloads, then re-submit failed downloads once and report the
    retries that genuinely failed.

    Each entry is ``(ApplyResult, url, sub_folder, suffix, use_idm)``.
    """
    starttime = time.time()
    logger.info("check_taskids")
    while True:
        res = [taskid[0].ready() for taskid in taskids]
        # First still-pending url, shown in the progress log line.
        res_err = [taskid[1] for taskid in taskids if not taskid[0].ready()]
        res_err1 = res_err[0] if res_err else None
        a, b = res.count(True), len(res)
        endtime = time.time()
        durtime = int(endtime - starttime)
        logger.warning("check_taskids:{}/{}__time_use:{}s__{}".format(a, b, durtime, res_err1))
        if a == b:
            logger.warning("check_taskids done")
            IDMdownload_start()
            retries = []
            for taskid in taskids:
                download_res = taskid[0].get()
                if download_res[0] not in [0, 3]:
                    # Re-submit the failed download and keep the handle.
                    # (The original re-tested the OLD download_res here,
                    # which logged "重试失败" unconditionally regardless
                    # of the retry's actual outcome.)
                    retries.append(ppool.apply_async(
                        url_request_all_type, (taskid[1], taskid[2], taskid[3], taskid[4])))
            for retry in retries:
                download_res = retry.get()
                if download_res[0] not in [0, 3]:
                    logger.warning(mylogs.tostring("重试失败:::", download_res))
            break
        time.sleep(0.5)
    os.system("start resource/download-complete.wav")

def check_taskids_default(taskids: List[Tuple[ApplyResult, str, str, str]]):
    """
    Poll *taskids* every 20 seconds until all pool tasks have finished,
    then retry each failed download synchronously in this process and
    log any retry that still fails.

    Each entry is ``(ApplyResult, url, sub_folder, suffix, use_idm)``.
    """
    starttime = time.time()
    logger.info("check_taskids")
    while True:
        ready_flags = [entry[0].ready() for entry in taskids]
        # First still-pending url, shown in the progress log line.
        pending_urls = [entry[1] for entry in taskids if not entry[0].ready()]
        first_pending = pending_urls[0] if pending_urls else None
        done = ready_flags.count(True)
        total = len(ready_flags)
        elapsed = int(time.time() - starttime)
        logger.warning("check_taskids:{}/{}__time_use:{}s__{}".format(done, total, elapsed, first_pending))
        if done == total:
            logger.warning("check_taskids done")
            for entry in taskids:
                outcome = entry[0].get()
                if outcome[0] not in [0, 3]:
                    # Synchronous retry in the current process.
                    outcome = url_request_all_type(entry[1], entry[2], entry[3], entry[4])
                    if outcome[0] not in [0, 3]:
                        logger.warning(mylogs.tostring("重试失败:::", outcome))
            break
        time.sleep(20)
    os.system("start resource/download-complete.wav")

def check_taskids(taskids: List[Tuple[ApplyResult, str, str, str]], use_idm = False):
    """Dispatch to the IDM-aware or the default task checker."""
    handler = check_taskids_useidm if use_idm else check_taskids_default
    handler(taskids)

def hello(msg):
    """Log *msg* at INFO level (simple pool-worker smoke test)."""
    logger.info(msg)


def ts_to_bigts(m3u8filename='snapshot/20211007/WlICMog6/hls/index.m3u8.local',
                bigts_name="outt01.ts"):
    """
    (Incomplete) merge the ts segments listed in a ``.local`` playlist
    into one big ts file.

    The previously hard-coded paths are now defaulted parameters (with
    the same values as before) so the helper is reusable.  NOTE(review):
    the body only opens the output in append mode and prints each
    playlist line — no bytes are ever written to *bigts_name*; the
    actual concatenation is still TODO.

    Parameters
    ----------
    m3u8filename : str
        Local playlist whose lines are echoed.
    bigts_name : str
        Output file opened append-binary (created if missing).
    """
    with open(bigts_name, mode='ab') as f_ts:
        with open(m3u8filename, mode='rt', encoding="utf-8", newline='\n') as flocal:
            for line in flocal:
                print(line)


if __name__ == '__main__':
    # Usage:
    #   script.py <url>        -> treat <url> as an m3u8 playlist and
    #                             download every segment via a process pool
    #   script.py <url> only   -> download just that single resource
    if len(sys.argv) < 2:
        logger.info(str(sys.argv))
        logger.info("need url")
        sys.exit(0)
    elif len(sys.argv) == 3 and sys.argv[2] == 'only':
        # Single-resource mode: one synchronous download, then exit.
        res = url_request_all_type(sys.argv[1], "", use_idm=False)
        logger.info(str(res))
        exit(0)
    else:
        # mkdir for download
        os.makedirs('snapshot/obj/', mode=0o777, exist_ok=True)
        use_idm = False
        # Global pool: also referenced by the helper functions above.
        ppool = multiprocessing.Pool(processes=2)
        url = sys.argv[1]
        # taskids = [(ppool.apply_async(sleep_func, (i,)), i) for i in range(6)]
        taskids = process_save_to_m3u8url(url, use_idm)
        check_taskids(taskids, use_idm)
        ppool.close()
        ppool.join()
        # Blocks until all worker processes have finished.