import re
import requests
import os
from multiprocessing.dummy import Pool
from tqdm import tqdm
from retry import retry
import time
import random
from Crypto.Cipher import AES  # 注：python3 安装 Crypto 是 pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple pycryptodome
from urllib.parse import urlparse


def extract_base_url(url):
    """Return only the scheme and host of *url*, e.g. "https://host.example"."""
    parts = urlparse(url)
    return f"{parts.scheme}://{parts.netloc}"
# Request headers impersonating an Android mobile browser, so video hosts
# serve the mobile playlist variant instead of blocking the client.
headers = {
    'Sec-Ch-Ua-Platform': '"Android"',
    'User-Agent': "Mozilla/5.0 (Linux; Android 13; SM-A536E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36 uacq",
}

def decode_unicode_sequences(s):
    """Replace literal ``\\uXXXX`` escape sequences in *s* with their characters.

    E.g. the 6-character text ``\\u4e2d`` becomes the single character ``中``.
    Text without such sequences is returned unchanged.

    :param s: string possibly containing literal ``\\uXXXX`` sequences.
    :return: the decoded string.
    """
    # chr(int(hex, 16)) decodes each code point directly — no eval() on
    # (potentially untrusted) input, and re.sub does a single linear pass
    # instead of repeated str.replace calls.
    return re.sub(
        r'\\u([0-9a-fA-F]{4})',
        lambda m: chr(int(m.group(1), 16)),
        s,
    )


def remove_last_slash_content(url):
    """Truncate *url* just after its final '/' (the slash is kept).

    If the string contains no '/' at all, it is returned unchanged.
    """
    head, slash, _tail = url.rpartition('/')
    # rpartition yields an empty separator when no '/' exists in the string.
    if not slash:
        return url
    return head + '/'


def aes_decode(data, key):
    """Decrypt *data* with AES in CBC mode, using *key* as both key and IV.

    :param key: AES key bytes (a valid AES length: 16/24/32 bytes).
    :param data: ciphertext bytes to decrypt.
    :return: plaintext bytes with trailing zero-byte padding stripped.
    """
    cipher = AES.new(key, AES.MODE_CBC, key)
    decrypted = cipher.decrypt(data)
    # Segments are zero-padded to the block size; drop the padding.
    return decrypted.rstrip(b'\0')

def debug(*args):
    """Print all *args* on one line, concatenated with no separator,
    prefixed with a "[debug]" tag."""
    print("[debug]", *args, sep="")


def info(*args, sep=' ', end='\n', file=None, flush=False):
    """print()-like helper that can append to a file *path* instead of a stream.

    :param args: objects to print, converted with str() and joined by *sep*.
    :param sep: separator placed between arguments (default: one space).
    :param end: terminator appended after the last argument (default: newline).
    :param file: None to write to stdout; otherwise a file *path* (not a
        stream, unlike built-in print) opened in append mode.
    :param flush: forwarded to print() for stdout; ignored for file output.
    """
    # sep.join fixes the old rstrip(sep) bug, which also stripped separator
    # characters that belonged to the final argument itself
    # (e.g. info("a ") used to emit "a\n" instead of "a \n").
    output = sep.join(str(arg) for arg in args) + end
    if file is None:
        print(output, end="", flush=flush)
    else:
        with open(file, 'a', encoding='utf-8') as f:
            f.write(output)

class Download:
    """Download an HLS (m3u8) video end to end.

    Resolves the playlist chain, downloads every .ts segment concurrently
    (decrypting AES-128 segments when a key is advertised), then concatenates
    the segments into a single .mp4.  The whole pipeline runs from __init__:
    constructing the object performs the download.
    """

    def __init__(self, url="https://vip.ffzy-play8.com/20230924/1941_7479f0a7/index.m3u8", name="temp", dir="F:\media",
                 info="temp", index_url ="none"):
        """Kick off the full download pipeline.

        :param url: m3u8 playlist URL (may be a master playlist that only
            points at the real media playlist).
        :param name: video name; used for the sub-folder and the final .mp4.
        :param dir: root download directory.
        :param info: category sub-folder name; a default is used when empty.
        :param index_url: page the m3u8 was found on; recorded in readme.txt only.
        """
        if len(info) == 0:
            info = "默认"
        self.url = url
        self.key = None        # AES key bytes once fetched from the playlist, else None
        self.ts_total = 1024   # placeholder; overwritten by get_ts_list() with the real count
        self.ts_list = []      # absolute URLs of the .ts segments
        self.file_name = name
        self.index_url=index_url
        self.down_path = f"{dir}/{info}/{self.file_name}"
        self.mp4_name = f"{self.down_path}/{self.file_name}.mp4"
        # Skip the whole pipeline if the merged file already exists.
        if os.path.exists(self.mp4_name):
            print(f"{self.mp4_name}已经存在")
            return

        self.check_down_dir()
        self.save_info()
        ret = self.get_ts_list()
        print(f"获取ts流完成")
        if ret:
            print(f"获取ts流地址完成")
            self.download_m3u8()
            print(f"获取ts流数据完成")
            self.merge()
            print(f"合并完成")

    def check_down_dir(self):
        """Create the download directory (including parents) if it is missing."""
        if not os.path.exists(self.down_path):
            # Create the folder when it does not exist yet.
            try:
                os.makedirs(self.down_path)
                print(f"文件夹 '{self.down_path}' 已创建")
            except OSError as e:
                print(f"创建文件夹 '{self.down_path}' 时发生错误: {e}")


    def save_txt(self,name, info):
        """Write *info* (bytes) to file *name* inside the download directory,
        replacing any previous copy."""
        info_name = f"{self.down_path}/{name}"
        if os.path.exists(info_name):
            os.remove(info_name)
        with open(info_name, "wb") as f:
            f.write(info)


    def save_info(self):
        """Record the video name, index page and playlist URL into readme.txt."""
        ts_info_name = f"{self.down_path}/readme.txt"
        if os.path.exists(ts_info_name):
            os.remove(ts_info_name)
        with open(ts_info_name, "wb") as f:
            info = f"{self.file_name}\n{self.index_url}\n{self.url}"
            f.write(info.encode())


    def get_ts_list(self):
        """Resolve the playlist chain and populate self.ts_list / self.ts_total.

        Follows a master playlist (#EXT-X-STREAM-INF) to the media playlist,
        fetches the AES key referenced by #EXT-X-KEY into self.key, and
        collects absolute segment URLs.

        :return: the segment count on success, an empty list when a request
            fails, or None on a non-200 response — the caller only tests
            truthiness, so all failure values behave the same.
        """
        url = self.url
        url_tail = None
        is_get_key = 0
        print("请求：", url)
        # e.g. https://vip.ffzy-play8.com/20230924/1941_7479f0a7/index.m3u8
        ts_list = []
        # First request: may return the real media playlist directly, or a
        # master playlist that only references it.
        try:
            url_ret = requests.get(url, headers=headers)
        except:
            print(url,"请求失败")
            return ts_list
        # This may not be the final m3u8 — locate the real playlist first.
        if "#EXT-X-STREAM-INF:" in url_ret.text:
            if url_ret.status_code != 200:
                print(f"网页{url}返回错误{url_ret.status_code}")
                return None
            print("获取返回的视频信息，包括真正的地址：", url_ret.text)

            url_ret_text = url_ret.text.split("\n")
            # The relative path returned (e.g. "2000k/hls/mixed.m3u8") is joined
            # onto the directory part of the current URL to get the real m3u8.
            # print(url_ret_text)
            url_top = remove_last_slash_content(url)
            for text in url_ret_text:
                if ".m3u8" in text:
                    print(text)
                    url_tail = text
                    break
            url = url_top + url_tail
            print("1获取到真正的地址：", url)
            # e.g. https://vip.ffzy-play8.com/20230924/1941_7479f0a7/2000k/hls/mixed.m3u8
            try:
                url_ret = requests.get(url,headers=headers)
            except:
                print(url, "请求失败")
                return ts_list

        print("1请求真正的地址返回的结果：", url_ret.text)
        if ".ts" not in url_ret.text:
            # Directory-relative join produced a bad URL; retry with the tail
            # joined onto the site root instead.
            url = extract_base_url(url)+url_tail
            print("2拼接错误，获取到真正的地址：", url)
            try:
                url_ret = requests.get(url,headers=headers)
            except:
                print(url, "请求失败")
                return ts_list
        if ".ts" not in url_ret.text:
            print(url, "请求结果：\n", url_ret.text)
            return ts_list

        url_top = remove_last_slash_content(url)
        file_line = url_ret.text.split("\n")  # walk the playlist line by line
        for context in file_line:
            if is_get_key==0 and "#EXT-X-KEY" in context:
                #print(file_line)
                searchObj = re.search(r'#EXT-X-KEY:(.*?),URI="(.*?)"', context)
                if searchObj:
                    key_url = searchObj.group(2)
                    # NOTE(review): os.path.join on a URL uses the OS path
                    # separator — on Windows this can yield backslashes in the
                    # request URL; urljoin would be safer. Confirm before changing.
                    key_url = os.path.join(url_top,key_url)
                    #print("key_url=", key_url)
                    res = requests.get(key_url,headers=headers)
                    if res.status_code != 200:
                        print(f"网页{key_url}返回错误{res.status_code}")
                        return None
                    else:
                        key = res.content
                        print("key=", key)
                        self.key = key
                        is_get_key = 1

            if ".ts" in context:# collect segment addresses
                ts_list.append(context)
        #print("获取返回的ts流地址", ts_list)
        # Make every segment URL absolute (already-absolute ones pass through).
        for ts in ts_list:
            if "https://" in ts:
                self.ts_list.append(ts)
            else:
                self.ts_list.append(os.path.join(url_top,ts))

#        print("对返回的ts地址列表中加上地址头：", self.ts_list)
        self.ts_total = len(self.ts_list)
        return self.ts_total


    @retry(tries=30, delay=1)  # retry up to 30 times, 1 s apart
    def download_ts(self, i, url):
        """Download segment *i* to "<down_path>/<i>.ts", decrypting with
        self.key when the stream is AES-encrypted.

        Existing files larger than 500 bytes are kept as-is; smaller ones are
        treated as failed downloads and refetched.
        """
        ts_name = f"{self.down_path}/{i}.ts"
        #  print(ts_name)
        if os.path.exists(ts_name):
            print(ts_name,"已经存在")
            if os.path.getsize(ts_name) >500:
                return
            else:
                os.remove(ts_name)

        # Up to 15 attempts with a random 10-20 s back-off between them.
        for n in range(15):
            try:
                response = requests.get(url, stream=True, timeout=60,headers=headers)
                break
            except requests.exceptions.RequestException as e:
                time.sleep(random.randint(10, 20))
                #print("请求异常:", e)
        else:
            print(f"{i}.ts 下载失败")
            return

        # Responses under 500 bytes are assumed to be error pages, not media.
        if len(response.content) <500:
            print(f"{i}.ts 下载失败")
            return

        with open(ts_name, "wb") as f:
            if self.key == None:
                f.write(response.content)
            else:
                f.write(aes_decode(response.content, self.key))


    def merge(self):
        """Concatenate 0.ts … N-1.ts into the final .mp4, deleting each
        segment as it is appended.  Returns silently (no merge) if any
        segment is still missing."""
        mp4_name = self.mp4_name
        if os.path.exists(mp4_name):
            os.remove(mp4_name)

        for i in range(self.ts_total):# only merge once every segment exists
            path = f"{self.down_path}/{i}.ts"
            if not os.path.exists(path):
                return

        with open(mp4_name, "ab") as f:
            for i in range(self.ts_total):
                try:
                    path = f"{self.down_path}/{i}.ts"
                    if os.path.exists(path):
                        content = open(path, "rb").read()
                        f.write(content)
                        os.remove(path)
                except Exception as e:
                    print(e)


    def download_m3u8(self):
        """Download every URL in self.ts_list concurrently on a thread pool,
        reporting progress with tqdm."""
        ts_list = self.ts_list
        total = len(ts_list)
        if total == 0:
            print("没有任何ts 下载")
            return
        pbar = tqdm(total=total)
        pbar.set_description(self.file_name)
        update = lambda *args: pbar.update()
        # NOTE(review): 312 worker threads against a single host is very
        # aggressive — confirm the host tolerates it before keeping this value.
        pool = Pool(312)
        for i in range(total):
            #if not os.path.exists(f"{self.down_path}/{i}.ts"):
            pool.apply_async(self.download_ts, args=(i, ts_list[i],), callback=update)
        pool.close()
       # time.sleep(120)
        pool.join()


if __name__ == '__main__':
    # Target playlist.  Earlier candidates, kept for reference:
    #   https://vip.ffzy-play8.com/20230924/1941_7479f0a7/index.m3u8
    #   https://hd.ijycnd.com/play/zbqlWrDa/index.m3u8
    #   https://ikcdn01.ikzybf.com/20221115/qC6z3Dvy/index.m3u8
    url = 'https://ikcdn01.ikzybf.com/20221105/DdvD0OV1/index.m3u8'
    # Constructing Download runs the whole fetch / decrypt / merge pipeline.
    m3u8 = Download(url=url, name="西游记", dir="download",
                    info="2023日本古装")