import re
from Crypto.Cipher import AES
import httpx
from tqdm import tqdm


class XiaoCrawler:
    """Crawler for an AES-128 encrypted HLS (m3u8) video.

    Workflow: fetch the m3u8 playlist, extract the key URI and the ts
    segment names, then download each segment, decrypt it (AES-128-CBC)
    and append the plaintext to a single output file.
    """

    # Base URL that the playlist's relative ts segment names are appended to.
    _TS_BASE = "https://encrypt-k-vod.xet.tech/9764a7a5vodtransgzp1252524126/e28162ef5285890801423538874/drm/"

    def __init__(self, url: str):
        self.url = url
        # One HTTP/2 client reused for every request (playlist, key, segments).
        self.client = httpx.Client(http2=True)
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
        }
        self.m3u8 = ""          # raw playlist text
        self.ts_url_list = []   # absolute segment URLs, in playback order
        self.key_url = ""       # URI of the AES-128 decryption key
        self.key = b""          # cached key bytes; fetched lazily, once

    def get_m3u8(self):
        """Download the m3u8 playlist text into ``self.m3u8``."""
        resp = self.client.get(url=self.url, headers=self.headers)
        self.m3u8 = resp.text

    def parse_m3u8(self):
        """Extract the key URI and the ts segment URLs from the playlist.

        Raises:
            AttributeError: if the playlist has no ``URI="..."`` attribute
                (``re.search`` returns ``None``).
        """
        # The EXT-X-KEY line carries the key location as URI="...".
        self.key_url = re.search(r'URI="(.*?)"', self.m3u8).group(1)
        # Segment names sit between an "#EXTINF:...," line and the next tag.
        for ts in re.findall(r',\n(.*?)\n#', self.m3u8, re.S):
            self.ts_url_list.append(f"{self._TS_BASE}{ts}")

    def decryption(self, data):
        """Decrypt one encrypted ts segment and return the plaintext bytes.

        The key is fetched over HTTP on first use and cached; the original
        code re-downloaded the identical key for every segment.
        """
        if not self.key:
            self.key = self.client.get(url=self.key_url, headers=self.headers).content
        # NOTE(review): a fixed IV of ASCII zeros is assumed here; the
        # playlist's EXT-X-KEY line may declare an explicit IV — confirm
        # against the playlist if decryption produces garbage.
        aes = AES.new(key=self.key, mode=AES.MODE_CBC, iv=b'0000000000000000')
        return aes.decrypt(data)

    def save_video(self):
        """Download, decrypt and append every segment to ../video/1.mp4."""
        # 'ab' appends, so a re-run grows an existing file — presumably a
        # resume-style choice; switch to 'wb' if each run should start fresh.
        with open('../video/1.mp4', 'ab') as f:
            for ts_url in tqdm(self.ts_url_list, desc="下载中"):
                data = self.client.get(url=ts_url, headers=self.headers).content
                f.write(self.decryption(data))

    def run(self):
        """Execute the full pipeline, then release the HTTP client."""
        try:
            self.get_m3u8()
            self.parse_m3u8()
            self.save_video()
        finally:
            # The client was never closed before; release its connections.
            self.client.close()


if __name__ == '__main__':
    # NOTE: the sign/t/us/time/uuid query parameters are time-limited
    # signatures — this exact URL will eventually stop working.
    playlist_url = "https://encrypt-k-vod.xet.tech/9764a7a5vodtransgzp1252524126/e28162ef5285890801423538874/drm/v.f230.m3u8?sign=b0ea24fbb8225f7d4d05afa6c68190f1&t=65e2d9a2&us=rsQdBprMpU&time=1709322468718&uuid=u_65e22729695be_6KMHq36Af6"
    XiaoCrawler(playlist_url).run()
