# m3u8 video-segment crawler
# 2021/9/13
# Purpose: download .ts segments asynchronously with aiohttp, then merge them into one mp4.
import aiohttp
import asyncio
import os

async def aiodownload(url):
    """Download one media segment from *url* and save it as ``vues/<name>.ts``.

    The file name is the last path component of the URL. The response body
    is read with ``await`` so the event loop is not blocked while downloading.
    """
    name = url.rsplit("/", 1)[1]  # last URL component becomes the file name
    os.makedirs("vues", exist_ok=True)  # target dir may not exist on first run
    # `async with` is required inside a coroutine; it also guarantees the
    # session/response are closed even if the download raises.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            with open("vues/" + name + '.ts', mode="wb") as f:
                f.write(await resp.content.read())  # body read is async -> await
    print(name, "搞定")

def merge_ts():
    """Concatenate the downloaded .ts segments into a single ``mvs.mp4``.

    Reads segment URLs from the local m3u8 playlist, derives the segment
    file names, and shells out to the Windows ``copy /b`` command to join
    them binary-wise:
        windows:   copy /b 1.ts+2.ts+3.ts xxx.mp4
        mac/linux: cat 1.ts 2.ts 3.ts > xxx.mp4
    """
    lst = []
    with open("了不起的老爸：在线播放.m3u8", mode="r", encoding='utf-8') as f:
        for line in f:
            line = line.strip()  # drop surrounding whitespace / blank lines
            if line.startswith("#"):  # skip m3u8 directive lines
                continue
            try:
                name = line.rsplit('/', 1)[1]  # last URL component = segment name
            except IndexError:
                print("失败")
                continue  # bad line: don't append a stale/undefined name
            lst.append(f"{name}.ts")

    s = " + ".join(lst)
    # Output name must NOT carry a leading '+', otherwise cmd treats it as
    # yet another input file instead of the destination.
    b = 'copy /b ' + s + ' mvs.mp4'
    print(b)
    # NOTE(review): os.system runs through the shell; the segment names come
    # straight from the playlist — confirm the playlist source is trusted.
    os.system(b)
    print("成功")



async def main():
    """Entry coroutine: merge the already-downloaded segments.

    The concurrent download fan-out is currently disabled; the reference
    below shows how to re-enable it before calling merge_ts():

        tasks = []
        with open("了不起的老爸：在线播放.m3u8", mode="r", encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line.startswith("#"):
                    continue
                tasks.append(asyncio.create_task(aiodownload(line)))
        await asyncio.wait(tasks)
    """
    merge_ts()

if __name__ == '__main__':
    # asyncio.run creates a fresh event loop, runs main() to completion,
    # and closes the loop afterwards.
    asyncio.run(main())