# -*- coding: UTF-8 -*-
import math
import time

import requests
import json
import os
import threading
from concurrent.futures import ThreadPoolExecutor

# Browser-like request headers so the CDN does not reject automated clients.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    ),
}


def crawl(ts_url, count):
    """Download one .ts media segment and save it locally as <count>.ts.

    Parameters:
        ts_url: full URL of the .ts segment to fetch.
        count:  sequence number used as the local file name, so segments
                can later be reassembled in order by concat_ts().
    """
    ts_dir = r'G:\TulingPaCong\day6\ts'
    # exist_ok avoids the race-prone exists()-then-makedirs pair and the
    # original's non-raw path strings with invalid '\T'/'\d' escapes.
    os.makedirs(ts_dir, exist_ok=True)
    res = requests.get(ts_url, headers=headers, timeout=30)
    # Fail loudly on HTTP errors instead of silently saving an error page
    # as a video segment.
    res.raise_for_status()
    # `with` guarantees the handle is closed even if write() raises.
    with open(os.path.join(ts_dir, f'{count}.ts'), 'wb') as ts:
        ts.write(res.content)
    print(f'{count}---->done')

def concat_ts(ts_path=r'G:\TulingPaCong\day6\ts', out_file=None):
    """Concatenate numbered .ts segments into one output file.

    Parameters:
        ts_path:  directory containing the <n>.ts segments (defaults to
                  the original hard-coded folder, so concat_ts() still
                  works unchanged).
        out_file: destination path; defaults to <ts_path>\\j.mp4 as before.

    Fixes vs. the original: segments are sorted numerically (a plain
    listdir sort puts '10.ts' before '2.ts'), non-.ts entries such as the
    previously-written j.mp4 are excluded, no segments are skipped (the
    old range(2, len(f)) dropped 0.ts and 1.ts), and every file handle
    is closed.
    """
    os.makedirs(ts_path, exist_ok=True)
    if out_file is None:
        out_file = os.path.join(ts_path, 'j.mp4')
    # Keep only files named <digits>.ts and order them by their number.
    segments = sorted(
        (name for name in os.listdir(ts_path)
         if name.endswith('.ts') and os.path.splitext(name)[0].isdigit()),
        key=lambda name: int(os.path.splitext(name)[0]),
    )
    print(segments)
    with open(out_file, 'wb') as out:
        for name in segments:
            # Read each segment fully and close it before moving on.
            with open(os.path.join(ts_path, name), 'rb') as seg:
                out.write(seg.read())
            print(f'{name} done')


# 线程池
# Thread pool downloader
def get_ts():
    """Parse the local m3u8 playlist and download every segment concurrently.

    Each non-directive line of the playlist is a segment file name; it is
    resolved against the CDN base URL and fetched by crawl() on a
    50-worker thread pool. The executor's `with` block waits for all
    submitted downloads to finish before returning.
    """
    futures = []  # renamed: the original shadowed the builtin `list`
    # `with` closes the playlist file and joins the pool deterministically.
    with open(r'G:\TulingPaCong\day6\ryUumeAV.m3u8', 'r', encoding='utf8') as playlist, \
            ThreadPoolExecutor(50) as pool:
        count = 0
        for line in playlist:
            # m3u8 directives start with '#'; only bare lines name segments.
            if line.startswith('#'):
                continue
            ts_name = line.strip()
            ts_url = fr'https://m3u8.49cdn.com/hls/65/2018/11/ryUumeAV/{ts_name}'
            print(ts_url)
            futures.append(pool.submit(crawl, ts_url=ts_url, count=count))
            count += 1
            print(f'{line}--->>done')

if __name__ == '__main__':
    # Time the whole pipeline: download all segments, then stitch them.
    begin = time.time()
    get_ts()
    concat_ts()
    print('alldone')
    elapsed = time.time() - begin
    print(f"用时--->>+{elapsed}")

