# -*- coding: utf-8 -*-

import time

# Request headers copied from a logged-in browser session.
# NOTE(review): the Cookie value is session/account specific and expires —
# it must be refreshed from the browser before each run.
# The browser's hard-coded 'Content-Length: 73' header was removed: the
# actual JSON payload posted by run() is much larger, and requests computes
# the correct Content-Length automatically; sending a stale value can make
# the server truncate or reject the body.
headers = {
'Accept':'application/json, text/plain, */*',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
'Connection':'keep-alive',
'Content-Type':'application/json',
'Cookie':'_ga=GA1.2.922936372.1617166896; LF_ID=1617166896330-6169665-6008900; GCID=8a5a142-5d865b0-fc474f0-af5bacf; GRID=8a5a142-5d865b0-fc474f0-af5bacf; _gid=GA1.2.621688967.1627805871; GCESS=BgIE01gGYQ0BAQsCBgAMAQEEBAAvDQABCK2gJQAAAAAAAwTTWAZhBwQMjHjTCQEBBQQAAAAACgQAAAAABgQR5GBrCAED; gksskpitn=1f6e2e22-713e-4344-a3b0-722a7ec694d2; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%222465965%22%2C%22first_id%22%3A%2217886a88cfa818-00df0769652529-5771031-1327104-17886a88cfbacd%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_landing_page%22%3A%22https%3A%2F%2Ftime.geekbang.org%2F%22%7D%2C%22%24device_id%22%3A%2217886a88cfa818-00df0769652529-5771031-1327104-17886a88cfbacd%22%7D; Hm_lvt_022f847c4e3acd44d4a2481d9187f1e6=1627215097,1627805871,1627805908,1627821192; Hm_lvt_59c4ff31a9ee6263811b23eb921a5083=1627215097,1627805870,1627805908,1627821192; _gat=1; Hm_lpvt_022f847c4e3acd44d4a2481d9187f1e6=1627822062; Hm_lpvt_59c4ff31a9ee6263811b23eb921a5083=1627822062; gk_process_ev={%22count%22:18%2C%22target%22:%22%22}; SERVERID=1fa1f330efedec1559b3abbcb6e30f50|1627822062|1627821191',
'Host':'time.geekbang.org',
'Origin':'https://time.geekbang.org',
'Referer':'https://time.geekbang.org/column/intro/100017301',
'sec-ch-ua':'"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
'sec-ch-ua-mobile':'?0',
'Sec-Fetch-Dest':'empty',
'Sec-Fetch-Mode':'cors',
'Sec-Fetch-Site':'same-origin',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
}

import json
import requests
import threading
import os
import re

def validateTitle(title):
    """Remove characters that are unwanted or invalid in file names.

    Args:
        title: raw article title, possibly containing CJK/ASCII punctuation.

    Returns:
        The title with the listed punctuation characters (and spaces)
        removed, and leading/trailing whitespace stripped.
    """
    punctuation = '!,;:?"\'、，；“ ” 《 》【】？ + * & /'
    # re.escape guards against characters that are special inside a regex
    # character class (']', '\\', '^', '-'); for the current set the match
    # behavior is identical to the unescaped version.
    new_title = re.sub('[{}]+'.format(re.escape(punctuation)), '', title)
    return new_title.strip()


# def validateTitle(title):
#     punctuation = r'!,;:?"\'、，；“ ”& 《 》【】？ + *| & /'
#     new_title1 = re.sub(r'[{}]+'.format(punctuation),'',title)
#     return new_title1.strip()
# #
def crawl_list_data(obj, index):
    """Download one article's audio: fetch its m3u8 playlist, download every
    .ts segment, then concatenate the segments into a single .mp4 file.

    Args:
        obj:   article dict from the geekbang article-list API; expected to
               contain 'article_sharetitle' and 'audio_url' keys.
        index: position of the article in the list; keeps each article's
               working files in its own ./m3u8/<index>/ directory and
               prefixes the output file name.
    """
    title = obj.get('article_sharetitle')
    new_title = validateTitle(title)
    audio_url = obj.get('audio_url')
    res = requests.get(audio_url)

    # Save the raw m3u8 playlist under ./m3u8/<index>/file/
    save_path = './m3u8/' + str(index) + '/file'
    os.makedirs(save_path, exist_ok=True)
    save_name = os.path.join(save_path, new_title + '.txt')
    with open(save_name, 'wb') as f:
        f.write(res.content)

    # Segment directory ./m3u8/<index>/ts — created up front so it exists
    # even for an empty playlist (previously it was only defined inside the
    # loop, which raised NameError on playlists with no segments).
    save_ts = os.path.join(os.path.abspath(os.path.dirname(save_path)), 'ts')
    os.makedirs(save_ts, exist_ok=True)

    # Download each .ts segment, remembering playlist order.  The segments
    # MUST later be concatenated in playlist order: os.listdir() gives no
    # ordering guarantee, and even sorted lexicographic order misorders
    # numbered segments ('10.ts' < '2.ts'), which corrupts the output.
    segment_names = []
    with open(save_name, 'r', encoding='utf-8') as f:
        for i in f:
            if i.startswith('#'):  # skip m3u8 directive lines
                continue
            line = i.strip()
            if not line:
                continue
            # Segment URLs are relative to the playlist's base URL.
            ts_url = audio_url.replace('ld.m3u8', '') + line
            with requests.get(ts_url) as resul1:
                with open(os.path.join(save_ts, line), 'wb') as ts_f:
                    ts_f.write(resul1.content)
            segment_names.append(line)
            print(line+'done')

    # Concatenate the segments (in playlist order) into one .mp4.
    # NOTE(review): output directory is hard-coded and Windows-specific.
    vido_path = r'G:\TulingPaCong\day6\video\极客时间\数据结构与算法之美'
    os.makedirs(vido_path, exist_ok=True)
    name_file = os.listdir(save_path)
    names = name_file[0].split('.')[0] + '.mp4'
    with open(os.path.join(vido_path, str(index) + names), 'wb+') as mp4:
        for seg in segment_names:
            # open via 'with' so each segment handle is closed promptly
            with open(os.path.join(save_ts, seg), 'rb') as ts_f:
                mp4.write(ts_f.read())


def run():
    """Fetch the column's article list from the geekbang API, then download
    every article's audio concurrently — one worker thread per article —
    and wait for all of them to finish."""
    payload = {"cid":'126',"size":'100',"prev":'0',"order":"earliest","sample":'false',"chapter_ids":["346","347","348","349","350","344","351","352","353","1143"]}
    response = requests.post(
        url='https://time.geekbang.org/serv/v1/column/articles',
        data=json.dumps(payload),
        headers=headers,
    ).json()
    articles = response.get('data')['list']
    workers = []
    for idx, article in enumerate(articles):
        worker = threading.Thread(target=crawl_list_data, args=(article, idx))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == '__main__':
    # Time the full download run and report the elapsed seconds.
    started_at = time.time()
    run()
    elapsed = time.time() - started_at
    print(fr'用时>>>>>>>>>>:  {elapsed}')
