#!/usr/local/python3/bin/python3
import requests
import os
import subprocess
import sys
import threading
import time
import urllib
import logging
from urllib.request import urlretrieve

# Parse an m3u8 playlist and collect the ts-segment URIs it references.
def m3u8_list(m3u8_url, title):
    """Download the playlist at *m3u8_url* to ``<title>.m3u8`` and return
    the segment URIs (the line following each ``#EXTINF`` tag).

    Args:
        m3u8_url: URL of the .m3u8 playlist (any scheme urlretrieve accepts).
        title: basename used for the local copy of the playlist.

    Returns:
        list[str]: segment URIs in playlist order.
    """
    local_copy = title + '.m3u8'
    urlretrieve(m3u8_url, local_copy)
    segments = []
    with open(local_copy, 'rt') as fout:
        expect_uri = False
        for line in fout:
            if line.startswith('#EXTINF'):
                expect_uri = True
            elif expect_uri:
                # BUGFIX: was line[0:-1], which chops the last character
                # when the final line has no trailing newline and leaves
                # a stray '\r' on CRLF playlists. strip() handles both.
                segments.append(line.strip())
                expect_uri = False
    return segments

def down_url(url, filename):
    """Stream *url* to *filename* in 1 KiB chunks.

    Raises:
        requests.HTTPError: on a non-2xx response, so the caller's retry
            logic fires instead of an error page being saved as a .ts file.
        requests.Timeout: if the server stalls for over 30s (previously
            this could hang forever).
    """
    with requests.get(url, stream=True, timeout=30) as r:
        # BUGFIX: without this check a 404/5xx body was silently written
        # out as if it were valid video data.
        r.raise_for_status()
        with open(filename, 'wb') as fd:
            for chunk in r.iter_content(chunk_size=1024):
                fd.write(chunk)

# Download a ts file, retrying forever on failure (e.g. ContentTooShortError
# or flaky network conditions).
def recu_down(url, filename):
    """Keep calling down_url(url, filename) until it succeeds.

    Uses a loop instead of the previous self-recursion so that a long
    outage cannot overflow the interpreter's recursion limit.
    """
    while True:
        try:
            down_url(url, filename)
            return
        except Exception as e:
            logging.exception(e)
            print('Network conditions is not good. Reloading... %s-%s' % (url, filename))

# Download ts segments into the temporary folder.
def down_ts(base_m3u8_url, m3u8_url_list, path, title, ts_len):
    """Download each (index, uri) pair in *m3u8_url_list* to ``<path><index>.ts``.

    Args:
        base_m3u8_url: URL prefix of the playlist, prepended to relative URIs.
        m3u8_url_list: iterable of (index, uri) pairs (see multi_down_ts).
        path: destination directory, including trailing slash.
        title: unused here; kept for signature parity with multi_down_ts.
        ts_len: total segment count, used only for progress output.
    """
    for index, item in m3u8_url_list:
        if not item.startswith(base_m3u8_url):
            url = base_m3u8_url + item
        else:
            # BUGFIX: `url` was previously left unbound here (NameError on
            # the first iteration, or a stale URL from the previous one).
            url = item
        name = path + str(index) + '.ts'
        print('[%d/%d] \t Now Downing %s  %d.ts' % (index, ts_len - 1, url, index))
        recu_down(url, name)

def multi_down_ts(base_m3u8_url, m3u8_url_list, path, title, ts_len, tid = 4):
    """Download the segments with *tid* worker threads.

    Worker i receives every tid-th (index, uri) pair, so the numbered
    output files are striped across threads without overlap. Blocks
    until all workers have finished.
    """
    indexed = list(enumerate(m3u8_url_list))
    workers = [
        threading.Thread(
            target=down_ts,
            # Slice [i::tid] picks indices i, i+tid, i+2*tid, ... —
            # the same partition as filtering on index % tid == i.
            args=(base_m3u8_url, indexed[i::tid], path, title, ts_len),
        )
        for i in range(tid)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

# Write the concat list consumed by ffmpeg's concat demuxer.
def write_confile(path, ts_len):
    """Write ``<path>_confile.txt`` listing ``file '<path>/<i>.ts'`` for
    i in 0..ts_len-1, one entry per line with no trailing newline."""
    entries = ["file '%s/%d.ts'" % (path, i) for i in range(ts_len)]
    with open(path + '_confile.txt', 'w') as fout:
        fout.write('\n'.join(entries))
# Merge the numbered ts segments into a single video file.
def merge_ts_video(title, path, v_type='.mp4'):
    """Run ffmpeg's concat demuxer over ``<path>_confile.txt``, producing
    ``<title><v_type>`` via stream copy (no re-encode).

    Passing an argument list (shell=False) fixes the quoting problems of
    the old os.system string: a title containing spaces, quotes or shell
    metacharacters would break the command — or execute arbitrary code.

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits non-zero, so the
            caller's cleanup does not delete segments after a failed merge.
    """
    cmd = ['ffmpeg', '-f', 'concat', '-i', path + '_confile.txt',
           '-c', 'copy', title + v_type]
    print(' '.join(cmd))
    subprocess.run(cmd, check=True)

def main_down(title, m3u8_url):
    """Download the playlist at *m3u8_url* and save it as ``<title>.mp4``.

    Segments are fetched in parallel into a timestamp-named scratch
    directory, merged with ffmpeg, and the scratch files (directory,
    playlist copy, concat list) are removed after a successful merge.
    Skips the whole job if ``<title>.mp4`` already exists.
    """
    if os.path.isfile(title + ".mp4"):
        print("------- " + title + ".mp4 already exists")
        return
    # Scratch directory named from the current timestamp, e.g. 1700000000_123456.
    v_key = str(time.time()).replace('.', '_')
    m3u8_url_list = m3u8_list(m3u8_url, title)
    ts_len = len(m3u8_url_list)
    # Everything up to and including the last '/' of the playlist URL is
    # the base against which relative segment URIs are resolved.
    base_m3u8_url = m3u8_url[0:m3u8_url.rfind('/') + 1]
    path = v_key + '/'

    if os.path.exists(v_key):
        # BUGFIX: os.removedirs() raises OSError on a non-empty directory;
        # clear any leftover files first.
        _clear_dir(v_key)
    os.mkdir(v_key)
    multi_down_ts(base_m3u8_url, m3u8_url_list, path, title, ts_len, tid = 8)
    write_confile(v_key, ts_len)
    try:
        merge_ts_video(title, v_key)
        _clear_dir(v_key)
        os.remove(title+'.m3u8')
        os.remove(v_key+'_confile.txt')
    except Exception as e:
        print(e)

# Remove every file inside *dirpath*, then the directory itself.
def _clear_dir(dirpath):
    for item in os.listdir(dirpath):
        os.remove(os.path.join(dirpath, item))
    os.removedirs(dirpath)

def usage():
    """Print command-line usage, padded with a blank line on each side."""
    print('\nUsage: parsing_m3u8_to_video.py m3u8_url video_name\n')

if __name__ == '__main__':
    # CLI entry point: parsing_m3u8_to_video.py <m3u8_url> <video_name>
    # Missing arguments or any download/merge failure fall through to the
    # usage message plus a logged traceback.
    try:
        main_down(sys.argv[2], sys.argv[1])
    except Exception as e:
        usage()
        logging.exception(e)