# -*- coding:utf-8 -*-
import os
import random
import sys
import threading

import requests
import datetime
from Crypto.Cipher import AES
import re
import json
import sqlite3

from multiprocessing import Lock
from binascii import b2a_hex, a2b_hex

import os


import time

def progress(process_name, percent, width=50):
    '''
    Print a one-line, in-place text progress bar.

    :param process_name: label shown before the bar
    :param percent: progress, clamped to the range [0, 100]
    :param width: progress-bar length in characters
    '''
    # Clamp both ends (the original only clamped values above 100).
    percent = max(0, min(percent, 100))
    filled = "#" * int(width * percent / 100)
    # Build the bar without %-formatting the label itself, so a
    # process_name containing '%' no longer crashes the formatter.
    show_str = "%s:[%s]" % (process_name, filled.ljust(width))
    print('\r%s %d%%' % (show_str, percent), end='')







# Bypass any system proxy for requests to the scraped site.
os.environ['NO_PROXY'] = '133925.com'
# reload(sys)
# sys.setdefaultencoding('utf-8')
# Timeout in seconds; Requests raises an exception when a request waits longer.
g_timeout = 30

def download(url, task_id):
    '''
    Download every media segment of an M3U8 playlist into a fresh folder
    and merge the segments into a single file.

    :param url: URL of the (first-level) M3U8 playlist
    :param task_id: identifier appended to the download folder name
    :raises Exception: if the URL is not an M3U8 playlist, or no segment
        links are found in it
    '''
    # os.path.join instead of the unportable "\download" literal.
    download_path = os.path.join(os.getcwd(), "download")
    if not os.path.exists(download_path):
        os.mkdir(download_path)

    # Per-task folder: timestamp + 8 random letters + task id.
    # ''.join(...) fixes the original str + list TypeError
    # (random.sample returns a list, not a string).
    rand_tag = ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 8))
    folder_name = (datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
                   + rand_tag + "_id_" + str(task_id))
    download_path = os.path.join(download_path, folder_name)
    os.mkdir(download_path)

    # Fetch the first-level M3U8 playlist. (The original called
    # all_content.encode('utf-8') and discarded the result — a no-op.)
    all_content = requests.get(url, timeout=g_timeout).text
    if "#EXTM3U" not in all_content:
        raise Exception("非M3U8的链接")

    if "EXT-X-STREAM-INF" in all_content:  # master playlist: follow the variant playlist
        for line in all_content.split("\n"):
            if '.m3u8' in line:
                url = url.rsplit("/", 1)[0] + "/" + line  # second-level playlist URL
                all_content = requests.get(url, timeout=g_timeout).text

    file_line = all_content.split("\n")
    total = len(file_line)
    print("length:" + str(total))

    unknow = True
    key = ""
    idx = 0
    for index, line in enumerate(file_line):  # second-level playlist
        if "#EXT-X-KEY" in line:  # locate the AES decryption key
            method_pos = line.find("METHOD")
            comma_pos = line.find(",")
            method = line[method_pos:comma_pos].split('=')[1]
            print("Decode Method：", method)

            uri_pos = line.find("URI")
            quotation_mark_pos = line.rfind('"')
            key_path = line[uri_pos:quotation_mark_pos].split('"')[1]

            key_url = url.rsplit("/", 1)[0] + "/" + key_path  # key URL
            res = requests.get(key_url, timeout=g_timeout)  # timeout added for consistency
            key = res.content
            print("key：", key)

        if "EXTINF" in line:  # the next playlist line holds the segment address
            unknow = False
            pd_url = url.rsplit("/", 1)[0] + "/" + file_line[index + 1]  # segment URL

            res = requests.get(pd_url, timeout=g_timeout)
            c_fule_name = file_line[index + 1].rsplit("/", 1)[-1]
            idx = idx + 1
            if len(key):  # AES decryption
                # NOTE(review): the key doubles as the IV here — confirm this
                # matches the stream's actual IV convention.
                cryptor = AES.new(key, AES.MODE_CBC, key)
                with open(os.path.join(download_path, c_fule_name + ".mp4"), 'ab') as f:
                    f.write(cryptor.decrypt(res.content))
            else:
                with open(os.path.join(download_path, str(idx) + ".ts"), 'ab') as f:
                    f.write(res.content)
                    f.flush()
                # str(task_id): task_id need not be a string.  total fixes the
                # original bug of printing the entire playlist list as the
                # progress denominator.
                print(str(task_id) + ": " + str(idx) + "/" + str(total))
    if unknow:
        raise Exception("未找到对应的下载链接")
    else:
        print("下载完成")
    merge_file(download_path)


def merge_file(path):
    '''
    Concatenate every downloaded segment file in *path* into a single new.mp4.

    NOTE(review): Windows-only — relies on cmd.exe `copy /b` and `del /Q`;
    this will not work on POSIX systems.
    :param path: directory containing the downloaded segment files
    '''
    os.chdir(path)  # side effect: changes the process-wide working directory
    cmd = "copy /b * new.tmp"  # binary-concatenate all files into new.tmp
    os.system(cmd)
    os.system('del /Q *.ts')   # delete the now-merged segment files
    os.system('del /Q *.mp4')
    os.rename("new.tmp", "new.mp4")

# Explicitly disable HTTP/HTTPS proxies for requests made with proxies=proxies.
proxies = {"http": None, "https": None}


def process_link(base_url, type_url):
    '''
    Scrape one listing page for /play/NNN.html links, resolve each to its
    m3u8 playlist URL, and append the playlist URLs to video_link.txt.

    :param base_url: site root, e.g. "http://133925.com"
    :param type_url: listing page path, e.g. "/type/1.html"
    '''
    headers = {
        'Accept': 'application/json',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
    }
    content = requests.get(base_url + type_url, proxies=proxies, headers=headers).text
    if not content:  # .text is never None; guard against an empty body instead
        return
    # re.findall's third argument is a flags bitmask; the original passed
    # False (== 0), i.e. "no flags" — drop it entirely.
    reg = r"/play/[0-9]*.html"
    li = re.findall(reg, content)
    if len(li) == 0:
        return
    # 'links' instead of the original name, which shadowed the builtin 'list'.
    links = []
    for i in li:
        print(i)
        links.append(base_url + i)

    print(links)

    # e.g. https://play.mba:8866/pp/?url=https://cdn.h748.com/avid61228d82d0330/index.m3u8
    reg2 = r"https://cdn.[\s\S]*index.m3u8"

    page_link = []

    for i in links:
        html = requests.get(i).text
        link = re.findall(reg2, html)
        if len(link) > 0:
            page_link.append(link[0])

    if len(page_link) == 0:
        return
    with open("video_link.txt", "a+") as file:
        for i in page_link:
            page = requests.get(i).text
            print(page)
            base_link = i.split("index.m3u8")[0]
            indexmu80 = page.split("\n")[2]  # third line of the first-level playlist

            mu80_list_link = base_link + indexmu80
            print(mu80_list_link)
            # download(mu80_list_link)
            file.write(mu80_list_link + "\n")
            file.flush()
        # the with-statement closes the file; no explicit close() needed



def save_link():
    '''
    Load the URLs collected in video_link.txt into the `links` table of
    video_link.db, one row per non-blank line, with status = 0 (pending).

    The SQLite database file is created automatically if it does not exist
    (the `links` table itself must already exist).
    '''
    conn = sqlite3.connect('video_link.db')
    try:
        cursor = conn.cursor()
        with open("video_link.txt", "r") as file:
            for raw in file:
                link = raw.strip()
                if not link:
                    continue  # skip blank lines (the original inserted them)
                # Parameterized query — the original built SQL by string
                # concatenation, which breaks on quotes and invites injection.
                cursor.execute(
                    'insert into links (link, status) values (?, ?)',
                    (link, 0))
                # rowcount reports the number of inserted rows
                print(cursor.rowcount)
        cursor.close()
        conn.commit()
    finally:
        # Always release the connection, even if the file or an insert fails.
        conn.close()


def update_db(id):
    '''
    Mark the link row with the given primary key as downloaded (status = 1).

    :param id: primary key of the row in the `links` table; may be an int
        (as returned by the SELECT in get_link_from_db_download) or a str.
    '''
    conn = sqlite3.connect('video_link.db')
    try:
        cursor = conn.cursor()
        # Parameterized query — the original concatenated `id` into the SQL
        # string, which raises TypeError for int ids and invites injection.
        cursor.execute("update links set status=1 where id=?", (id,))
        cursor.close()
        conn.commit()
    finally:
        conn.close()


# Shared queue of pending download-task dicts, consumed by the worker threads.
task_queue = []

# Handles of the started worker threads, joined in get_link_from_db_download().
threads_list = []
# Serializes pops from task_queue across the workers.
lock = Lock()


def call():
    '''
    Worker-thread loop: pop tasks off the shared task_queue, download each
    link, and mark the row as done in the database.  Returns when the queue
    is empty.
    '''
    while True:
        # 'with lock' releases the lock even if pop() raises; the original
        # acquire/release pair leaked the lock on an exception.
        with lock:
            if not task_queue:
                # The original pop() on an empty list raised IndexError and
                # killed the thread; exit cleanly instead.
                return
            task = task_queue.pop()  # claim one task
        try:
            # Tasks are plain dicts — the original used attribute access
            # (task.link), which raises AttributeError on a dict.
            download(task["link"], task["task_id"])  # download the stream
            update_db(task["id"])  # mark the row as downloaded
        except Exception:
            # Best-effort: log and move on to the next task (narrowed from
            # the original bare except, which also swallowed SystemExit).
            print("download err\n")




def get_link_from_db_download():
    '''
    Load all pending links (status = 0) from video_link.db, queue one task
    per row, then download them with 5 worker threads and wait for all
    workers to finish.
    '''
    conn = sqlite3.connect('video_link.db')
    cursor = conn.cursor()
    cursor.execute('select * from links where status=0')
    values = cursor.fetchall()
    cursor.close()
    conn.close()  # the original never closed the connection

    for row in values:
        # Random 5-letter tag for the per-task download folder name.
        # ''.join(...) — random.sample returns a list, not a string.
        task_id = ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 5))
        # String keys — the original used the *variables* as keys, and
        # task_id (a list back then) is unhashable, so building the dict
        # raised TypeError.
        task_queue.append({
            "task_id": task_id,
            "id": row[0],
            "link": row[1],
        })

    for _ in range(5):
        t = threading.Thread(target=call)
        t.daemon = True  # setDaemon() is deprecated; assign the attribute
        t.start()
        threads_list.append(t)

    # Block until every worker has drained the queue.
    for t in threads_list:
        t.join()


if __name__ == '__main__':
    # get_link_from_db_download()
    # ========= demo: drive the progress bar with a simulated transfer =========
    data_size = 3030333  # total byte count of the simulated transfer
    # Receive 1024 bytes per tick until the running total reaches data_size.
    for recv_size in range(1024, data_size + 1024, 1024):
        time.sleep(0.01)  # simulated network latency per chunk
        recv_per = int(100 * recv_size / data_size)  # received percentage
        progress("task_1", recv_per, width=50)  # render both demo bars
        progress("task_2", recv_per, width=50)
# if __name__ == '__main__':
#     base_url = "http://133925.com"
#     count = 0
#     for i in range(1, 6):
#         for sub_i in range(1, 10):
#             if i == 3:
#                 print("return")
#                 break
#             print(str(i) + "-" + str(sub_i))
#             if(sub_i == 1):
#                 process_link(base_url, "/type/"+ str(i) +".html")
#             else:
#                 process_link(base_url, "/type/" + str(i) + "-" + str(sub_i) + ".html")