#!/usr/bin/python3

from urllib.parse import urlparse
import urllib.request
import re
import uapool
import requests
import sys
import getopt
import time
import os
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex


class VideoCrawler:
    """Crawler that downloads AES-encrypted m3u8 course videos from Aliyun edu.

    Workflow: scrape the course page for its lesson links, resolve each
    lesson's m3u8 playlist, download every .ts segment, decrypt it
    (AES-128-CBC, per-segment key/IV from the playlist) and merge the
    segments into a single .ts file.
    """

    def __init__(self, cookie_str, ts_path, result_path):
        """
        :param cookie_str: raw Cookie header value used for authenticated requests
        :param ts_path: directory where temporary .ts segments are stored
        :param result_path: directory where merged videos are written
        """
        self.cookie_str = cookie_str
        self.ts_path = ts_path
        self.result_path = result_path
        # cache: key URI -> decrypted AES key, avoids re-fetching per segment
        self.key_value_dic = {}
        uapool.UA()
        cookie = ("cookie", self.cookie_str)
        headers = [cookie]
        uapool.set_header(headers)

    def set_ts_path(self, ts_path):
        """Change the directory used for temporary .ts segments."""
        self.ts_path = ts_path

    def set_result_path(self, result_path):
        """Change the directory used for merged output files."""
        self.result_path = result_path

    def get_course_list(self, course_url):
        """Scrape a course page for its name and the list of lessons.

        :param course_url: course page url
        :return: {'course_name': str,
                  'course_list': [{'href': str, 'title': str}, ...]}
        """
        doc = urllib.request.urlopen(course_url).read().decode("utf-8", "ignore")
        pat_course_name = '<h2 class="title".*?>(.*?)[<>]'
        course_name = re.compile(pat_course_name, re.S).findall(doc)[0].strip()
        if course_name:
            # sanitize so the name is safe to use as a directory name
            course_name = course_name.replace(' ', '_').replace('/', '_')
        pat_course = '<li class="period(.*?)</li>'
        course_lis = re.compile(pat_course, re.S).findall(doc)
        pat_href = 'href="(.*?)"'
        pat_title = 'title">(.*?)</span>'
        course_list = []
        for course_li in course_lis:
            href = re.compile(pat_href, re.S).findall(course_li)[0]
            title = re.compile(pat_title, re.S).findall(course_li)[0].strip()
            # sanitize so the title is safe to use as a file name
            title = title.replace(' ', '_').replace('/', '_')
            course_list.append({'href': href, 'title': title})

        return {'course_name': course_name, 'course_list': course_list}

    def down_one_video(self, video_url, file_name):
        """Download, decrypt and merge all ts segments of one video.

        :param video_url: lesson page url
        :param file_name: name of the final merged file (e.g. "title.ts")
        """
        ts_list = self.get_ts_list(video_url)
        ts_file_paths = []
        # BUG FIX: was file_name.replace(".tx", '') — a no-op typo for ".ts".
        # Strip the extension so the segment directory is named after the title.
        ts_dir = self.ts_path + "/" + file_name.replace(".ts", '')
        for ts_url_item in ts_list:
            # BUG FIX: the old retry used `continue` inside `for i in range(...)`,
            # which skipped to the NEXT segment instead of retrying this one
            # (and `i += 1` there was dead code). Retry the SAME segment here.
            ts_file_path = ""
            for attempt in range(6):  # 1 initial try + up to 5 retries
                try:
                    ts_file_path = self.down_ts(ts_url_item, ts_dir)
                except Exception as e:
                    print("request error url:" + str(ts_url_item))
                    print(e)
                    ts_file_path = ""
                if ts_file_path:
                    break
                if attempt < 5:
                    print('-------\t重试下载  ' + str(ts_url_item) + '  \t-------')
            if not ts_file_path:
                print('[error] 多次重试后失败')
                exit()
            ts_file_paths.append(ts_file_path)

        # merge the decrypted segments into the final file
        result_file_path = self.result_path + os.path.sep + file_name
        merge_ts(ts_file_paths, result_file_path)

    def get_base_url(self, refer_url):
        """Return the "scheme://host" root of the given url."""
        # use the directly-imported urlparse instead of relying on
        # urllib.parse being reachable through `import urllib.request`
        o = urlparse(refer_url)
        return o.scheme + "://" + o.netloc

    def get_ts_list(self, video_url):
        """Resolve a lesson's m3u8 playlist and parse its ts segment entries.

        :param video_url: lesson page url shaped like .../lesson_<courseId>_<lessonId>#...
        :return: list of {'ts_url', 'key_uri', 'iv'} dicts; empty list on failure
        """
        data = urllib.request.urlopen(video_url).read().decode("utf-8", "ignore")

        pat_pre_url = 'data-pre-url="(.*?)"'
        pre_url = re.compile(pat_pre_url, re.S).findall(data)[0]
        # the lesson url embeds the ids: lesson_<courseId>_<lessonId>
        video_url_split = video_url.split("_")
        course_id = video_url_split[1]
        lesson_id = video_url_split[2].replace('#', '')
        player_url = pre_url + '/course/' + course_id + '/lesson/' + lesson_id + '/player'
        player_url = self.get_base_url(video_url) + player_url

        print("访问 " + player_url)
        data = urllib.request.urlopen(player_url).read().decode("utf-8", "ignore")
        # deliberately missing the leading 'd' so both data-url and ata-url match
        pat_data_url = 'ata-url="(.*?)"'
        data_urls = re.compile(pat_data_url, re.S).findall(data)
        print('data_urls:')
        print(data_urls)

        if len(data_urls) == 0:
            print("data_urls 没有捞取")
            print(data)
            return []
        m3u8_url = data_urls[-1]
        if not m3u8_url.startswith('http'):
            print("--------m3u8_url: " + m3u8_url + "  , 非法url---------")
            return []
        data = urllib.request.urlopen(m3u8_url).read().decode("utf-8", "ignore")
        print("访问 " + m3u8_url + " 结果：")
        print(data)
        m3u8_urls = re.compile('(http.*)').findall(data)
        print(m3u8_urls)
        if len(m3u8_urls) == 0:
            # BUG FIX: used to `return` None here, which crashed len() in callers
            return []
        m3u8_url = m3u8_urls[-1]
        data = urllib.request.urlopen(m3u8_url).read().decode("utf-8", "ignore")

        # keep a timestamped copy of the playlist on disk for debugging
        if not os.path.exists(self.ts_path):
            os.makedirs(self.ts_path)
        with open(self.ts_path + "/" + str(time.time()) + '.m3u8', 'w') as m3u8_file:
            m3u8_file.write(data)

        # each match pairs an EXT-X-KEY line with the ts url that follows it
        pat = '(EXT-X-KEY.*?\\.ts)'
        ts_urls = re.compile(pat, re.S).findall(data)

        # compile once, outside the loop
        pat_ts_url = re.compile('(http.*?\\.ts)')
        pat_key_uri = re.compile('URI="(.*?)"', re.S)
        pat_iv = re.compile('IV=(.*)')
        result = []
        for entry in ts_urls:
            item = {'ts_url': pat_ts_url.findall(entry)[0],
                    'key_uri': pat_key_uri.findall(entry)[0],
                    'iv': pat_iv.findall(entry)[0]}
            result.append(item)

        return result

    def down_ts(self, ts_item, dir_path):
        """Download and decrypt a single ts segment.

        :param ts_item: dict with 'ts_url', 'key_uri' and 'iv' of the segment
        :param dir_path: directory to store the decrypted segment in
        :return: path of the written file, or "" on failure (errors are
                 printed, not raised — the caller decides whether to retry)
        """
        try:
            print("---------------ts信息-------------")
            print(ts_item)
            key_url = str(ts_item['key_uri'])
            if key_url in self.key_value_dic:
                key = self.key_value_dic[key_url]
            else:
                key = urllib.request.urlopen(key_url).read().decode("utf-8", "ignore")
                key = self.parse_key(key)
                self.key_value_dic[key_url] = key

            # the playlist IV is a hex string like 0x0123...; AES wants raw bytes
            iv = ts_item['iv'].replace('0x', '')
            cipher = AES.new(key, AES.MODE_CBC, IV=a2b_hex(iv))

            ts_url = ts_item['ts_url']
            # fetch the encrypted segment bytes
            ts = urllib.request.urlopen(ts_url, timeout=20).read()

            file_name = ts_url.split("/")[-1]
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            file_path = dir_path + "/" + file_name
            print('------------写入' + file_path + "----------------------")
            with open(file_path, 'wb') as f:
                f.write(cipher.decrypt(ts))
            return file_path
        except Exception as e:
            # best-effort: report and return "" so the caller can retry
            print(e)
        return ""

    def parse_key(self, key):
        """Decode the obfuscated AES key returned by the key server.

        Port of the site's JavaScript de-obfuscation: depending on the raw
        key length (20 or 17 characters) selected characters are recombined
        into the real 16-byte AES key; any other length is returned as-is.
        The single-letter names mirror the original JS.

        :param key: obfuscated key string
        :return: AES key as bytes
        """
        key = bytes(key, "utf-8")
        se = ord("a")
        if len(key) == 20:
            r = key[0]
            i = chr(r).lower()
            a = self.chr_parse_int(i) % 7
            n = key[a]
            o = chr(n)
            s = key[a + 1]
            l = chr(s)
            # two chars read at a key-dependent offset select one of 3 layouts
            u = self.str_parse_int(o + l) % 3
            if u == 2:
                d = key[8]
                h = key[9]
                c = key[10]
                f = key[11]
                p = key[15]
                g = key[16]
                v = key[17]
                y = key[18]
                m = d - se + 26 * (int(chr(h)) + 1) - se
                b = c - se + 26 * (int(chr(f)) + 1) - se
                E = p - se + 26 * (int(chr(g)) + 1) - se
                T = v - se + 26 * (int(chr(y)) + 2) - se
                result = [key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], m, b, key[12], key[13],
                          key[14], E, T, key[19]]
                return bytes(result)
            elif u == 1:
                result = [key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], key[18], key[16], key[15],
                          key[13], key[12], key[11], key[10], key[8]]
                return bytes(result)
            else:  # u == 0 (the old `if u != 0: pass` here was dead code)
                result = [key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], key[8], key[10], key[11],
                          key[12], key[14], key[15], key[16], key[18]]
                return bytes(result)
        elif len(key) == 17:
            # first character is padding; reorder the remaining 16 bytes
            key = key[1:]
            result = [key[8], key[9], key[2], key[3], key[4], key[5], key[6], key[7], key[0], key[1], key[10], key[11],
                      key[12], key[13], key[14], key[15]]
            return bytes(result)
        else:
            return key

    def chr_parse_int(self, s):
        """Return the base-36 digit value of character *s*, or None if invalid."""
        digits = '0123456789abcdefghijklmnopqrstuvwxyz'
        for index, ch in enumerate(digits):
            if ch == s:
                return index
        return None

    def str_parse_int(self, s):
        """Parse string *s* as a base-36 integer (lower-case digits only)."""
        result = 0
        for ch in s:
            result = result * 36 + self.chr_parse_int(ch)
        return result


def merge_ts(ts_file_paths, result_file_path):
    """Concatenate downloaded .ts segment files into one output file.

    :param ts_file_paths: ordered list of segment file paths
    :param result_file_path: path of the merged output file
    """
    # BUG FIX: rindex('/') raised ValueError for paths without a '/' and broke
    # on os.sep-joined Windows paths (the caller joins with os.path.sep);
    # os.path.dirname handles both separators.
    result_file_dir = os.path.dirname(result_file_path)
    if result_file_dir:
        os.makedirs(result_file_dir, exist_ok=True)
    with open(result_file_path, 'wb') as result_file:
        for ts_file_path in ts_file_paths:
            with open(ts_file_path, 'rb') as ts_file:
                result_file.write(ts_file.read())


def main():
    """Parse command-line options and download every video of a course.

    Options:
        -h            print usage and exit
        --dir=        download root directory
        --course_url= course page url
        --offset=     index of the first lesson to download (resume support)
    """
    # BUG FIX: this literal was split across two physical lines without any
    # continuation, which is a SyntaxError; implicit string concatenation
    # inside parentheses restores the intended single cookie value.
    # NOTE: stale session cookie kept only as a default — supply a fresh one.
    cookie_str = (
        'cna=QF6QFk2DsjMCAdRAQPbCK5kn; isg=BCAglLQWajf8ddZoQlO_iSAB8i7yKQTzrVOjVJox8zvFlcG_QjllgfGnLb0VPrzL; l=dB_kK_xlQL9rsj_2BOCNV5Q97m7ObIObYuoOBjUvi_5B5_8xvLbOo8LENEJ6csWcMW8B4zotpapTTeEb5Ppb95Umu8JctQ0HCef..; aliyun_choice=CN; UM_distinctid=16f6ab900fb448-0ea278739738de8-4c302a7b-240000-16f6ab900fc160; CNZZDATA1277657887=1028489629-1579007993-https%253A%252F%252Fwww.baidu.com%252F%7C1582599910; aliyun_lang=zh; acw_tc=0be3e0b115810583205356562ebde01ba4051f0569cced00369c633522a24a; CNZZDATA1261859658=918389689-1581055561-%7C1582605895; _ga=GA1.2.1379609896.1581064285; index_cookie=index; JSESSIONID=8409C3E293AC9FDFB228E0F989C91DC3; maliyun_temporary_console0=1AbLByOMHeZe3G41KYd5WUY6BymFVsSMq%2F2NauxvTDvzIqWUYpLmc5qJeUWsFDNNAzVLmH9RyQblM2oYq1BQMmkSydQYKbFxQeq6DZ%2FGD98H%2BDps9KXgIMsqQLjhKWLB%2BTHt7cBgu4N7iBTbHhm15w%3D%3D; hibext_instdsigdipv2=1; login_aliyunid_csrf=_csrf_tk_1311382593239199; ping_test=true; t=aab4e1866b0d4196ee46b9a3c79bb86c; _tb_token_=7053315131157; cookie2=19677b6a4ab208a42d32cd07fcfdd8a7; _samesite_flag_=true; _hvn_login=6; csg=a7f2c0ee; login_aliyunid=85707****@qq.com; login_aliyunid_ticket=*GayXbSieP__*yrFrrPO_Y1ZC5LqJPq7msBJ_kspof_BNTwUhTOoNC1ZBeeMfKJzxdnb95hYssNIZor6q7SCxRtgmGCbifG2Cd4ZWazmBdHI6sgXZqg4XFWQfyKpeu*0vCmV8s*MT5tJl3_1$$wS24Ay0; login_aliyunid_luid=BG+TOH3jZ4y5816ca705a5a3bf6b80fdb1b41246591+kbd66KGd3xxzrsjQmwPzszeeEV31tzG+aLlajfkP; login_aliyunid_abi=BG+82u7hN9425fbae283696e63cd9996e18264e5af5+hL1LujYGGvPhVzyJkY1suH0dvOWfHWMxZdVPXKBKV31WzWl2gvc=; login_aliyunid_pk=1847035820306422; login_aliyunid_pks=BG+Yq22TZCZ74R10RoGocM06LuleLNnl53aYUCowuCUDPk=; hssid=1fb7Q-apw1sg6MMY6-vC9Eg1; hsite=6; aliyun_country=CN; aliyun_site=CN; promotion_temporary_console0=1AbLByOMHeZe3G41KYd5WaNFzVX3TYuBg5KS%2FdUvOmgu2KS4T6UtSxn8pD1O70n%2BXtN7y4Dh2QeYn0SMsJReYBtUNUHVkBHFXim99hxE3%2BG54LdQxb%2F1SzC2diYLyVNT; aliyungf_tc=AQAAAG4LhQpuSwcAI6sPanT6FOXVLvAa; PHPSESSID=ffn19otlovnlseak93q2rpih60; '
        'SERVERID=01f62e4815b343dbafa378f0b15800ac|1582606978|1582606973; c_csrf=016f60dd-7f8f-4119-837e-d2f73d782e5a; ext_pgvwcount=0.9'
    )

    down_path = '/home/long655113/download'
    course_url = 'https://developer.aliyun.com/course/1895'
    offset = 0
    opts, args = getopt.getopt(sys.argv[1:], "h", ["dir=", "course_url=", "offset="])

    for o, a in opts:
        if o == '-h':
            print("down_ali_course.py --dir= --course_url=https://edu.aliyun.com/course/85 --offset=")
            exit()
        elif o == '--dir':
            down_path = a
        elif o == '--course_url':
            course_url = a
        elif o == '--offset':
            offset = int(a)

    # provisional paths, replaced below once the course name is known
    ts_path = down_path + "/tx"
    result_path = down_path + "/result"

    print("ts_path=" + ts_path + ",result_path=" + result_path + ",course_url=" + course_url + ", offset=" + str(offset))

    crawler = VideoCrawler(cookie_str, ts_path, result_path)
    result = crawler.get_course_list(course_url)
    course_list = result['course_list']
    course_name = result['course_name']
    # place segments and merged files under a directory named after the course
    ts_path = down_path + "/" + course_name + "/ts"
    result_path = down_path + "/" + course_name
    crawler.set_ts_path(ts_path)
    crawler.set_result_path(result_path)
    print('------------------course_list-----------')
    print(result)
    base_url = crawler.get_base_url(course_url)
    # --offset lets an interrupted download resume mid-course
    for course in course_list[offset:]:
        video_url = base_url + course['href']
        file_name = (course['title'] + ".ts").replace(' ', '_')
        crawler.down_one_video(video_url, file_name)


# Script entry point: run the crawler only when executed directly.
if __name__ == "__main__":
    main()

# indexUrl = "https://developer.aliyun.com/edu/course/1696/lesson/13533/player"
# urllib.request(indexUrl)