import requests
from bs4 import BeautifulSoup
import pymysql
import os
import json
import logging
import requests
import datetime

# To obtain the cookie, log in at: https://auth.dxy.cn/accounts/login
# Endpoint returning the purchased column's item listing (first 100 items).
list_url = 'https://mama.dxy.com/japi/platform/200920024?columnId=3399728840175130024&pageNo=1&pageSize=100'
# Detail endpoint; append an item id to get its m3u8 resource URL.
m3u8_url = 'https://mama.dxy.com/japi/platform/200920023?columnId=3399728840175130024&id='

# Request headers for the API calls. The cookie carries the login session
# and expires — refresh it from a logged-in browser when requests start failing.
headers = {
    "cookie": "DXY_TRACE_ID=tKkeC8zjPGwXjC3pXbVNWG7PcfKiJdQ0; CHD_BROWSER_RANDOM=eyJ0aW1lIjoxNjM1MzEyNzY5LCJkYXRhIjoibU52SHIwVWJHTXlla0VjZ1B3TzVFUTZaWXFGNi81NmdpQjdnWGU3dkJZQU56N0dLbGl0eEtFdzZENzk0djQwSk5RVEUwdWtBTzFPbDNlQzZIL09JTXJhaFhNcllDdTRva2pSWk0xdXB5WEMvSlpiMmR5TXlaT29oSDRFTHJyNk1CWm1ybHB3cHczQWFIY3A1blczTVdpR0JNRWJEOUtRK0RRUXFxQ3NwVHhyUStWbFNsVThjZU5FWHQyZE16aXpGM0dJVnQ3eTJPN1JVdHFIWEN0LzFKaWZQc1VoSE5ycWk3Y21wYjB1bCttUGVSdElVN0dqWFpvSTZhK2s3S1ZmdXZUbEptcVpVOXA5RUlVQUR1TGI5bVRPQ1hkUG9JaFRKSmJDNmhBMStkN0FoamY3dVRJK0VyUGJVMTdKYllBQm02N3hIZWFndFR3REREdUxGNGlGcDRPc0NvOGF6WW13TFlrZm8xZitKY20zc2RFTFoyVThpQ2svSFIyMEdOODl1a3BIdEtQS2RIVjRpOXR0bm9UUURqQT09Iiwic2lnbiI6IjkwYzc0M2FhYzRiNWNmNTFkM2EzZDQ3OTUwMTI4ODUyMjk1ZDY1MDEifQ; dxy_da_cookie-id=e3ab0ee3bdde46fbb0da83b4bedff3721635312746246; DXY_CHD_SESSION=eyJhIjoxMjIxNzgyNDEyLCJ0IjoxNjM1Mzk5MjIyLCJuIjoiY3hLUlRuN2VBanVrbVhqeiIsImQiOiJ7XCJhdHRyaWJ1dGVzXCI6e1wic3NvXCI6XCJkeHlfY2I3cjRub3dcIixcInZcIjowLFwibUlkXCI6MzQ5MDYzMTgwMTQ4MzUxNjkzNX0sXCJpZFwiOjM0NTM1NDM1NTE5OTY5NjAzMDAsXCJ1c2VybmFtZVwiOlwi5p2o56OK4oShwrnigbfigbbigbDigbDigbjigbDigbXCs-KBsOKBuFwiLFwibWFya3NcIjoyNTI4MixcIm1vbVwiOjIyMDE3MTAyNX0iLCJzIjoiOGQ3MGQ0MzkwYjc5MzcyZmRhMzFjMzEyNzg2Yzk3NmYwMmY5YTZhNSJ9",
    "referer": "https://mama.dxy.com/client/column-buyed/3399728840175130024",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,"
              "application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/"
                  "89.0.4389.90 Safari/537.36"
}


class xiazai():
    """Download every media segment referenced by an HLS ``index.m3u8`` URL.

    Construction immediately triggers the download pipeline:
    resolve the variant playlist from the master playlist (``savefile``),
    parse the segment list (``duqu``), then fetch each segment into the
    local ``file_tmp/`` directory (``xiazai``).

    Note: segments may actually be e.g. PNG-wrapped; rename the suffix
    after download if needed (observation carried over from original notes).
    """

    def __init__(self, url):
        """Set up the output directory and start downloading *url*.

        :param url: URL of the top-level ``index.m3u8`` playlist.
        """
        self.url = url
        # file_tmp/ (under the current working directory) holds the .ts files.
        file_dir = os.path.join(os.getcwd(), 'file_tmp')
        if not os.path.exists(file_dir):
            os.mkdir(file_dir)
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
        }
        self.savefile(self.url)

    def savefile(self, file_url):
        """Fetch the master playlist and resolve the variant (hls) playlist URL.

        The variant playlist name is the last non-empty line of the master
        playlist; it replaces ``index.m3u8`` in *file_url*.
        """
        r = requests.get(file_url, headers=self.headers)
        lines = r.text.split('\n')
        # The response may end with a trailing newline, producing an empty
        # final element — take the last *non-empty* line in that case.
        hls_mark = lines[-2] if lines[-1] == '' else lines[-1]
        url_m3u8_hls = file_url.replace('index.m3u8', hls_mark)
        self.duqu(url_m3u8_hls)

    def duqu(self, url_m3u8_hls):
        """Parse the variant playlist and extract the segment file names."""
        text = requests.get(url_m3u8_hls, headers=self.headers).text
        lines = text.split('\n')
        # Segment URIs are the non-comment lines (playlist tags start with '#').
        ts_neirong = [line for line in lines if not line.startswith('#')]
        # Drop the trailing empty entry left by the playlist's final newline.
        ts_neirong.pop()
        self.xiazai(ts_neirong, url_m3u8_hls)

    def xiazai(self, ts_neirong, url_m3u8_hls):
        """Download each segment in *ts_neirong* into ``file_tmp/``.

        Segment URLs are built by swapping the playlist's file name for the
        segment name in *url_m3u8_hls* (segments live alongside the playlist).
        """
        # Hoisted out of the loop: the playlist file name never changes.
        hls_mark = url_m3u8_hls.split('/')[-1]
        for segment in ts_neirong:
            url_xiazai = url_m3u8_hls.replace(hls_mark, segment)
            print(url_xiazai)
            r = requests.get(url_xiazai, headers=self.headers)
            # BUG FIX: the original `with` block had an empty body (the
            # write/close were dedented outside it), a syntax error.
            # `with` also closes the file, so no explicit close is needed.
            with open(os.path.join('file_tmp', segment), 'wb') as f:
                f.write(r.content)


if __name__ == '__main__':
    try:
        # Fetch the purchased column's listing (up to 100 items).
        resp = requests.get(list_url, headers=headers, timeout=30).text
        json_obj = json.loads(resp)
        i = 1  # 1-based course counter, used only for display numbering
        for item in json_obj['results']['items']:
            # Renamed from `id`/`type` to avoid shadowing the builtins.
            item_id = item['id']
            item_type = item['type']
            title = item['title']

            # Resolve the item's m3u8 resource URL from the detail endpoint.
            m3u8_resp = requests.get(m3u8_url + str(item_id), headers=headers, timeout=30).text
            m3u8_obj = json.loads(m3u8_resp)
            m3u8 = m3u8_obj['results']['item']['resource']

            if item_type == 103:
                # Audio lesson: only print its playlist URL.
                print("课程" + "{:0>2d}".format(i) + "（音频）" + title)
                print(m3u8)
                print("====================================================")
                i += 1
            if item_type == 104:
                # Video lesson: print the URL and download all segments.
                print("课程" + "{:0>2d}".format(i) + "（视频）" + title)
                print(m3u8)
                xiazai(m3u8)
                print("====================================================")
                i += 1
    except (requests.RequestException, json.JSONDecodeError, KeyError) as e:
        # BUG FIX: the original bare `except:` swallowed every failure
        # (even KeyboardInterrupt) and printed no diagnostics. Catch only
        # the failures this script can actually produce (network errors,
        # malformed JSON, unexpected response schema) and report them.
        print("ERROR!!!", type(e).__name__, e)


