# coding: utf-8
import requests
import parsel
import re
import urllib3
import time
import json
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
import random

# Silence InsecureRequestWarning noise caused by the verify=False requests below.
urllib3.disable_warnings()
# Same call via requests' vendored urllib3, in case it is a separate module
# instance from the top-level one — presumably belt-and-braces; harmless either way.
requests.packages.urllib3.disable_warnings()


def matchValue(reStr, sourceContent, matchIndex):
    """Search ``sourceContent`` with regex ``reStr`` and return group ``matchIndex``.

    Args:
        reStr: regular expression pattern; use ``()`` to mark groups 1, 2, ...
        sourceContent: the text to search.
        matchIndex: which group to return; 0 is the entire match.

    Returns:
        The matched group string, or ``None`` when the pattern does not match.
    """
    pat = re.compile(reStr)
    m = pat.search(sourceContent)
    if m is None:  # fixed: identity check instead of `== None`
        return None  # fixed: explicit None instead of bare `return`
    return m.group(matchIndex)



def build_content(datas):
    """Render a list of (title, page_url, image_url) triples as markdown sections."""
    return ''.join(
        f"### {item[0]} \n 视频在线地址：[在线地址]({item[1]}) \n ![]({item[2]}) \n"
        for item in datas
    )

def batch_load(total_page):
    """Crawl ``total_page`` listing pages and write rendered markdown to
    ./hsck/hsck<count>.md, rotating to a new numbered file every 10 pages
    (and flushing whatever is left on the final page).

    NOTE(review): this calls ``get_single_page(pageNum=...)``, which matches
    neither ``get_single_page`` signature defined later in this file — it looks
    like legacy code for an earlier site and would raise TypeError if invoked;
    confirm before use.
    """
    file_name = "./hsck/hsck{}.md"
    
    count = 0
    file_content = ''
    for i in range(total_page):
        # Always true; presumably a leftover "resume from page N" guard.
        if i >= 0:
            file_content += build_content(get_single_page(pageNum=(i + 1)))
            print(f"第{i+1}页")
            # Flush accumulated markdown every 10 pages, and on the last page.
            if (i + 1) % 10 == 0 or total_page == i + 1:
                count += 1
                # print(file_content)
                with open(file_name.format(count), 'a+', encoding='utf-8') as f:
                    f.write(file_content)
                    file_content = ''
            time.sleep(0.5) 
            
def get_single_page(url, title):
    """Download the video embedded in *url* and save it as ``<title>.mp4``.

    Scans the page's ``.video`` inline scripts for a ``var options`` blob,
    pulls ``readyVideoUrl`` out of its JSON payload, and writes the fetched
    bytes to disk.

    NOTE(review): this definition is shadowed by a later one-argument
    ``get_single_page(url)`` in this file, so it is unreachable once the whole
    module is loaded — confirm which one is intended.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 13; MEIZU 18s Build/TKQ1.221114.001) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.5563.116 Mobile Safari/537.36"
    }
    response = requests.get(url=f"{url}", headers=headers, verify=False)
    response.encoding = 'utf-8'
    selector1 = parsel.Selector(response.text)
    header_contents = selector1.css('.video script::text')
    for content in header_contents:
        contentStr = content.get()
        if 'var options' in contentStr:
            # Fixed: raw string for the regex — '\s'/'\S' in a plain string are
            # invalid escape sequences (DeprecationWarning on modern Python).
            temp_data = matchValue(r'var options\s+\S+\s((.+));', contentStr, 1)
            json_data = json.loads(temp_data)
            video_url = json_data['readyVideoUrl']
            print(title, json_data['readyVideoUrl'])
            video_content = requests.get(url=video_url, headers=headers).content

            # Write the raw MP4 bytes to disk.
            with open(title + ".mp4", mode="wb") as f:
                f.write(video_content)
                
def build_player_url(aid, vid, cid, is_vod=True):
    """Build a Bilibili wbi ``playurl`` API URL for one video part.

    Args:
        aid: numeric av id (used when ``is_vod`` is False).
        vid: BV id string (used when ``is_vod`` is True).
        cid: the part/page cid.
        is_vod: select ``bvid=`` vs ``avid=`` addressing.

    Fetches the current server timestamp from the click-interface endpoint to
    fill the ``wts`` parameter. NOTE(review): ``w_rid`` here is just random hex,
    not a real wbi signature — presumably the endpoint tolerates it for the
    qualities requested; confirm against the API behavior.
    """
    ts_url = "https://api.bilibili.com/x/click-interface/click/now"
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 13; MEIZU 18s Build/TKQ1.221114.001) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.5563.116 Mobile Safari/537.36"
    }
    ts_response = requests.get(ts_url, headers=headers)
    wts = ts_response.json()['data']['now']
    # uuid4().hex == str(uuid4()).replace('-', '') — 32 random hex chars.
    w_rid = uuid.uuid4().hex
    vid_str = f'bvid={vid}' if is_vod else f'avid={aid}'
    # Fixed: the original triple-quoted f-string left the surrounding
    # indentation spaces in the URL even after stripping newlines; assemble it
    # without any whitespace instead.
    return (
        f'https://api.bilibili.com/x/player/wbi/playurl?{vid_str}&cid={cid}'
        f'&qn=0&fnver=0&fnval=4048&fourk=1&gaia_source=external-link'
        f'&from_client=BROWSER&is_main_page=false&need_fragment=false'
        f'&isGaiaAvoided=true&session=160081202c9a2b89261bd261cb1a967c'
        f'&voice_balance=1&web_location=1315873&w_rid={w_rid}&wts={wts}'
    )
    
                
def get_single_page(url):
    """Fetch *url* and dump its raw HTML to ``test-player.html`` for inspection.

    NOTE(review): this redefines the two-argument ``get_single_page`` declared
    earlier in the file and shadows it at module load time — confirm which
    definition is intended.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 13; MEIZU 18s Build/TKQ1.221114.001) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.5563.116 Mobile Safari/537.36"
    }
    response = requests.get(url=f"{url}", headers=headers, verify=False)
    response.encoding = 'utf-8'
    # Fixed: close the file handle properly (the original leaked it via
    # open(...).write(...)); also dropped an unused parsel.Selector local.
    with open("test-player.html", mode='w', encoding='utf-8') as f:
        f.write(response.text)
    
                
def get_player_info(url):
    """Fetch *url* and return the ``embedUrl`` of its VideoObject ld+json block.

    Scans the page's ``application/ld+json`` metadata scripts and returns the
    embed URL of the first one whose ``@type`` is ``VideoObject``; returns
    ``None`` when no such block exists.
    """
    ua = "Mozilla/5.0 (Linux; Android 13; MEIZU 18s Build/TKQ1.221114.001) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.5563.116 Mobile Safari/537.36"
    response = requests.get(url=f"{url}", headers={"User-Agent": ua}, verify=False)
    response.encoding = 'utf-8'
    page = parsel.Selector(response.text)
    ld_scripts = page.css('script[data-vue-meta="true"][type="application/ld+json"]::text')
    print(ld_scripts)
    for script in ld_scripts:
        parsed = json.loads(script.get())
        if parsed['@type'] != 'VideoObject':
            continue
        print(parsed)
        return parsed['embedUrl']
    return None
    
def get_header():
    """Return the default mobile-browser request headers used throughout this script."""
    user_agent = (
        "Mozilla/5.0 (Linux; Android 13; MEIZU 18s Build/TKQ1.221114.001) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.5563.116 Mobile Safari/537.36"
    )
    return {"User-Agent": user_agent}
        
def get_page_info(url):
    """Fetch a Bilibili video page, parse ``window.__INITIAL_STATE__``, and
    download each part's streams via ``download_m4s_file``.

    Returns:
        ``(datas, title)`` — ``datas`` is a list of ``(part_title, stream_url)``
        tuples (one per page/part); ``title`` is the sanitized page <title>,
        also used as the download directory name.
    """
# url = f'http://367hsck.cc/'
    headers = get_header()
    response = requests.get(url=f"{url}", headers=headers, verify=False)
    response.encoding='utf-8'

    selector1 = parsel.Selector(response.text)
    # contents = selector1.css('.m-video-player > .player-container')
    scripts = selector1.xpath('//script/text()').getall()
    # Strip '|' and '/' so the title is safe to use as a directory name.
    title = selector1.css('title::text').get().replace('|', '').replace('/', '_')
    print(title)
    if not os.path.exists(f'./{title}'):
        os.mkdir(f'./{title}')
    # print(scripts)
    index = 1
    datas = []
    for content in scripts:
        # content = contents[3]

        # print(content)
        # pic_info = content
        # contentStr = pic_info.get()
        
        # Only the bootstrap script carries the page state blob.
        if content.startswith('window.__INITIAL_STATE__='):
            temp_data = content.replace('window.__INITIAL_STATE__=', '')
            print("++++++++++++++++++++++++\n")
            # print(temp_data)
            # Extract just the leading {...} JSON object, dropping any
            # trailing `;(function(){...})()` boilerplate after it.
            temp_data = matchValue(r'^\s*({.*?})\s*;', temp_data, 1)
            # print(temp_data)
            json_data = json.loads(temp_data)
            viewInfo = json_data['video']['viewInfo']
            # print("viewInfo:", viewInfo)
            pages = viewInfo['pages']
            aid = viewInfo['aid']
            bvid = viewInfo['bvid']
            for page in pages:
                cid = page['cid']
                vod_player_url = build_player_url(aid=aid, vid=bvid, cid=cid, is_vod=False)
                response = requests.get(url=vod_player_url, headers=headers)
                print(vod_player_url)
                json_data = response.json()
                print(json_data)    
                print(url)
                # Download the video stream, then the audio stream.
                # NOTE(review): the first call's return value (the video tuple)
                # is immediately overwritten by the audio call's, so only the
                # audio (part_title, url) ends up in `datas` — looks like both
                # were meant to be collected; confirm intent.
                data = download_m4s_file(page=page, vod_json=json_data, save_path=title)
                data = download_m4s_file(page=page, vod_json=json_data, save_path=title,is_vod=False)
                datas.append(data)
        index += 1
    return datas,title

def download_m4s_file(page, vod_json, save_path, is_vod=True):
    """Download one DASH stream (video or audio) for a single part.

    Args:
        page: the part dict from viewInfo['pages']; its 'part' field names the file.
        vod_json: the playurl API response containing data.dash.{video,audio}.
        save_path: directory to save into.
        is_vod: True → pick a video stream, False → pick an audio stream.

    Returns:
        ``(part_title, stream_url)`` for the stream that was downloaded.

    NOTE(review): the Referer passed to ``downloadOneFile`` is the module-level
    global ``url`` set in the ``__main__`` block — this function only works when
    the script is run directly; consider threading the referer through as a
    parameter.
    """
    part_title = page['part']
    if is_vod:
        print(vod_json['data']['dash']['video'])
        streams = vod_json['data']['dash']['video']
        file_name = f"{part_title}"
    else:
        print(vod_json['data']['dash']['audio'])
        streams = vod_json['data']['dash']['audio']
        # Fixed typo: was "_aduio".
        file_name = f"{part_title}_audio"

    # Second-to-last entry of the quality list. Note len-2 == -1 for a
    # single-entry list, i.e. it falls back to the only stream available.
    video_url = streams[len(streams) - 2]['baseUrl']
    max_id = streams[len(streams) - 2]['id']
    print(part_title, max_id, video_url)
    time.sleep(0.5)  # be polite between segment downloads
    downloadOneFile(video_url, save_path, f"{file_name}", rerfer_url=url)
    return (part_title, video_url)

def downloadOneFile(url, fileSavePath, name, rerfer_url, max_retries=3):
    """Download a single .m4s segment to ``fileSavePath/name.m4s``, with retries.

    Args:
        url: the segment URL to fetch.
        fileSavePath: target directory (must already exist).
        name: base file name (without extension).
        rerfer_url: value for the Referer header (Bilibili CDN requires it).
        max_retries: how many attempts before giving up silently.

    Skips the download entirely when the target file already exists. Sleeps a
    random 0-2 s before each attempt to avoid hammering the CDN.
    """
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Origin": "https://www.bilibili.com",
        "Referer": f"{rerfer_url}",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
        # Fixed: the original dict listed "User-Agent" twice (identical values);
        # duplicate dict keys silently override each other.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Google Chrome\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
    }
    file_name = f"{fileSavePath}/{name}.m4s"
    if os.path.exists(file_name):
        print(f"{file_name} has exists")
        return

    retries = 0
    while retries < max_retries:
        sleep_time = random.uniform(0, 2)
        try:
            # Pause a random interval to space out requests.
            time.sleep(sleep_time)
            # Fixed: added a timeout so a stalled connection cannot hang forever.
            video_response = requests.get(url=url, headers=headers, timeout=60)
            if video_response.status_code == 200:
                with open(file_name, mode="wb") as f:
                    f.write(video_response.content)
                print(f"程序将暂停 {sleep_time:.2f} 秒")
            else:
                print(video_response.status_code)
                raise Exception
            break
        except Exception as e:
            print(f"downloadOneFile error, retrying ({retries + 1}/{max_retries}): {e}, name: {name}")
            retries += 1
            time.sleep(1)  # brief delay before the retry
        
if __name__ == "__main__":
    # Alternate season URLs kept for convenience:
    # Season 1: base_url = "https://www.bilibili.com/video/BV1mE42137KN"
    # Season 2: base_url = "https://www.bilibili.com/video/BV1qM4m1C7EB"
    # Season 3: base_url = "https://www.bilibili.com/video/BV1uz421C7Ss"
    base_url = "https://www.bilibili.com/video/BV1zwBBY6EHQ"
    # NOTE: `url` is also read as a module-level global by download_m4s_file.
    url = f'{base_url}/?spm_id_from=333.337.search-card.all.click&vd_source=822695a88279c29d1d77cff2810689ad'
    pages, title = get_page_info(url)
    # get_page_info already creates ./<title>; kept here as a safety net.
    if not os.path.exists(str(title)):
        os.mkdir(str(title))
    # Fixed: close the file handle properly (original leaked it via
    # open(...).write(...)).
    with open('urls.json', mode='w+', encoding='utf-8') as f:
        f.write(json.dumps(pages, ensure_ascii=False))