# -*- coding:utf-8 -*-
"""
@author zyx
@since 2022/2/19 10:12
@file: multi_download.py
@desc: 基于you_get 的b站视频下载
@dependency: pip install you-get
"""

import sys
from you_get import common as you_get
from multiprocessing import Pool
from bs4 import BeautifulSoup

# Number of worker processes in the download pool
threads = 2
# Output directory for downloaded videos — adjust to your own path
directory = r'D:\test'
# URL template for a multi-part bilibili video; {} is filled with the part (p=) number
base_url = "https://www.bilibili.com/video/BV1TR4y1H7iG/?p={}"


def get_urls(p_start, p_end, url_template=None):
    """
    Build the list of page URLs to download.

    :param p_start: first page number (inclusive)
    :param p_end: last page number (inclusive)
    :param url_template: optional format string with one ``{}`` placeholder
        for the page number; defaults to the module-level ``base_url``.
    :return: list of formatted URLs; empty when ``p_end < p_start``
    """
    template = base_url if url_template is None else url_template
    # str.format() converts ints itself — no need for the redundant str(i)
    return [template.format(i) for i in range(p_start, p_end + 1)]


def download(url_list):
    """
    Download one video URL by driving you-get's CLI entry point.

    you-get has no documented Python API, so we emulate its command line:
    ``-o`` sets the output directory, ``-c cookies.txt`` supplies login
    cookies, and ``--no-caption`` skips subtitle/danmaku files.

    :param url_list: a single video page URL to fetch
    """
    print("downloading: {}".format(url_list))
    argv = ['you-get', '-o', directory, '-c', 'cookies.txt', '--no-caption', url_list]
    # you_get.main() reads its arguments from sys.argv, so patch it in place.
    sys.argv = argv
    you_get.main()


def parse_and_get_urls(file_path):
    """
    Extract bilibili video URLs from a saved HTML page.

    Every element carrying a ``data-key`` attribute is treated as holding a
    video id (e.g. a BV code); each id is expanded into a full video URL.

    :param file_path: path to the saved HTML file (UTF-8)
    :return: list of bilibili video URLs, in document order
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        soup = BeautifulSoup(handle.read(), 'lxml')

    # find_all(attrs={'data-key': True}) matches any tag that has the
    # attribute at all, regardless of its value.
    return [
        f"https://www.bilibili.com/video/{tag['data-key']}"
        for tag in soup.find_all(attrs={'data-key': True})
    ]


if __name__ == '__main__':
    # Scrape target URLs from a saved listing page; for sequential parts of
    # base_url, use get_urls(1, 5) instead.
    urls = parse_and_get_urls("source.xml")
    # Pool as a context manager guarantees the workers are cleaned up even if
    # map() raises; the original close() without join() left no such guarantee.
    # map() blocks until every download() call has finished.
    with Pool(threads) as pool:
        pool.map(download, urls)
