import requests
import subprocess
import re
import os
from pprint import pprint
import time
import random
# pip install fake_useragent
from fake_useragent import UserAgent

'''
练习：B站视频资源爬虫
'''
class SpiderBilibiliVideo():
    """Crawler for Bilibili hot-ranked videos.

    Queries Bilibili's category-search API for a hot-rank video list, then
    shells out to the `you-get` CLI tool to download each video under
    ./videos/.  Dates are YYYYMMDD integers; pages are 1-based.
    """

    def __init__(self):
        # URL template for the category-search API.  Placeholders:
        # {0}=page (1-based), {1}=pagesize, {2}=time_from, {3}=time_to.
        self.data_url = 'https://s.search.bilibili.com/cate/search?main_ver=v3&search_type=video&view_type=hot_rank&copy_right=-1&new_web_tag=1&order=click&cate_id=20&page={0}&pagesize={1}&time_from={2}&time_to={3}'
        # Browser-like headers; the user-agent is randomized per run.
        self.headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-language': 'zh-CN,zh;q=0.9',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': UserAgent().chrome
        }

    def main(self, pages, pagesize, time_from, time_to):
        """Crawl the video list and download (pages<=1) or print (pages>1) it.

        :param pages: number of result pages; <=1 downloads page 1's videos,
                      >1 only pretty-prints the metadata of each page.
        :param pagesize: results per page.
        :param time_from: window start date as a YYYYMMDD integer.
        :param time_to: window end date as a YYYYMMDD integer.
        """
        print('开始执行B站视频资源爬虫，请稍后。。。')
        if pages <= 1:
            video_list = self.get_video_list(1, pagesize, time_from, time_to)
            if video_list:
                # Output directory is loop-invariant: create it once up front.
                dir_name = './videos/case_05_test2/'
                self.create_folder(dir_name)
                for video_info in video_list:
                    video_url = video_info['arcurl']
                    video_title = video_info['title']
                    # pip install you-get
                    # BUGFIX: the format option needs a double dash
                    # (--format=...); the old single-dash '-format=...' is not
                    # a valid you-get flag.  Also pass an argv list without a
                    # shell so the remote-supplied URL cannot be interpreted
                    # by the shell (command-injection hardening).
                    cmd = ['you-get', '--format=dash-flv480', '-o', dir_name, '-f', video_url]
                    subprocess.call(cmd)
                    print('视频{}下载完成！'.format(video_title))
                    # Random pause between downloads to avoid hammering the site.
                    time.sleep(random.randint(3, 5))
        else:
            # BUGFIX: the API's pages are 1-based (see the sample URL with
            # page=4); range(pages) would have requested page 0.
            for page in range(1, pages + 1):
                video_list = self.get_video_list(page, pagesize, time_from, time_to)
                if video_list:
                    for video_info in video_list:
                        pprint(video_info)

    def do_request(self, url):
        """GET `url` with the spider's browser-like headers.

        BUGFIX: headers must be passed via the `headers=` keyword; the second
        positional parameter of requests.get is `params`, so the original call
        silently sent the request without the custom headers.
        """
        return requests.get(url, headers=self.headers)

    def get_video_list(self, page, pagesize, time_from, time_to):
        """Return the 'result' list of the search API response for one page.

        BUGFIX: format into a local variable instead of rebinding
        self.data_url — the original overwrote the URL template on the first
        call, breaking every subsequent page request.
        """
        url = self.data_url.format(page, pagesize, time_from, time_to)
        response = self.do_request(url)
        return response.json()['result']

    def format_title(self, title):
        """Sanitize a title for use as a filename.

        Strips leading 【...】 tag blocks, then keeps only the Chinese
        characters — Windows forbids characters such as \\/:*?"<>| in file
        names, so rather than escaping each one we drop everything non-CJK.
        """
        stripped = re.sub(r'【.*?】', '', title)
        return ''.join(re.findall(r'[\u4e00-\u9fa5]', stripped))

    def create_folder(self, dir_name):
        """Create the output directory (and any missing parents) idempotently.

        os.makedirs with exist_ok=True replaces the original two-step
        exists()/mkdir() logic: it creates the intermediate ./videos/ level
        automatically and is free of the check-then-create race.
        """
        os.makedirs(dir_name, exist_ok=True)

if __name__ == '__main__':
    # Script entry point: fetch a single page of ten hot-ranked videos
    # covering the one-week window 2023-01-12 .. 2023-01-19.
    spider = SpiderBilibiliVideo()
    spider.main(pages=1, pagesize=10, time_from=20230112, time_to=20230119)