import crawlertool as tool
from bs4 import BeautifulSoup
import re
import os
# pip install fake_useragent
from fake_useragent import UserAgent

'''
练习：重写虎牙视频数据爬虫
'''
class SpiderHuYaVideo():
    """Crawler for the Huya video listing (https://v.huya.com/g/all).

    Workflow: fetch a listing page, collect the ``data-vid`` attribute of
    every list item, resolve each id to a title and a direct media URL via
    the moment API, then download the .mp4 files to a local directory.
    """

    def __init__(self):
        # Base listing URL; query parameters (set_id/order/page) are
        # appended per request in main(), never stored back here.
        self.url = 'https://v.huya.com/g/all'
        # Moment API endpoint mapping a video id to its metadata.
        self.download_info_url = 'https://liveapi.huya.com/moment/getMomentContent?videoId={0}&uid=&_='
        self.headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-language': 'zh-CN,zh;q=0.9',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            # Random desktop-Chrome UA per run to look less like a bot.
            'user-agent': UserAgent().chrome
        }

    def main(self, set_id, order, pages):
        """Crawl ``pages`` listing pages of category ``set_id`` ordered by
        ``order`` ('hot' / 'new' / 'mostplay') and download every video found.
        """
        # BUG FIX: the original appended '&page=N' to self.url inside the
        # loop, so page 2's URL still carried '&page=0', page 3 carried
        # '&page=0&page=1', and so on; self.url was also permanently
        # corrupted, breaking a second call to main().  Build each page
        # URL from a local base instead and leave self.url untouched.
        base_url = '{0}?set_id={1}&order={2}'.format(self.url, set_id, order)
        if pages <= 1:
            video_ids = self.get_video_ids(base_url)
            if video_ids:
                self.download(video_ids)
        else:
            for page_num in range(pages):
                # NOTE(review): page numbering kept 0-based as in the
                # original; confirm whether the site expects page=1 first.
                page_url = '{0}&page={1}'.format(base_url, page_num)
                video_ids = self.get_video_ids(page_url)
                if video_ids:
                    self.download(video_ids)

    def get_video_ids(self, url=None):
        """Return the list of video ids found on one listing page.

        ``url`` defaults to ``self.url`` for backward compatibility with
        callers that set the attribute before calling.
        """
        response = tool.do_request(url or self.url, headers=self.headers)
        bs = BeautifulSoup(response.content.decode(errors="ignore"), 'lxml')
        # Each <li> of the listing carries its video id in 'data-vid'.
        return [li['data-vid'] for li in bs.select('.vhy-video-list > li')]

    def get_video_info(self, video_id):
        """Resolve ``video_id`` via the moment API.

        Returns a dict with 'title' (sanitized for use as a file name) and
        'video_url' (first entry of the 'definitions' list).
        Raises KeyError/IndexError when the response misses expected fields.
        """
        download_info_url = self.download_info_url.format(video_id)
        response = tool.do_request(download_info_url, headers=self.headers)
        moment = response.json()['data']['moment']
        # Strip decorative 【...】 tags from the title.
        title = re.sub(r'【.*?】', '', moment['title'])
        # BUG FIX: also replace backslash — the original class [\/:...]
        # matched only '/', yet '\' is equally illegal in Windows names.
        title = re.sub(r'[\\/:*?"<>|\n]', '_', title)
        video_url = moment['videoInfo']['definitions'][0]['url']
        return {'title': title, 'video_url': video_url}

    def download(self, video_ids, dir_name='./videos/case_05_test/'):
        """Download every video in ``video_ids`` into ``dir_name``.

        ``dir_name`` defaults to the original hard-coded target directory
        (backward compatible); intermediate directories are created on demand.
        """
        # makedirs replaces the fragile two-branch mkdir logic of the
        # original and is a no-op when the directory already exists.
        os.makedirs(dir_name, exist_ok=True)
        for video_id in video_ids:
            try:
                video_info = self.get_video_info(video_id)
            except (KeyError, IndexError) as e:
                # Skip videos whose API response misses the expected
                # fields instead of aborting the whole batch.
                print('视频 {0} 信息解析失败：{1}'.format(video_id, e))
                continue
            response = tool.do_request(video_info['video_url'], headers=self.headers)
            with open(dir_name + video_info['title'] + '.mp4', mode='wb') as f:
                f.write(response.content)
            print('视频：{0}----------下载完成'.format(video_info['title']))


if __name__ == "__main__":
    # Crawl category 51 (dance) ordered by 'hot' (hottest; alternatives:
    # 'new' = newest, 'mostplay' = most played), fetching a single page.
    SpiderHuYaVideo().main(set_id=51, order='hot', pages=1)
