#!/usr/bin/env python
# -*-coding:UTF-8 -*-
'''
@Project ：爬虫-波波老师
@File：27-线程池梨视频.py
@Author ：文非
@Date：2021/3/14 16:02
@Require： # 爬取梨视频的视频数据
'''
import re

import requests
from lxml import etree
import os
from multiprocessing.dummy import Pool

# 线程池处理的是阻塞且耗时的操作
def make_dir():
    """Ensure the download directory "./梨视频mp4" exists.

    Bug fix: the original checked for "./梨视频MP4" (uppercase) but created
    "./梨视频mp4" (lowercase). On a case-sensitive filesystem the existence
    check never matched, so a second call raised FileExistsError.
    ``exist_ok=True`` makes the call idempotent and race-free.
    """
    os.makedirs("./梨视频mp4", exist_ok=True)

def stroe(dic):
    """Download one video and write it under ./梨视频mp4/.

    Runs as a thread-pool worker (the download is the blocking, slow part).

    Args:
        dic: mapping with keys
            'url'     — direct .mp4 URL (timestamp already rewritten),
            'name'    — list of title strings from the xpath query
                        (usually one element; each becomes a filename),
            'headers' — request headers including the Referer that
                        pearvideo's anti-leech check requires.
    """
    make_dir()
    url = dic['url']
    name = dic['name']
    headers = dic['headers']
    print(dic['name'], "正在下载........")
    # Fetch ONCE with the Referer header. The original discarded this
    # response and re-downloaded inside the loop without headers, which
    # doubled the traffic and risked a 403 from the anti-leech check.
    mp4 = requests.get(url=url, headers=headers)
    for name_in in name:
        with open("./梨视频mp4/" + name_in, "wb") as f:
            f.write(mp4.content)
            print(name_in, "下载成功.....")


def main():
    """Scrape the pearvideo sports category page and download its videos.

    Flow: fetch the category listing -> for each entry, call the
    videoStatus.jsp JSON endpoint to obtain the raw mp4 URL, rewrite the
    timestamp segment to ``cont-<id>`` so the link actually plays, then
    hand all download jobs to a 4-thread pool (``stroe`` does the I/O).
    """
    # Category landing page (sports).
    url = "https://www.pearvideo.com/category_4"
    response = requests.get(url=url)
    page_first = response.text
    # Parse the listing page.
    tree_first = etree.HTML(page_first)
    # Each <li> holds one video entry.
    url_list = tree_first.xpath('//div[@class="category-top"]/div/ul/li')

    urls = []

    # Build one download job per listed video.
    for url_detail in url_list:
        name = url_detail.xpath('./div/a/div[2]/text()')
        url_detail = url_detail.xpath('./div/a/@href')
        # href looks like "video_<contId>"; keep the numeric id.
        contId = url_detail[0].split("_")
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
            # pearvideo rejects the video-status request without a
            # matching Referer (anti-leech).
            'Referer': 'https://www.pearvideo.com/video_' + contId[1]
        }
        # JSON endpoint that reveals the raw video URL.
        url_detail = 'https://www.pearvideo.com/videoStatus.jsp?contId=' + contId[1]
        detail_page = requests.get(url=url_detail, headers=headers).text
        cont = 'cont-' + contId[1]
        # Extract the mp4 URL from the JSON payload.
        srcUrl = re.findall(r'"srcUrl":"(.*?)"', detail_page)[0]
        # The raw URL embeds a timestamp segment; replacing it with
        # "cont-<id>" yields the actually-playable link.
        new_url = srcUrl.replace(srcUrl.split("-")[0].split("/")[-1], cont)
        urls.append({
            'name': name,
            'url': new_url,
            'headers': headers
        })

    # 4 worker threads; downloads are I/O-bound so threads suffice.
    with Pool(4) as pool:
        pool.map(stroe, urls)



if __name__ == "__main__":
    main()