# coding=utf-8
import os
import requests
import urllib.parse
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import multiprocessing

# XPath for page links inside result "item" cards; matches both class
# variants seen on the site ('item' and 'item  ' with trailing spaces).
xp_href = "//div[@class='item']//a/@href|//div[@class='item  ']//a/@href"
# XPath for the src attribute of <video> tags (the actual media URLs).
xp_video_reg = "//video/@src"


class SpiderGreen(object):
    """Crawl video URLs starting from a seed page, then download them.

    A headless Chrome instance renders each page (so JS-injected
    ``<video>`` tags are present in the DOM), ``lxml`` extracts the
    video sources plus further links to follow, and the collected
    videos are fetched concurrently with ``requests``.
    """

    # One User-Agent string shared by the browser and the HTTP downloads,
    # so both kinds of traffic look identical to the server.
    _USER_AGENT = (
        "Mozilla/5.0 (iPod; U; CPU iPhone OS 2_1 like Mac OS X; ja-jp) "
        "AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 "
        "Mobile/5F137 Safari/525.20"
    )

    def __init__(self):
        # Directory the downloaded videos are written to.
        self.downloadDir = "./movies"
        os.makedirs(self.downloadDir, exist_ok=True)
        # Bookkeeping for files downloaded in this run (currently unused,
        # kept for interface compatibility).
        self.downloadInfo = {}
        # How many video URLs to collect before the crawl stops.
        self.max_count = 0
        # Every link ever discovered (deduplication set).
        self.setHref = set()
        # Pages still waiting to be visited.
        self.htmlQueue = []
        # Video source URLs discovered so far.
        self.setUrl = set()
        # Headless Chrome with a spoofed User-Agent for rendering
        # JS-heavy pages.
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        # BUG FIX: Chrome takes the UA via the --user-agent switch; the
        # previous 'User-Agent="..."' argument was silently ignored.
        chrome_options.add_argument('--user-agent=' + self._USER_AGENT)
        # NOTE(review): `options=` is the keyword supported by
        # selenium >= 3.8 and required by selenium 4 (`chrome_options=`
        # was removed) — confirm against the pinned selenium version.
        self.driver = webdriver.Chrome(options=chrome_options)
        # Same UA for the plain requests-based downloads.
        self.headers = {"User-Agent": self._USER_AGENT}

    def save_video(self, src):
        """Record one discovered video source URL."""
        print("saving... ==>", src)
        self.setUrl.add(src)

    def find_videoInfo(self):
        """Crawl queued pages until enough videos are found.

        BUG FIX: also stops when the queue runs dry — the old loop
        raised IndexError from ``pop()`` on an empty queue whenever the
        site yielded fewer than ``max_count`` videos.
        """
        while len(self.setUrl) < self.max_count and self.htmlQueue:
            url = self.htmlQueue.pop()
            if url:
                print('analysis:', url)
                html = self.downloadHtml(url)
                self.analysisHtml(html)

    def downloadHtml(self, url):
        """Render *url* in the headless browser and return its HTML."""
        self.driver.get(url)
        return self.driver.page_source

    def analysisHtml(self, html):
        """Extract video sources and new, unseen links from *html*."""
        tree = etree.HTML(html)
        # Collect every <video src> found on the page.
        for video in tree.xpath(xp_video_reg):
            self.save_video(video)
        # Queue links we have not seen yet, resolved against the seed
        # URL so relative hrefs become absolute.
        for href in tree.xpath(xp_href):
            href = urllib.parse.urljoin(self.seedurl, href)
            if href not in self.setHref:
                self.setHref.add(href)
                self.htmlQueue.append(href)

    def find_video_src(self, url):
        """Placeholder — not implemented yet."""
        pass

    def _download(self, url):
        """Stream one video from *url* into the download directory."""
        print("start write to file", url)
        # BUG FIX: name the file after the last non-empty path segment.
        # The old url.split('/')[-2] only worked for URLs that happened
        # to end with a slash; for '.../video.mp4' it picked the parent
        # directory name instead.
        filename = [part for part in url.split('/') if part][-1]
        r = requests.get(url, headers=self.headers, stream=True)
        try:
            with open(os.path.join(self.downloadDir, filename), 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    f.write(chunk)
        finally:
            # Always release the underlying connection.
            r.close()
        print("download success", url)

    def mutiDownload(self, urls):
        """Download all *urls* concurrently with a pool of 10 workers.

        BUG FIX: the old code did ``pool.apply_async(self._download(url))``,
        which called ``_download`` synchronously in the main process and
        handed its ``None`` return to ``apply_async`` — no concurrency at
        all. A *thread* pool is used because ``self`` holds a webdriver
        and cannot be pickled to worker processes, and downloads are
        I/O-bound anyway.
        """
        from multiprocessing.pool import ThreadPool
        pool = ThreadPool(10)
        try:
            # map() blocks until done and re-raises worker errors, the
            # same way the old (accidentally serial) code surfaced them.
            pool.map(self._download, urls)
        finally:
            pool.close()
            pool.join()

    def start(self, seed_url, maxCount):
        """Crawl from *seed_url* until *maxCount* videos are found, then
        download everything that was collected.
        """
        self.seedurl = seed_url
        self.max_count = maxCount
        self.htmlQueue.append(seed_url)
        try:
            self.find_videoInfo()
        finally:
            # BUG FIX: the browser process was never released before;
            # quit it as soon as crawling is over (or fails).
            self.driver.quit()
        self.mutiDownload(self.setUrl)


# Run the crawl only when executed as a script, not on import
# (importing this module used to launch a Chrome process immediately).
if __name__ == "__main__":
    s = SpiderGreen()
    # Seed listing page; stop after collecting 3 video URLs.
    s.start("http://dyz44.com/latest-updates/", 3)
