# -*- coding:utf-8 -*-

import requests
import time
import os
import re
from multiprocessing import Process, JoinableQueue
from pyquery import PyQuery as pq


class Page(Process):
    """Consumer process: takes article-page URLs off the shared queue,
    scrapes the embedded video URL from each page and downloads the
    video into the local ``video/`` directory."""

    def __init__(self, q):
        Process.__init__(self)
        self._q = q  # JoinableQueue of page URLs produced by Spider

    # Return the current local time as '[YYYY-MM-DD HH:MM:SS]'.
    def getCurrentTime(self):
        return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))

    # Fetch the given URL and return the page HTML.
    def getPageByURL(self, page_url):
        res = requests.get(page_url)
        # The target site serves GB2312-encoded pages; override requests'
        # guessed encoding so the text decodes correctly.
        res.encoding = 'gb2312'
        return res.text

    # Consume page URLs from the queue forever, extracting and downloading
    # the video referenced by each page.
    def getVideos(self):
        # Compile once, outside the loop. Escape the dot so '.mp4' is
        # matched literally (the original pattern let '.' match any char).
        pattern = re.compile(r'{id:.*?file:.*?\'(.*?)\.mp4', re.S)
        while True:
            url = self._q.get()
            try:
                page = self.getPageByURL(url)
                html = pq(page)
                title = html('h1#h1title').text()
                head = html('head').text()
                match = pattern.search(head)
                if match:
                    video_url = match.group(1) + '.mp4'
                    print('{} 开始下载:{}'.format(self.getCurrentTime(), title))
                    self.download(video_url, title)
                    print('{}下载完成'.format(self.getCurrentTime()))
            finally:
                # Required with JoinableQueue: mark the item done so a
                # producer blocked in q.join() can eventually return.
                self._q.task_done()

    # Download the video at *url* and save it as video/<title>.mp4.
    def download(self, url, title):
        try:
            # Ensure the target directory exists before opening the file.
            os.makedirs('video', exist_ok=True)
            html = requests.get(url)
            video_name = 'video/{}.mp4'.format(title)
            with open(video_name, 'wb') as f:
                f.write(html.content)
        except requests.exceptions.MissingSchema:
            # Relative or malformed URL scraped from the page — skip it.
            pass

    # Process entry point: loop on the queue until killed.
    def run(self):
        self.getVideos()
        print('Page ok1')


class Spider(Process):
    """Producer process: scrapes the listing page and pushes each article
    URL onto the shared queue for the Page worker to consume."""

    def __init__(self, q, **kw):
        Process.__init__(self)
        self._q = q
        # Site root, e.g. 'http://www.lsol.com.cn'; None if not supplied.
        self._base_url = kw.get('base_url', None)

    # Return the current local time as '[YYYY-MM-DD HH:MM:SS]'.
    def getCurrentTime(self):
        return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))

    # Fetch *page_url* relative to the base URL and return the page HTML.
    def getPageByURL(self, page_url):
        res = requests.get('{}/{}'.format(self._base_url, page_url))
        return res.text

    # Extract every article link on the listing page and enqueue it.
    def getNewsLists(self, page_url):
        page = self.getPageByURL(page_url)
        html = pq(page)
        news_lists = html('ul.list_img1')('li')
        count = 0
        for item in news_lists.items():
            href = item('a:first').attr('href')
            self._q.put(href)
            count += 1
        # Bug fix: the original printed the last enumerate index (count - 1)
        # and raised NameError when the list was empty.
        print('获取到{}个页面'.format(count))

    # Process entry point: crawl the first listing page.
    def run(self):
        print('爬虫正在启动，开始爬取在线视频')
        self.getNewsLists('list-734-1.html')


if __name__ == '__main__':
    # Shared queue linking the producer (Spider) to the consumer (Page).
    task_queue = JoinableQueue()

    # Producer: crawls the listing page and enqueues article URLs.
    spider = Spider(task_queue, base_url='http://www.lsol.com.cn')
    # Consumer: dequeues article URLs and downloads the videos.
    worker = Page(task_queue)

    spider.start()
    worker.start()
