# -*- coding:utf-8 -*-

import os
import re
import time
import requests
from flask import current_app
from logging.handlers import RotatingFileHandler
from bs4 import BeautifulSoup


class Page(object):
    """Fetches a single article page and downloads the video embedded in it."""

    def getPageByURL(self, url):
        """Return the HTML text of the page at *url*."""
        res = requests.get(url)
        return res.text

    def getNewsVideo(self, page):
        """Extract the .mp4 video URL embedded in the page's <head>.

        Returns the full video URL as a string, or None when no video
        reference is found in the page.
        """
        soup = BeautifulSoup(page, 'html.parser')
        text = soup.select('head')
        # Raw string with an escaped dot: the original pattern '.mp4' let
        # the dot match ANY character before 'mp4', which could capture a
        # wrong URL; '\.' pins it to a literal dot.
        pattern = re.compile(r'{id:.*?file:.*?\'(.*?)\.mp4', re.S)
        match = re.search(pattern, str(text))
        if match:
            return match.group(1) + '.mp4'
        return None

    # Return the current local time formatted as '[YYYY-MM-DD HH:MM:SS]'.
    def getCurrentTime(self):
        """Return the current local time formatted as '[YYYY-MM-DD HH:MM:SS]'."""
        return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))

    def download(self, url, title):
        """Download the video at *url* into 'video/<title>.mp4'.

        Skips silently when *url* is None (getNewsVideo found no video) or
        malformed, preserving the original best-effort behaviour that
        swallowed MissingSchema for exactly those cases.
        """
        if url is None:
            # getNewsVideo may return None; avoid issuing a pointless
            # request that would only raise MissingSchema.
            return
        try:
            resp = requests.get(url)

            # Ensure the target directory exists before writing, otherwise
            # open() below fails with FileNotFoundError.
            os.makedirs('video', exist_ok=True)
            video_name = 'video/{}.mp4'.format(title)

            current_app.logger.info('{} 开始下载'.format(self.getCurrentTime()))

            with open(video_name, 'wb') as f:
                f.write(resp.content)

            # Original message said "开始完成" ("start finished") — corrected
            # to "下载完成" ("download finished").
            current_app.logger.info('{} 下载完成'.format(self.getCurrentTime()))
        except requests.exceptions.MissingSchema:
            pass

    def getVideo(self, url, title):
        """Fetch the article at *url*, locate its video and download it."""
        page = self.getPageByURL(url)
        video_url = self.getNewsVideo(page)
        self.download(video_url, title)
        print(video_url)


class Spider(object):
    """Crawls www.lsol.com.cn listing pages and downloads each article's video."""

    def __init__(self):
        self.next_page = None       # path of the next listing page, if any
        self.page_spider = Page()   # handles one article: fetch + download

    # Return the current local time formatted as '[YYYY-MM-DD HH:MM:SS]'.
    def getCurrentTime(self):
        """Return the current local time formatted as '[YYYY-MM-DD HH:MM:SS]'."""
        return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))

    # Fetch the HTML of the listing page identified by page_num.
    def getPageByNum(self, page_num):
        """Fetch the listing page identified by *page_num* and return its HTML."""
        res = requests.get(self.getPageUrlByNum(page_num))
        return res.text

    # Build the listing-page URL from a page identifier.
    def getPageUrlByNum(self, page_num):
        """Return the full URL for the listing page *page_num*."""
        return 'http://www.lsol.com.cn/{}'.format(page_num)

    # Find the link to the next listing page.
    def getNextPageNum(self, page):
        """Return the path of the next listing page, or None on the last page."""
        current_app.logger.info('{} 正在获取下一个页面，请稍后'.format(self.getCurrentTime()))
        html = self.getPageByNum(page)
        # Raw string avoids invalid-escape warnings; pattern itself unchanged.
        pattern = re.compile(r'<div id="pages".*<span>(.*?)</span>.*<a href="(.*?)" class="a1">下一页</a>')
        match = re.search(pattern, html)
        # NOTE(review): '4' appears to mark the last page number — confirm
        # against the site's pagination markup.
        if match and match.group(1) != '4':
            current_app.logger.info('{} 获取到下一个有效页面{}'.format(self.getCurrentTime(), match.group(2)))
            return match.group(2)
        current_app.logger.info('{} 查不到下一页，已经到最后一页了'.format(self.getCurrentTime()))
        return None

    # Process every article entry on the given listing page.
    def getNewsLists(self, page_url):
        """Walk the article list on *page_url* and process each entry."""
        page = self.getPageByNum(page_url)
        soup = BeautifulSoup(page, 'html.parser')
        if soup:
            for item in soup.select('ul.list_img1 li'):
                self.getNewsInfos(item)

    def getNewsInfos(self, item):
        """Extract (url, title) from one list item and download its video."""
        pattern = re.compile(r'<a href="(.*?)".*?<img .*?alt="(.*?)".*?</a>', re.S)
        match = re.search(pattern, str(item))
        if match:
            url = match.group(1)
            title = match.group(2)
            current_app.logger.info('标题: {}, 链接: {}'.format(title, url))
            self.page_spider.getVideo(url, title)

    def run(self):
        """Entry point: read the bookmark file and crawl the first listing page."""
        # Create the bookmark file with plain file I/O instead of the
        # original os.system('echo 1 > ...'): portable and no shell spawn.
        if not os.path.exists('lsolpage.txt'):
            with open('lsolpage.txt', 'w') as f:
                f.write('1\n')

        with open('lsolpage.txt', 'r') as f:
            page = f.readline()

        current_app.logger.info('开始页码{}'.format(int(page.strip())))
        current_app.logger.info('爬虫正在启动，开始爬去取在线视频')
        current_app.logger.info('{} 正在获取视频个数，请稍后'.format(self.getCurrentTime()))

        # Fetch the first listing page; pagination loop was dead
        # (commented-out) code and has been removed.
        self.getNewsLists('list-734-1.html')
