import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from one.items import DouItem
from one.pipelines import DouPipeline
from scrapy import log
from scrapy_splash import SplashRequest

# log.msg("This is a warning", level=log.SILENT)
# http://www.beiwo.tv/vod/34051/
class Douspider(scrapy.Spider):
    """Crawl beiwo.tv video listing pages, follow each video detail page
    through Splash (for JS-rendered content), and hand the scraped record
    directly to DouPipeline.
    """

    name = "dou"
    # FIX: Scrapy's offsite middleware reads `allowed_domains` (plural);
    # the original `allowed_domain` attribute was silently ignored.
    allowed_domains = ['beiwo.tv']
    start_urls = [
        "http://www.beiwo.tv"
    ]

    def start_requests(self):
        """Yield one request per listing page (1..49), parsed by parse_item."""
        for num in range(1, 50):
            yield scrapy.Request(
                'http://www.beiwo.tv/list/' + str(num),
                callback=self.parse_item,
            )

    def parse_item(self, response):
        """From a listing page, follow every video link (a.play-img) via
        SplashRequest so parser_vod sees the JS-rendered detail page.
        """
        for anchor in response.xpath('//li//a[@class="play-img"]'):
            href = anchor.xpath('@href').extract_first()
            if not href:
                # Skip anchors without an href instead of raising IndexError.
                continue
            yield SplashRequest(
                'http://www.beiwo.tv' + href,
                self.parser_vod,
                args={'wait': 0.5},
            )

    def parser_vod(self, response):
        """Scrape a video detail page and push a DouItem through the pipeline.

        Download links are serialized as "<type>-LH-<name>-LH-<url>" entries
        joined with "-YP-" (custom separators decoded downstream).
        """
        entries = []
        for a in response.xpath('//div[@class="downlist"]//ul//li//span//a[@class="d1"]'):
            # Renamed from `str`, which shadowed the builtin; use
            # extract_first('') so a missing attribute yields '' rather
            # than an IndexError.
            thunder_url = a.xpath('@href').extract_first('')
            thunder_name = a.xpath('@mc').extract_first('')
            thunder_type = a.xpath('text()').extract_first('')
            entries.append(thunder_type + "-LH-" + thunder_name + '-LH-' + thunder_url)
        downloads = '-YP-'.join(entries)

        # Page metadata; extract_first() returns None when a field is absent
        # instead of crashing the whole parse.
        title = response.xpath('//div[@class="endpage clearfix"]//h1/text()').extract_first()
        desc = response.xpath('//div[@class="textdesc"]/text()').extract_first()
        cover = response.xpath('//div[@class="pic"]//img/@src').extract_first()
        main_texts = response.xpath('//div[@id="main"]//li/text()').extract()
        # First <li> text appears to be the online/download time, the last the
        # update time — mirrors the original [0] / [-1] indexing.
        downloadtime = main_texts[0] if main_texts else None
        updatetime = main_texts[-1] if main_texts else None

        item = DouItem()
        item['html'] = response.url
        item['title'] = title
        item['content'] = desc
        item['cover'] = cover
        item['onlinetime'] = downloadtime
        item['updatetime'] = updatetime
        item['thunders'] = downloads
        # NOTE(review): pipelines are normally driven by the engine after the
        # spider yields an item; invoking DouPipeline directly bypasses the
        # ITEM_PIPELINES machinery. Kept as-is to preserve behavior — the
        # (typo'd) method name `proccess_item` must match DouPipeline's.
        DouPipeline().proccess_item(item, self)

