import scrapy
from tutorial.items import VarietyItem
import re


class VarietySpider(scrapy.Spider):
    """Crawl Tencent Video's variety-show listing pages into VarietyItem objects."""

    name = "variety"
    allowed_domains = ["v.qq.com"]
    start_urls = [
        "http://v.qq.com/x/list/variety",
    ]

    def parse(self, response):
        """Parse one listing page and follow pagination.

        Yields one ``VarietyItem`` per ``li.list_item`` entry with:
          - ``variety_id``: "variety" + last URL path component (extension stripped)
          - ``variety_name``: the link's ``@title`` attribute
          - ``variety_attr``: dict with the show ``url`` and the numeric
            fragments of its update text (``update``)
        then yields a Request for the next page, if one exists.
        """
        for sel in response.xpath("//li[@class='list_item']"):
            tv_url = sel.xpath(
                "div[1]/strong[@class='figure_title']/a/@href").extract_first()
            update = sel.xpath(
                "a/div/span[@class='figure_info']/text()").extract_first()
            # Extract numeric date fragments (e.g. "2016.10.21") from the
            # update text; str() guards against extract_first() returning None.
            date = re.findall(r"\d+\.?\d*", str(update))
            # Derive a stable id from the URL's final path component,
            # e.g. ".../cover/abc123.html" -> "varietyabc123".
            tv_id = 'variety' + str(tv_url).split('/')[-1].split('.')[0]
            item = VarietyItem()
            item['variety_id'] = tv_id
            item['variety_name'] = sel.xpath(
                "div[1]/strong[@class='figure_title']/a/@title").extract_first()
            # Single dict literal (original built this in a local named
            # `dict`, shadowing the builtin).
            item['variety_attr'] = {'url': tv_url, 'update': date}
            yield item
        # Follow the "next page" link, if any.
        next_page = response.xpath(
            "//div[@class='mod_pages']/a[@class='page_next']/@href")\
            .extract_first()
        if next_page is not None:
            yield scrapy.Request(response.urljoin(next_page),
                                 callback=self.parse)
