# -*- coding: utf-8 -*-
import scrapy
import redis
from meiwenting.items import MeiwentingItem


class MeiwentingClassifySpider(scrapy.Spider):
    """Spider for category pages on www.meiwenting.com.

    ``parse`` collects the category links from the home page and pushes
    them onto a Redis list; ``getclassfiy`` scrapes article metadata from
    a category listing page and follows its pagination.  The crawl of the
    category pages themselves is driven from the Redis queue (see the
    deliberately commented-out request in ``parse``), not directly from
    ``parse``.
    """

    name = 'meiwenting_classify'
    allowed_domains = ['www.meiwenting.com']
    start_urls = ['http://www.meiwenting.com/']
    # NOTE(review): these are class attributes, so the connection pool is
    # created at import time; a misconfigured Redis will surface on module
    # import rather than when the spider actually runs.
    redis_pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
    myredis = redis.Redis(connection_pool=redis_pool)

    def parse(self, response):
        """Queue every category link found on the home page into Redis.

        :param response: response for the site home page.
        """
        for sel in response.xpath('//div[@class="guide"]//ul[@class="f5"]/li'):
            link = sel.xpath('a/@href').extract_first()
            self.myredis.lpush('CACHE_MEIWENTING_CLASSFIY_LINK', link)
            # The category pages are consumed from the Redis queue by an
            # external driver instead of being followed directly:
            # yield scrapy.Request(url=link, callback=self.getclassfiy)

    def getclassfiy(self, response):
        """Yield one ``MeiwentingItem`` per article on a listing page.

        Follows the "next page" link at the bottom of the listing, if any.

        :param response: response for a category listing page.
        """
        # The breadcrumb is page-level, not per-entry; the absolute XPath
        # is hoisted out of the loop so it is evaluated once per page.
        post_type = response.xpath(
            '//div[@class="place"]/a/text()').extract()[-1]
        for sel in response.xpath('//ul[@class="e2"]/li'):
            item = MeiwentingItem()
            item['title'] = sel.xpath('a/text()').extract_first()
            item['classify'] = sel.xpath(
                'span[@class="info"]/a/text()').extract_first()
            # The second text node of the info <span> holds the date,
            # prefixed with a Chinese "time:" label that is stripped here.
            item['date'] = sel.xpath(
                'span[@class="info"]/text()').extract()[1].replace("时间：", "")
            item['intro'] = sel.xpath(
                'p[@class="intro"]/text()').extract_first()
            item['link'] = sel.xpath('a/@href').extract_first()
            item['post_id'] = item['link'].split('/')[-1].replace('.html', '')
            item['post_type'] = post_type
            yield item
        next_page = response.xpath(
            '//ul[@class="pagelist"]/li[last()-1]/a/@href').extract_first()
        # Guard the last page: extract_first() returns None when there is
        # no "next" anchor, and urljoin(None) would raise TypeError.
        if next_page:
            yield scrapy.Request(url=response.urljoin(next_page),
                                 callback=self.getclassfiy)
