# -*- coding: utf-8 -*-
import datetime
import socket
from urllib.parse import urljoin

from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from scrapy.spiders import CrawlSpider, Rule

from mansite.items import MansiteItem


class IdeapocketSpider(CrawlSpider):
    """Crawl the 'ideapocket' listing on m.nanrenvip.cc and yield one
    MansiteItem per book card found on each listing page.

    Rule ordering matters: the item-page rule comes first, and the
    pagination rule (restricted to the "next page" anchor) follows with
    follow=True so the crawl advances through the listing pages.
    Background reference:
    https://blog.csdn.net/joe8910/article/details/85159059
    """
    name = 'ideapocket'

    allowed_domains = ['m.nanrenvip.cc']
    start_urls = ['http://m.nanrenvip.cc/ideapocket/']

    rules = (
        # NOTE: `allow` takes a regular expression, not a glob.  The original
        # pattern "ideapocket/*" meant "'ideapocket' followed by zero or more
        # slashes", which under re.search is equivalent to plain 'ideapocket'
        # — so this raw-string form matches exactly the same URLs.
        Rule(LinkExtractor(allow=r'ideapocket'), callback='parse_item'),
        # Pagination: follow the "next page" link (first <a> inside the
        # right-hand span); no callback, just keep crawling.
        Rule(LinkExtractor(restrict_xpaths='//span[contains(@class,"right")]/a[1]'), follow=True),
    )

    def parse_item(self, response):
        """Extract every book card (<li class="book-li">) on *response*
        and yield a populated MansiteItem for each.

        Relative image / detail URLs are resolved against response.url;
        text fields are whitespace-stripped.
        """
        selectors = response.xpath('//li[@class="book-li"]')
        self.log('response %s return record items: %d' % (response.url, len(selectors)))

        def join_url(href):
            # Resolve a possibly-relative link against the page URL.
            return urljoin(response.url, href)

        for selector in selectors:
            ld = ItemLoader(item=MansiteItem(), selector=selector)

            # Text fields: strip surrounding whitespace with str.strip
            # (no lambda needed — it is already a unary callable).
            ld.add_xpath('title', './/h4[@class="book-title"]/text()', MapCompose(str.strip))
            ld.add_xpath('desc', './/p[@class="book-desc"]/text()', MapCompose(str.strip))
            ld.add_xpath('image', './/a/img/@data-echo', MapCompose(join_url))
            ld.add_xpath('duration', './/span/em[1]/text()', MapCompose(str.strip))
            ld.add_xpath('type', './/span/em[2]/text()', MapCompose(str.strip))
            ld.add_xpath('pubDate', './/span/em[3]/text()', MapCompose(str.strip))
            ld.add_xpath('detailUrl', './/a[@class="book-layout"]/@href', MapCompose(join_url))

            # Crawl metadata recorded alongside the scraped fields.
            ld.add_value('url', response.url)
            ld.add_value('server', socket.gethostname())
            ld.add_value('project', self.settings.get('BOT_NAME'))
            ld.add_value('timestamp', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

            item = ld.load_item()
            self.log('parse item for return %s' % item)
            yield item
