# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader import ItemLoader
from mydmoz.items import MydmozItem
import re
from scrapy.loader.processors import MapCompose, Compose
import logging
import functools
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
logger = logging.getLogger('dmoz')



def strip_blank(values):
    """Identity pass-through for a single extracted image URL.

    Kept as the ``MapCompose`` hook on ``image_urls`` so that per-URL
    rewriting (e.g. host prefixing) can be re-enabled later without
    touching the loader definition.
    """
    return values


# def strip_space(itemloader, s):
#     logger.info(s)
#     return re.sub('\s{2,}', ' ', s )
# class strip_space(object):
#     def __call__(self, value):
#         logger.info(value)
#         newsub = functools.partial(re.sub, '\s{2,}', ' ')
#         return ''.join(map(newsub, value))
#         # return re.sub('\s{2,}', ' ', value[0])


class DmozLoader(ItemLoader):
    # Input processor for the ``image_urls`` field: each extracted URL is
    # run through strip_blank (currently a pass-through) before being
    # stored on the item.
    image_urls_in = MapCompose(strip_blank)


class DmozSpider(CrawlSpider):
    """Crawl image-gallery detail pages on 949hh.com.

    Starts from one listing page, follows detail-page links matched by
    ``rules``, and yields items (via :class:`DmozLoader`) carrying the
    page title and the gallery image URLs.
    """

    name = 'dmoz'
    allowed_domains = ['949hh.com']
    # NOTE: a single URL string, not the usual list — start_requests()
    # below issues exactly one request from it.
    start_urls = 'http://949hh.com/tupianzhuanqu/yazhousetu/'
    # Politeness throttle: wait 2 seconds between requests.
    download_delay = 2

    rules = (
        # Detail pages look like /tupianzhuanqu/yazhousetu/<id>.html
        Rule(LinkExtractor(allow=r'/tupianzhuanqu/yazhousetu/\d+\.html'),
             callback='parse_item'),
    )

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Create the spider and hook the ``spider_closed`` signal.

        Uses the crawler's signal manager instead of the legacy
        ``scrapy.xlib.pydispatch`` dispatcher (deprecated and removed in
        Scrapy 2.0); the top-of-file dispatcher import is now unused.
        """
        spider = super(DmozSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed,
                                signal=signals.spider_closed)
        return spider

    def spider_closed(self):
        # Informational only; fired once when the crawl finishes.
        logger.info('spider is closed')

    def start_requests(self):
        # start_urls holds one URL string, so yield a single request.
        # A desktop Firefox User-Agent is sent to avoid trivial bot blocks.
        yield scrapy.Request(
            self.start_urls,
            headers={'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"})

    def parse_item(self, response):
        """Extract the title and gallery image URLs from a detail page."""
        loader = DmozLoader(item=MydmozItem(), response=response)
        loader.add_xpath('title', '//h1/text()')
        # Gallery images live inside the .nrong container.
        loader.add_css('image_urls', '.nrong img::attr(src)')
        yield loader.load_item()
