# coding: utf-8
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lenin.items import LeninWork

class LeninSpider(CrawlSpider):
    """Crawl the Lenin Collected Works archive on marxists.org.

    Starts at the Collected Works index, follows volume pages and per-work
    index pages, and parses every other .htm page as an individual work.
    """
    name = "lenin"
    allowed_domains = ["marxists.org"]
    start_urls = [
        "http://www.marxists.org/archive/lenin/works/cw/index.htm"
    ]

    # Rules applied to every link the crawler encounters.
    rules = (
        # Volume pages: follow their links, nothing to parse on them.
        Rule(SgmlLinkExtractor(allow=(r'volume\d+\.htm',)), follow=True),
        # Likewise for the index page of an individual work.
        Rule(SgmlLinkExtractor(allow=(r'index\d+\.htm',)), follow=True),
        # Anything else ending in ".htm" is a work: parse it.
        # NOTE: the dot before "htm" is escaped; the original '.+.htm'
        # would also have matched URLs like ".../foohtm".
        Rule(SgmlLinkExtractor(allow=(r'.+\.htm',)), callback='parse_item'),
    )

    def parse_item(self, response):
        """Build a LeninWork item from a single work page.

        The title is taken from the <h3 class="title"> link, the body text
        is every <p> element joined with spaces, and the year/month are
        guessed from substrings of the URL, defaulting to "ind"
        (undetermined) when no match is found.
        """
        x = HtmlXPathSelector(response)

        work = LeninWork()
        work['url'] = response.url
        work['name'] = x.select("//h3[@class='title']/a/text()").extract()
        work['text'] = " ".join(x.select('//p').extract())

        # Year: scan the plausible range for Lenin's works; when several
        # years appear in the URL the latest one wins (no break, as before).
        work['year'] = "ind"
        for year in range(1870, 1925):
            if str(year) in work['url']:
                work['year'] = str(year)

        # Month: look for a three-letter abbreviation in the URL.  Every URL
        # contains "marxists.org", which itself contains "mar", so "mar" only
        # counts when it occurs twice (once in the domain, once in the path).
        work['month'] = "ind"
        for month in ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                      'jul', 'aug', 'sep', 'oct', 'nov', 'dec'):
            occurrences = work['url'].count(month)
            if occurrences > 0 and (month != 'mar' or occurrences == 2):
                work['month'] = month
        return work