import re
from kateglo_crawler.spiders import sanitize
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import Rule
from scrapy.contrib.spiders.crawl import CrawlSpider
from scrapy.selector.libxml2sel import HtmlXPathSelector

from __init__ import *
from kateglo_crawler.items import AbbrevationItem


#
# ABBREVATION CRAWL
#
class AbbrevationCrawler(CrawlSpider):
    """Crawl the kateglo.bahtera.org abbreviation listing pages and
    scrape each table row into an ``AbbrevationItem``.
    """

    # Spider identifier (run with: scrapy crawl abbrevation-crawler).
    name = 'abbrevation-crawler'

    # Debug flag; not read inside this class — presumably consumed by
    # shared pipeline/middleware code. TODO(review): confirm.
    debug = False

    # Crawl rules: stay on the kateglo domain, follow every link that
    # carries the '&mod=abbr' module selector, and skip tag-filter links.
    allowed_domains = ['kateglo.bahtera.org']
    start_urls = [
        'http://kateglo.bahtera.org/?mod=abbr',
    ]
    rules = (
        Rule(SgmlLinkExtractor(allow=(re.compile('&mod=abbr')), deny=('tag='), unique=True), follow=True, callback='parse_item'),
    )

    def parse_item(self, response):
        """Extract one ``AbbrevationItem`` per row of the listing table.

        Table columns (1-indexed): 2 = short form, 3 = Indonesian
        expansion, 4 = English expansion, 5 = tag, 6 = notes.

        :param response: the downloaded listing page
        :return: list of populated ``AbbrevationItem`` instances
        """
        hxs = HtmlXPathSelector(response)

        # One XPath template for the five data columns instead of five
        # near-identical literals; columns[0] is td[2], ..., columns[4] is td[6].
        cell_xpath = '//table[@class="list"]//tr/td[%d]'
        columns = [hxs.select(cell_xpath % col).extract() for col in range(2, 7)]

        # zip pairs the parallel column lists row by row and stops at the
        # shortest one, so a malformed row cannot raise IndexError.
        abbr_items = []
        for short, long_id, long_en, tag, note in zip(*columns):
            abbr_item = AbbrevationItem()
            abbr_item['short'] = sanitize(short)
            abbr_item['long_id'] = sanitize(long_id)
            abbr_item['long_en'] = sanitize(long_en)
            abbr_item['tag'] = sanitize(tag)
            abbr_item['notes'] = sanitize(note)
            abbr_item['urlsource'] = response.url
            abbr_items.append(abbr_item)
        return abbr_items