from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from subprocess import call
from re import compile, search

class IatiSpider(Spider):
    """Crawl IATI Registry publisher pages and print links to aid-data
    XML files to stdout, following pagination until the last page."""
    name = "iati"
    # Crawler won't move outside this domain
    allowed_domains = ["iatiregistry.org"]
    # Publisher pages to start crawling from
    start_urls = [
            "http://www.iatiregistry.org/publisher/undp",
            "http://www.iatiregistry.org/publisher/worldbank"
    ]

    def __init__(self, *args, **kwargs):
        # Forward arguments to the base class so Scrapy's spider-argument
        # mechanism (-a key=value) keeps working despite the override.
        super(IatiSpider, self).__init__(*args, **kwargs)
        # Links matching this pattern are metadata/organisation files, not
        # aid data; raw string so \d is the regex digit class, not a
        # (deprecated) string escape.
        self.reg = compile(r"WB-\d|IATI_O|GLOBAL")

    def parse(self, response):
        """Print every aid-data XML link on this result page, then yield a
        Request for the next page when a pagination link exists."""
        sel = Selector(response)
        # The first target="_blank" anchor in each dataset item is the
        # link to the publisher's XML file.
        for link in sel.xpath('//li[@class="dataset-item"]/p/a[@target="_blank"][1]/@href').extract():
            # Filter out XMLs that only contain metadata.
            if self.reg.search(link) is None:
                # Print link to stdout (parenthesized form works on
                # both Python 2 and 3).
                print(link)

        # Build the absolute URL of the next page: site root (trailing
        # slash stripped) + relative href of the trailing raquo link.
        site_root = sel.xpath('//body/@data-site-root').extract()[0][:-1]
        next_links = sel.xpath(u'//div[@class="pagination pagination-centered"]/ul/li[last()]/a[text()="\xbb"]/@href').extract()
        if next_links:
            yield Request(site_root + next_links[0], callback=self.parse)
