# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
import os


class Ir_Spider(CrawlSpider):
    """Broad crawler over concordia.ca that saves every fetched page.

    Starts at http://encs.concordia.ca, follows every link within the
    allowed domain, and dumps each response body into the local
    ``outputs/`` directory, one file per URL.
    """
    name = 'irspider'
    allowed_domains = ['concordia.ca']
    start_urls = ["http://encs.concordia.ca"]

    # Follow every extracted link and hand each response to dump_html.
    rules = (
        Rule(SgmlLinkExtractor(allow=(r".*",)), follow=True, callback="dump_html"),)

    def dump_html(self, response):
        """Write ``response.body`` to outputs/<url-with-slashes-as-underscores>.

        :param response: the Scrapy response for a crawled page.
        """
        # Derive a filesystem-safe filename from the URL.
        # NOTE(review): other reserved characters (e.g. ':', '?') are not
        # sanitized — may still fail on some filesystems; confirm targets.
        outname = response.url.replace("/", "_")
        dir_path = "outputs/"
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        # BUG FIX: the original joined dir_path only in the *else* branch,
        # so on the first run (directory freshly created) every file landed
        # in the current working directory instead of outputs/.
        outname = os.path.join(dir_path, outname)
        self.log('Dumping %s to %s' % (response.url, outname))
        # Binary mode: response.body is bytes; the context manager closes
        # the handle even if write() raises.
        with open(outname, "wb") as output:
            output.write(response.body)