import sys
import json
import scrapy
from scrapy.crawler import CrawlerProcess


class SidacSpider(scrapy.Spider):
    """Generic data-collection spider driven by an external JSON config.

    The runner must set two class attributes before starting the crawl:

    * ``start_urls`` -- list of URLs to begin crawling from.
    * ``target`` -- mapping of output field name -> CSS selector, each
      selector evaluated relative to a ``div.quote`` block.
    """

    name = 'sidac'
    # Number of responses parsed so far.
    # NOTE(review): despite the name, this is a response counter, not the
    # crawl depth -- confirm intent with the original author.
    dep_tok = 0

    def parse(self, response):
        """Yield one item per ``div.quote`` block, then follow pagination.

        Each yielded dict has the keys of ``self.target``; the value is the
        first match of the corresponding CSS selector (or None if absent).
        """
        self.dep_tok += 1
        for block in response.css('div.quote'):
            yield {k: block.css(v).get() for k, v in self.target.items()}

        # Follow the "next" pagination link when present; the DEPTH_LIMIT
        # crawler setting bounds how far this recursion can go.
        next_page = response.css('li.next a::attr(href)').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)


if __name__ == "__main__":
    # CLI entry point: sidac <absolute_config_file_path>
    if len(sys.argv) != 2:
        # sys.exit (not the site-module `exit` helper) is the reliable way
        # to terminate with a usage message from a script.
        sys.exit("Usage: sidac <absolute_config_file_path>")
    print(f"Loading configurations from {sys.argv[1]}")
    with open(sys.argv[1], encoding="utf-8") as f:
        confs = json.load(f)

    # Inject the per-run configuration into the spider class before the
    # crawler instantiates it.
    SidacSpider.start_urls = confs['start_urls']
    SidacSpider.target = confs['target']

    process = CrawlerProcess({
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        # Polite crawling: one request at a time, spaced by the configured delay.
        'DOWNLOAD_DELAY': confs['request_interval'],
        'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
        'DEPTH_LIMIT': confs['max_depth'],
        # NOTE(review): FEED_URI is deprecated since Scrapy 2.1 in favour of
        # the FEEDS setting -- confirm which Scrapy version is deployed.
        'FEED_URI': confs['output_file'],
        'LOG_LEVEL': 'INFO',
    })
    process.crawl(SidacSpider)
    process.start()  # blocks until the crawl finishes
