"""Starts live scrape spider.

Args:
    sourceId (str): Name of config to start.


        :param crawler_or_spidercls: already created crawler, or a spider class
            or spider's name inside the project to create it
        :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
            :class:`~scrapy.spiders.Spider` subclass or string
"""

import argparse

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from util import loggerUtil
import setup_dirs

from scraper.spiders.IndexPageSpider import IndexPageSpider
from scraper.spiders.NewsSpider import NewsSpider

class LiveScrapeStarter:
    """Configures and runs the live-scrape crawler process for one source.

    Builds a Scrapy ``CrawlerProcess`` from the project settings, routes
    root logging to a rotating per-source log file, and lets callers queue
    the index-page and news spiders before starting the (blocking) crawl.
    """

    def __init__(self, cname):
        """Set up the crawler process and per-source logging.

        Args:
            cname (str): Name of the source config; also used as the log
                file name under ``setup_dirs.data``.
        """
        self.cname = cname
        self.process = CrawlerProcess(get_project_settings())
        # Rotating log so long-running crawls don't grow one unbounded file.
        loggerUtil.setRotatingRootLogger(setup_dirs.data + '/{}.log'.format(cname))

    def run_url_update(self):
        """Queue the index-page spider that refreshes the source's URL list."""
        self.process.crawl(IndexPageSpider, sourceId=self.cname)

    def run_scrape(self):
        """Queue the news spider that scrapes article content for the source."""
        self.process.crawl(NewsSpider, sourceId=self.cname)

    def start(self):
        """Start all queued crawls; blocks until every spider has finished."""
        self.process.start()

def main():
    """Parse the source id from the command line and run a full live scrape."""
    parser = argparse.ArgumentParser(
        description='Start a live scrape for one source config.')
    parser.add_argument('sourceId', help='Name of the source config to scrape.')
    args = parser.parse_args()

    scrape = LiveScrapeStarter(args.sourceId)
    # Queue both spiders first; start() runs them and blocks until done.
    scrape.run_url_update()
    scrape.run_scrape()
    scrape.start()


if __name__ == '__main__':
    main()
