import threading
import time
import logging
import setup_dirs
import util.loggerUtil
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, send_file
from flask_restplus import Resource, Api, Namespace, fields
from scraper.ScrapeConfigManager import ScrapeConfigManager
from scraper.scrape import ScrapeManager, LiveScrape

app = Flask(__name__)
# Emit JSON responses as raw UTF-8 instead of escaping non-ASCII characters.
app.config['JSON_AS_ASCII'] = False
api = Api(app, version='5.0', title='IDMI Crawler System',
          description='IDMI Crawler Interface System')
# Render the Swagger UI with the operation list expanded by default.
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'

# Two namespaces group the REST resources: scrape actions and config lookup.
scraper_api = Namespace(name='article_scraper',
                        description='Interface for Consline')
api.add_namespace(scraper_api)
scraper_cfg = Namespace(name='article_config',
                        description='Configs for Consline')
api.add_namespace(scraper_cfg)

# Swagger request/response models; declared for documentation purposes only —
# no resource below references them for validation.
job_model = api.model('job',
                      {'job_id': fields.String, 'args': fields.String, 'interval': fields.String})
scrape_model = api.model('scrape',
                         {'sourceId': fields.String, 'type': fields.String})

# Background scheduler used by add_interval_job() to run recurring scrapes.
apsched = BackgroundScheduler(logger=app.logger)
apsched.start()

# Route application logging to a rotating file under the data directory.
log_handler = util.loggerUtil.get_rotatingfilehandler(setup_dirs.data + '/app.log')
app.logger.addHandler(log_handler)
app.logger.setLevel(logging.INFO)

# Project-level managers: one tracks running scrape subprocesses, the other
# resolves sourceIds to their configuration files.
scrape_manager = ScrapeManager(app.logger)
scrape_config_manager = ScrapeConfigManager(app.logger)


# ---------------------------------------
def add_interval_job(job, sourceId, interval):
    """Schedule *job* to run every *interval* minutes for a known source.

    The job is only registered when *sourceId* exists in the config
    manager's source table; otherwise an error is logged and nothing
    is scheduled.

    Returns True when the job was added, False when the source is unknown.
    """
    if sourceId not in scrape_config_manager.sourceTable:
        app.logger.error('No valid config files associated with sourceId ' + sourceId + ' Job was not added.')
        return False

    apsched.add_job(job, 'interval', args=[sourceId], minutes=int(interval))
    app.logger.info('scrape job added. source: {}. interval: {} minutes'.format(sourceId, interval))
    return True


def log_scraper_msg(poll_interval=5):
    """Poll scrape subprocesses forever and forward their output to the log.

    Runs inside an application context so that handlers attached to
    ``app.logger`` are available. Never returns; intended to run on a
    daemon thread (see ``initialize``).

    :param poll_interval: seconds to sleep between polls. Defaults to 5,
        the value that was previously hard-coded.
    """
    with app.app_context():
        while True:
            scrape_manager.poll_processes()
            time.sleep(poll_interval)


@app.before_first_request
def initialize():
    """Start the subprocess-polling thread and schedule the default scrapes.

    Registered via ``before_first_request`` so the background machinery is
    spun up lazily on the first incoming request.
    """
    poller = threading.Thread(target=log_scraper_msg, daemon=True)
    poller.start()
    start_default_scrape()


def start_default_scrape(interval_minutes=180):
    """Schedule a recurring live scrape for every configured source.

    :param interval_minutes: scheduling interval handed to
        ``add_interval_job``. Defaults to 180, the previously
        hard-coded value.
    """
    all_good = True
    for sourceId in scrape_config_manager.sourceTable:
        all_good &= add_interval_job(live_scrape_job, sourceId, interval_minutes)
    # Bug fix: the previous message claimed a "15 min interval" while jobs
    # were actually scheduled every 180 minutes — log the real interval.
    app.logger.info('Started default scrape of all sources in {} min interval'.format(interval_minutes))
    if not all_good:
        app.logger.info('Some faulty configs were detected and will be skipped')


# ---------------------------------------

@app.after_request
def set_response_headers(response):
    """Disable client and proxy caching on every outgoing response."""
    no_cache_headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0',
    }
    for name, value in no_cache_headers.items():
        response.headers[name] = value
    return response


def live_scrape_job(sourceId):
    """Launch one live scrape per config associated with *sourceId*.

    :param sourceId: key into the config manager's source table.
    :return: True when configs were found and scrapes were submitted,
        False when no configs exist for the source.
    """
    configs = scrape_config_manager.get_configs_by_sourceId(sourceId)
    if configs is None:
        app.logger.error('No valid config files associated with sourceId ' + sourceId + ' Scrape will not be performed')
        return False

    # Improvement: enter the app context once for the whole batch instead of
    # re-entering it on every loop iteration as the original did.
    with app.app_context():
        for config in configs:
            scrape_manager.add_scrape(LiveScrape(config))
    return True


@scraper_api.route('/live_scrape/<string:sourceId>')
class RunLiveScrape(Resource):
    """Trigger a one-off live scrape for the source identified in the URL."""

    @scraper_api.doc(description='Start one interval scraper crawling website ,'
                                 'You need to specify the code of website name')
    def post(self, sourceId):
        succeeded = live_scrape_job(sourceId)
        return {'status': 'Done' if succeeded else 'Failed'}


@scraper_api.route('/running_scrapes')
class RunningScrapes(Resource):
    """Report the identifiers of all scrapes currently being tracked."""

    @scraper_api.doc(description='Returns list of currently running scrapes')
    def get(self):
        return [scrape_id for scrape_id in scrape_manager.scrapes]


@scraper_cfg.route('/list')
class GetSources(Resource):
    """Expose the full set of configured scrape sources."""

    @scraper_cfg.doc(description='Display configured source')
    def get(self):
        return [config for config in scrape_config_manager.get_configs()]


if __name__ == '__main__':
    # Development entry point: serve on all interfaces, port 7000.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger and
    # auto-reloader; combined with host='0.0.0.0' this must never run on a
    # host reachable from untrusted networks — confirm this path is
    # development-only.
    app.run(host='0.0.0.0',
            port=7000,
            debug=True)
