from abc import ABC, abstractmethod
from crawler.base_spider import BaseSpider
from typing import Optional
import time
from constants import Seconds
import threading
from utils.log import logger
import concurrent
from scrapy.crawler import CrawlerProcess

class BaseJob(threading.Thread, ABC):
    """Abstract base class for long-running, periodic crawl jobs.

    A concrete job subclasses this, sets ``name`` and ``spider``, and
    implements :meth:`fetch_accounts` and :meth:`process_user_data`.
    The thread's :meth:`run` loop then repeats:
    fetch accounts -> crawl each account -> post-process -> sleep.

    ``ABC`` is mixed in so the ``@abstractmethod`` decorators below are
    actually enforced (without ``ABCMeta`` they are inert). ``ABCMeta`` is
    a subclass of ``type`` (Thread's metaclass), so there is no metaclass
    conflict.
    """

    # Expected to be provided by subclasses:
    name: str                     # job display name (shadows Thread.name property)
    running: bool                 # loop flag; set to False to request shutdown
    spider: Optional[BaseSpider]  # spider handed to Scrapy's CrawlerProcess

    def __init__(self):
        super().__init__()
        self.running = False
        self.logger = logger

    def run(self, interval=1):
        """Main thread loop: one crawl/process cycle every *interval* days.

        ``interval`` now defaults to 1: ``Thread.start()`` calls ``run()``
        with no arguments, so a required parameter here made the standard
        ``job.start()`` path raise TypeError. Callers that invoke
        ``run(interval)`` directly keep working unchanged.

        Any exception in a cycle stops the job (``running`` is cleared)
        rather than retrying forever; the error is logged.
        """
        self.running = True
        self.logger.info("{} job start".format(self.name))
        while self.running:
            try:
                accounts = self.fetch_accounts()
                self.start_spider_for_accounts(accounts)
                self.process_user_data()
                # NOTE(review): a single long sleep means a shutdown request
                # (running=False) is only noticed after up to *interval* days;
                # a threading.Event wait would allow prompt wake-up.
                time.sleep(interval * Seconds.ONE_DAY)
            except Exception as e:
                self.running = False
                self.logger.error("Error in {} Job: {}".format(self.name, str(e)))

    @abstractmethod
    def fetch_accounts(self):
        """Return an iterable of accounts to crawl this cycle."""
        raise NotImplementedError

    @abstractmethod
    def process_user_data(self):
        """Post-process data gathered by the spiders for this cycle."""
        raise NotImplementedError

    def start_spider_for_accounts(self, accounts):
        """Crawl each account in turn; one account's failure is logged and
        does not abort the remaining accounts (best-effort per item)."""
        for account in accounts:
            try:
                self.start_spider(account)
            except Exception as e:
                self.logger.error("Error processing account {}: {}".format(account, str(e)))

    def start_spider(self, account):
        """Run ``self.spider`` against *account*, blocking until the crawl ends.

        Raises:
            RuntimeError: if ``self.spider`` was never set. (Was an
                ``assert``, which is silently stripped under ``python -O``.)
        """
        if self.spider is None:
            raise RuntimeError("spider is None")
        # NOTE(review): Scrapy's CrawlerProcess runs Twisted's reactor, which
        # cannot be restarted within one interpreter — creating a fresh
        # CrawlerProcess per account will fail after the first crawl; confirm
        # whether one process with multiple crawl() calls is intended.
        process = CrawlerProcess()
        process.crawl(self.spider, account)
        # start() blocks until all crawls finish; join() afterwards is
        # redundant (kept out — CrawlerProcess has no blocking join here).
        process.start()

    def __del__(self):
        # Best-effort shutdown at garbage collection. Because run() sleeps
        # uninterruptibly, this join() can block for a very long time if the
        # loop is mid-sleep.
        self.running = False
        if self.is_alive():
            self.join()