'''
This module contains all classes
derived from the base class Queue
that represent the different
queues available in L3Q.
'''

from typing import Optional
#from abc import ABC
import abc

'''
Developers:
To create a new queue
make a subclass of the base class
Queue.
Implement required methods for
the new class that will handle
the processing of the queue.
Write class documentation
that describes the new queue
functionality as this info
will be printed to user.
'''

class QJob:
    '''
    Represents a single job for queue scheduling.

    Queues run their scheduling algorithm on a list of QJob
    instances and decide which of them can be launched.
    '''

    def __init__(self, jobid: int, hosts_alloc: int,
                 cores_alloc: int, depend: list[int], init_date: str) -> None:
        self.jobid = jobid
        self.hosts_alloc = hosts_alloc
        self.cores_alloc = cores_alloc
        self.depend = depend
        self.init_date = init_date
        # Optional attributes; from_dict() fills these in when available.
        self.no_tasks = 0
        self.job_type = ''
        self.user = ''

    def __lt__(self, other: 'QJob') -> bool:
        '''
        Sort jobs by submission (init) date, oldest first.
        '''
        return self.init_date < other.init_date

    @classmethod
    def from_dict(cls, job: dict[str, str], no_tasks: int) -> Optional['QJob']:
        '''
        Factory method that creates a QJob instance from a dict
        that has keys equal to the class parameters.

        Returns None when any required key ('id', 'hosts_alloc',
        'cores_alloc', 'init_date') is missing. Optional keys
        ('type', 'user', 'depend') fall back to their defaults.
        '''
        required = ('id', 'hosts_alloc', 'cores_alloc', 'init_date')
        if any(key not in job for key in required):
            return None

        # 'depend' is a comma-separated list of job ids; anything that
        # is not a plain number is silently skipped.
        depend = [int(d) for d in job.get('depend', '').split(',') if d.isdigit()]

        j = cls(
            int(job['id']),
            int(job['hosts_alloc']),
            int(job['cores_alloc']),
            depend,
            job['init_date'],
        )

        j.no_tasks = no_tasks
        # Bug fix: the original read possibly-unbound locals here and
        # raised NameError when 'type' or 'user' was absent; now the
        # constructor defaults ('') are kept instead.
        j.job_type = job.get('type', '')
        j.user = job.get('user', '')

        return j



class QNode:
    '''
    Represents a compute node for queue scheduling.

    Queues run their scheduling algorithm on a list of QNode
    instances to decide where jobs can be placed.
    '''

    def __init__(self, nodeid: int, total_cpus: int, alloc_cpus: int) -> None:
        self.nodeid = nodeid
        self.total_cpus = total_cpus
        self.alloc_cpus = alloc_cpus
        # Optional resource counters; from_dict() fills these in
        # when the corresponding keys are present.
        self.used_cpus = 0
        self.total_memory = 0
        self.used_memory = 0
        self.total_swap = 0
        self.used_swap = 0

    def __lt__(self, other: 'QNode') -> bool:
        '''
        Sort nodes by number of allocated cpus, least loaded first.
        '''
        return self.alloc_cpus < other.alloc_cpus

    @staticmethod
    def _to_int(value: Optional[str]) -> int:
        '''
        Convert an optional string counter to int, treating
        None (or a missing key) as 0.
        '''
        return 0 if value is None else int(value)

    @classmethod
    def from_dict(cls, node: dict[str, str]) -> Optional['QNode']:
        '''
        Factory method that creates a QNode instance from a dict
        that has keys equal to the class parameters.

        Returns None when any required key ('id', 'total_cpus',
        'alloc_cpus') is missing. Optional counters default to 0
        when missing or None.
        '''
        if 'id' not in node or 'total_cpus' not in node or 'alloc_cpus' not in node:
            return None

        n = cls(
            int(node['id']),
            cls._to_int(node['total_cpus']),
            cls._to_int(node['alloc_cpus']),
        )

        # Bug fix: the original read possibly-unbound locals when
        # any of these keys was absent (NameError); they now simply
        # default to 0 via dict.get().
        n.used_cpus = cls._to_int(node.get('used_cpus'))
        n.total_memory = cls._to_int(node.get('total_memory'))
        n.used_memory = cls._to_int(node.get('used_memory'))
        n.total_swap = cls._to_int(node.get('total_swap'))
        n.used_swap = cls._to_int(node.get('used_swap'))

        return n

        




class QRun:
    '''
    Pairing of a job with the node ids it should execute on.

    Queues return a list of QRun instances describing which jobs
    should be sent to which nodes for execution.
    '''

    def __init__(self, job: QJob, node_ids: list[int]) -> None:
        self.job = job            # the QJob to launch
        self.node_ids = node_ids  # ids of the nodes allocated to it



       
class Queue(abc.ABC):
    '''
    Abstract base class that every queue implementation must
    subclass and complete.
    '''

    @abc.abstractmethod
    def run_queue(self, jobs: list[QJob], nodes: list[QNode]) -> list[QRun]:
        '''
        Decide which of the queued jobs can start on the given
        nodes and return them as a list of QRun entries.
        '''

class Default(Queue):
    '''
    The default queueing system in the l3q daemon
    is similar to a FIFO (first in, first out) stack.
    The first job that is added will be launched first,
    if there are more queued jobs they will all be
    processed in turn from the top. If there are
    enough resources available for the next queued job
    to be launched it will be started, until there are no
    queued jobs left or not enough resources are
    available. All jobs are processed from top to bottom
    and when the first queued job will not fit the processing
    stops, even if there are smaller jobs further down in the
    queue that would be able to be launched this will not
    happen. That job has to wait until all previous queued
    jobs have been launched first.
    Jobs that are in Depend state are not included in the
    queue until dependent jobs are finished.
    '''

    def run_queue(self, jobs: list[QJob], nodes: list[QNode]) -> list[QRun]:
        '''
        Schedule jobs FIFO (by init date) and stop at the first job
        that does not fit.

        Note: sorts `jobs` and `nodes` in place and increments
        `alloc_cpus` on the nodes chosen for each launched job.
        '''
        jobs.sort()
        nodes.sort()

        runs: list[QRun] = []
        for j in jobs:
            # A job asking for more hosts than exist can never run;
            # skip it so it cannot block the queue forever.
            if j.hosts_alloc > len(nodes):
                continue

            # Reserve cores on the least-loaded nodes first.
            alloc_nodes: list[QNode] = []
            for n in nodes:
                if len(alloc_nodes) >= j.hosts_alloc:
                    break
                if n.total_cpus - n.alloc_cpus >= j.cores_alloc:
                    n.alloc_cpus += j.cores_alloc
                    alloc_nodes.append(n)

            if len(alloc_nodes) == j.hosts_alloc:
                runs.append(QRun(j, [n.nodeid for n in alloc_nodes]))
            else:
                # Bug fix: roll back the cores reserved for a job that
                # could not get all its hosts, so the nodes are not left
                # with phantom allocations.
                for n in alloc_nodes:
                    n.alloc_cpus -= j.cores_alloc
                # FIFO semantics: the first job that does not fit
                # stops all further processing.
                break

        return runs



class Fill(Queue):
    '''
    The fill queueing system in the l3q daemon
    is similar to a FIFO (first in, first out) stack
    that fills up more jobs to make nodes as busy
    as possible.
    The first job that is added will be launched first,
    if there are more queued jobs they will all be
    processed in turn from the top. If there are
    enough resources available for the next queued job
    to be launched it will be started, until there are no
    queued jobs left or not enough resources are
    available. Jobs further down in the queue will
    be processed to see if they are able to fit in
    available resources. Processing of more jobs
    will finish when no job will fit in available
    resources.
    All jobs are processed from top to bottom
    and when the first queued job will not fit the processing
    continues to the next job in queue until no more jobs
    will fit.
    Jobs that are in Depend state are not included in the
    queue until dependent jobs are finished.
    '''

    def run_queue(self, jobs: list[QJob], nodes: list[QNode]) -> list[QRun]:
        '''
        Schedule jobs FIFO (by init date) but keep trying later jobs
        when an earlier one does not fit (backfill behaviour).

        Note: sorts `jobs` and `nodes` in place and increments
        `alloc_cpus` on the nodes chosen for each launched job.
        '''
        jobs.sort()
        nodes.sort()

        runs: list[QRun] = []
        for j in jobs:
            # A job asking for more hosts than exist can never run.
            if j.hosts_alloc > len(nodes):
                continue

            # Reserve cores on the least-loaded nodes first.
            alloc_nodes: list[QNode] = []
            for n in nodes:
                if len(alloc_nodes) >= j.hosts_alloc:
                    break
                if n.total_cpus - n.alloc_cpus >= j.cores_alloc:
                    n.alloc_cpus += j.cores_alloc
                    alloc_nodes.append(n)

            if len(alloc_nodes) == j.hosts_alloc:
                runs.append(QRun(j, [n.nodeid for n in alloc_nodes]))
            else:
                # Bug fix: the original never rolled back a partial
                # allocation, so a job that could not get all its hosts
                # still left cores reserved, wrongly shrinking the
                # capacity seen by every later job in this pass.
                for n in alloc_nodes:
                    n.alloc_cpus -= j.cores_alloc

        return runs


