import collections
import copy


class SRSFPolicy(object):
    """Shortest-Remaining-Service-First (SRSF) scheduling policy.

    Jobs are ordered by their remaining service (work left to do); the jobs
    with the least remaining service are granted GPUs first. Jobs that cannot
    be fully placed are left PENDING and lose any previous allocation.
    """

    def __init__(self, time_fn):
        # Callable returning the current (simulated or wall-clock) time.
        self._time_fn = time_fn
        self._solve_starvation = 0
        self._queue = []   # job names, ordered by remaining service (ascending)
        self._status = {}  # job name -> 'RUNNING' or 'PENDING'

        # In Counter, the default value of a non-existing key is 0.
        self._last_check_time = collections.Counter()
        self._total_executed_time = collections.Counter()  # total time using GPUs
        self._executed_time = collections.Counter()        # GPU time over the last consecutive running period
        self._pending_time = collections.Counter()         # total pending time
        self._last_pending_time = collections.Counter()    # pending time over the last consecutive pending period

        self._remaining_service = collections.Counter()

    def optimize(self, jobs, nodes, prev_allocations, node_template):
        """Compute GPU allocations for the current set of jobs.

        Args:
            jobs: dict of job_name -> job info for jobs that are submitted but
                not completed; each job exposes ``remaining_service`` and
                ``max_replicas``.
            nodes: dict of node_idx -> node info exposing
                ``resources['nvidia.com/gpu']``.
            prev_allocations: dict of job_name -> list of node indices from the
                previous round; retained for jobs that stay RUNNING.
            node_template: unused; kept for interface compatibility.

        Returns:
            Tuple ``(allocations, num_active_nodes)`` where ``allocations``
            maps each RUNNING job to a list of node indices (one per replica).
        """
        event_time = int(self._time_fn())

        # Drop bookkeeping for jobs that completed since the last call.
        self._queue = [key for key in self._queue if key in jobs]
        self._status = {key: val for key, val in self._status.items() if key in jobs}
        allocations = {key: val for key, val in prev_allocations.items() if key in jobs}

        # Register newly submitted jobs as PENDING.
        # NOTE(review): _last_check_time is not initialized here, so a new
        # job's first accounting interval spans from t=0 (Counter default) to
        # event_time -- confirm time_fn is relative to policy start.
        for key in jobs:
            if key not in self._status:
                self._status[key] = 'PENDING'
                self._queue.append(key)

        # Update per-job running/pending time accounting since the last call.
        for key in jobs:
            assert self._status[key] in ('RUNNING', 'PENDING')
            elapsed = int(event_time - self._last_check_time[key])
            self._last_check_time[key] = event_time
            if self._status[key] == 'RUNNING':
                self._total_executed_time[key] = int(self._total_executed_time[key] + elapsed)
                self._executed_time[key] = int(self._executed_time[key] + elapsed)
                self._last_pending_time[key] = 0  # no longer pending
            else:  # 'PENDING'
                self._pending_time[key] = int(self._pending_time[key] + elapsed)  # total pending time
                self._last_pending_time[key] = int(self._last_pending_time[key] + elapsed)
                self._executed_time[key] = 0  # consecutive running streak is broken

        # SRSF: serve jobs with the least remaining service first.
        self._queue = sorted(self._queue, key=lambda x: jobs[x].remaining_service)

        # Greedily mark jobs RUNNING while enough total GPUs remain; demote
        # the rest to PENDING and revoke their previous allocations.
        total_gpus = {idx: int(node.resources['nvidia.com/gpu']) for idx, node in nodes.items()}
        num_gpus = sum(total_gpus.values())
        for idx in self._queue:
            if jobs[idx].max_replicas <= num_gpus:  # has enough resources
                self._status[idx] = 'RUNNING'
                num_gpus -= jobs[idx].max_replicas
            else:
                self._status[idx] = 'PENDING'
                allocations.pop(idx, None)  # preempted jobs lose their GPUs

        # Assign concrete node indices to RUNNING jobs that lack an
        # allocation, packing each job onto the node(s) with the most free GPUs.
        free_gpus = collections.Counter(total_gpus) - collections.Counter(sum(allocations.values(), []))
        for idx in self._queue:
            if self._status[idx] == 'RUNNING' and not allocations.get(idx):
                allocations[idx] = []
                while len(allocations[idx]) < jobs[idx].max_replicas:
                    node_idx, count = free_gpus.most_common(1)[0]
                    if count <= 0:
                        # Should be unreachable given the capacity check above;
                        # guard against an infinite loop if invariants break
                        # (e.g. a retained allocation larger than expected).
                        break
                    num = min(count, jobs[idx].max_replicas - len(allocations[idx]))
                    allocations[idx].extend([node_idx] * num)
                    free_gpus[node_idx] -= num

        # Allocations and the number of active nodes.
        return allocations, len(nodes)
