import random
from typing import List, Optional, Tuple, Union

import torch
import sys
from entity.processor import Processor
import torch.nn.functional as F

from entity.task import Task
from utils.gen import gen_test_cases
from constants import TASK_BATCH


class Environment:
    """RL environment that schedules batches of tasks onto processors.

    Tasks are sorted earliest-deadline-first and consumed in batches of
    TASK_BATCH.  Each step() places one task of the current batch on the
    processor with the lowest expected makespan and rewards the agent for
    keeping processor loads balanced and deadlines met.
    """

    def __init__(self, tasks: List[Task], processors: List[Processor]):
        self.tasks = tasks
        self.cur_batch_idx = 0   # 1-based index of the active batch; 0 = not started
        self.cur_task_idx = 0    # cumulative count of tasks scheduled so far
        # Integer division (bugfix): the original float division made the
        # `cur_batch_idx == n_batches` termination check unreachable whenever
        # len(tasks) was not a multiple of TASK_BATCH.  Only complete batches
        # are played out; a trailing partial batch is ignored.
        self.n_batches = len(tasks) // TASK_BATCH
        self.processors = processors
        self.schedule_map = {}   # task.id -> id of the processor it was placed on
        # Earliest deadline first; shorter task wins ties.
        self.tasks.sort(key=lambda t: (t.deadline, t.length()))
        self.task_batch: List[Task] = []

    def cur_task_batch(self) -> Optional[List[Task]]:
        """Return the tasks of the current batch, or None when all batches are used."""
        if self.cur_batch_idx <= self.n_batches:
            # cur_batch_idx is 1-based, hence the -1.
            start = (self.cur_batch_idx - 1) * TASK_BATCH
            return self.tasks[start:start + TASK_BATCH]
        return None

    def calc_exec_time(self, task: Task, processor: Processor) -> float:
        """Execution time of `task` on `processor`: length / compute ability."""
        return task.length() / processor.compute_ability

    def calc_reward(self, task_idx: int) -> float:
        """Place task_batch[task_idx] greedily and return its reward.

        The task is committed to the processor with the smallest expected
        makespan (current load + execution time).  The reward is a
        load-balance term normalized to [0, 1], minus 1 if the chosen
        placement misses the task's deadline.
        """
        task = self.task_batch[task_idx]
        # Snapshot of each processor's load *before* this placement.
        makespan_map = {p.id: p.load for p in self.processors}

        min_expected_makespan = sys.maxsize
        min_processor: Optional[Processor] = None
        max_exec_time = 0
        for processor in self.processors:
            exec_time = self.calc_exec_time(task, processor)
            expected_makespan = makespan_map[processor.id] + exec_time
            max_exec_time = max(max_exec_time, exec_time)
            if expected_makespan < min_expected_makespan:
                min_expected_makespan = expected_makespan
                min_processor = processor

        # Commit the greedy placement.
        min_processor.load = min_expected_makespan
        # Bugfix: record the *chosen* processor.  The original stored the
        # leftover dict-iteration variable `pid`, i.e. always the last
        # processor regardless of where the task actually went.
        self.schedule_map[task.id] = min_processor.id

        r = 0.0
        if min_expected_makespan > task.deadline:
            r -= 1  # deadline-miss penalty

        sum_makespan = sum(makespan_map.values())
        # Normalization bounds for the balance term.
        max_makespan = sum_makespan + max_exec_time          # worst case
        min_makespan = sum_makespan / len(self.processors)   # perfectly balanced
        makespan = max(makespan_map.values())                # current worst load

        span = max_makespan - min_makespan
        if span > 0:
            r += 1 - (makespan - min_makespan) / span
        else:
            # Degenerate case (all loads and execution times are zero):
            # count it as perfectly balanced instead of dividing by zero.
            r += 1
        return r

    def reset(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Advance to the next batch; restart a fresh episode when exhausted."""
        if self.no_incoming_tasks():
            # Episode finished: reshuffle the tasks and wipe all state.
            # NOTE(review): tasks are shuffled but not re-sorted here, so the
            # deadline ordering from __init__ only holds for the first
            # episode — confirm this is intended.
            random.shuffle(self.tasks)
            for processor in self.processors:
                processor.load = 0
            self.schedule_map.clear()
            self.cur_batch_idx = 1
            self.cur_task_idx = 0
        else:
            self.cur_batch_idx += 1
        return self.get_state()

    def get_task_batch_features(self, task_batch: List[Task]) -> torch.Tensor:
        """Stack per-task feature vectors into a 2-D float tensor."""
        return torch.FloatTensor([task.get_feature() for task in task_batch])

    def get_processor_features(self) -> torch.Tensor:
        """Concatenate all processor feature values into one flat 1-D tensor."""
        features: List[float] = []
        for processor in self.processors:
            features.extend(processor.get_feature())
        return torch.FloatTensor(features)

    def no_incoming_tasks(self) -> bool:
        """True once the last complete batch has been reached."""
        return self.cur_batch_idx == self.n_batches

    def get_state(self, last_task_id: Optional[int] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """Build the (task_features, processor_features) observation.

        `last_task_id` is the *index within task_batch* of the task just
        scheduled; when given, that task is dropped from the batch before
        the features are built.  When None, a fresh batch is loaded.
        """
        if last_task_id is None:
            self.task_batch = self.cur_task_batch()
        else:
            # Index-based deletion (the original's remove() re-scanned the
            # list for an element we already have the index of).
            del self.task_batch[last_task_id]

        task_features = self.get_task_batch_features(self.task_batch)
        processor_features = self.get_processor_features()
        # Tasks are normalized per feature vector (dim=-1), processors
        # across the whole flat vector (dim=0).
        return (F.normalize(task_features, dim=-1),
                F.normalize(processor_features, dim=0))

    def step(self, task_idx: int) -> Tuple[Optional[torch.Tensor], float, bool]:
        """Schedule task_batch[task_idx]; return (state, reward, done).

        `state` is None when the batch is finished (done=True); the caller
        is expected to call reset() to obtain the next batch's state.
        """
        r = self.calc_reward(task_idx)

        self.cur_task_idx += 1
        # cur_task_idx counts across batches, so any multiple of TASK_BATCH
        # marks the end of the current batch.
        if self.cur_task_idx % TASK_BATCH == 0:
            return None, r, True

        state = self.get_state(task_idx)
        return state, r, False