from task_generator import *
from app_masterworkers import master, worker
from simgrid import Actor, Engine, Host, Mailbox, this_actor, Mutex
import gym
from gym import spaces
import app_masterworkers
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

# Client-side RL environment wrapping the SimGrid master/workers simulation.
class LHPCSEnv(gym.Env):
    """Gym environment for scheduling tasks onto heterogeneous SimGrid hosts.

    Observation: a (num_workers + 1, 3) matrix. Each worker row holds
    [host speed, host wattage, accumulated compute load]; the final row
    describes the next pending task as [compute cost, communicate cost,
    task index] (zeros once every task has been scheduled).

    Action: ``Discrete(num_workers)`` — the index of the worker host that
    receives the current task.

    Reward: negative variance of the workers' normalised finish times
    (load fairness) plus makespan and energy ratios against a round-robin
    baseline precomputed by :meth:`RR_Sim`.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, tasks_size=10):
        """Build the SimGrid engine, generate tasks and the RR baseline.

        Args:
            tasks_size: number of tasks generated for each episode.
        """
        super(LHPCSEnv, self).__init__()
        # ================ SimGrid ==========================
        argv = ['app_masterworkers.py', './platform/platform.xml', 'deployment.xml']
        self.e = Engine(argv)
        # Load the platform description and then deploy the application.
        self.e.load_platform(argv[1])
        # ============= Env ======================
        hosts = self.e.all_hosts
        # One row per worker host (every host except the master) plus one
        # row for the next pending task.
        self.observation_space = spaces.Box(low=0, high=np.inf,
                                    shape=(len(hosts) - 1 + 1, 3), dtype=np.float64)
        self.action_space = spaces.Discrete(len(hosts) - 1)
        self.tasks_size = tasks_size
        self.tasks = TaskGenerator(500e6, 800e6, 10e6, 100000, 1e6, 10000, tasks_size)
        # ============ host information ===============
        # Static per-worker data: [speed, wattage, accumulated load].
        self.workers_size = len(hosts) - 1
        self.hosts_info = []
        # Wattage per host name; 'Tremblay' acts as the master.
        self.POWER = {'Tremblay': 70, 'Jupiter': 55, 'Fafard': 50, 'Ginette': 35, 'Bourassa': 30, 'Jacquelin': 100, 'Boivin': 70}
        for host in hosts:
            if host.name == 'Tremblay':
                # The master host never executes tasks itself.
                continue
            self.hosts_info.append([host.speed, self.POWER[host.name], 0])
        self.p = 0          # index of the next task to schedule
        self.actions = []   # worker index chosen for each task so far
        self.state = None
        # Round-robin baseline: [makespan, energy] for every task prefix.
        self.RR_Res = self.RR_Sim()

    def step(self, action):
        """Assign task ``self.p`` to worker ``action`` and re-simulate.

        The whole prefix of already-assigned tasks is re-run through the
        simulator each step, so rewards reflect the cumulative schedule.

        Args:
            action: integer worker index in ``[0, workers_size)``.

        Returns:
            Tuple ``(observation, reward, done, info, time, energy)`` —
            note the two extra trailing values beyond the standard gym API.
        """
        done = False
        info = "none"
        self.actions.append(action)
        task = self.tasks[self.p]
        # ===== action: add the task's compute cost to the chosen worker ====
        self.state[action][2] += task[0]
        self.p += 1
        # The last row always describes the next pending task.
        if self.p < self.tasks_size:
            self.state[-1] = [self.tasks[self.p][0], self.tasks[self.p][1], self.p]
        else:
            self.state[-1] = [0, 0, self.p]
        # ======== reward: time =====================
        tasks = [self.tasks[i] for i in range(len(self.actions))]
        dict_t = self.Run(tasks, self.actions)
        t = list(dict_t.values())
        max_t = max(t)
        min_t = min(t)
        if max_t > min_t:
            # Normalise finish times to [0, 1] before taking the variance.
            nt = [(v - min_t) / (max_t - min_t) for v in t]
        else:
            # All reported finish times are equal (or only one worker ran).
            # The original code divided by zero here; equal times mean zero
            # spread, so the normalised vector is all zeros.
            nt = [0.0] * len(t)
        time = max_t
        reward = -np.var(nt) + self.RR_Res[self.p - 1][0] / time

        # ======== reward: energy ==================
        # Energy = per-host busy time multiplied by that host's wattage.
        energy = 0
        for key, value in dict_t.items():
            energy += value * self.POWER[key]
        reward += self.RR_Res[self.p - 1][1] / energy

        # ======== done ======================
        if self.p >= len(self.tasks):
            done = True
        observation = self.state
        return observation, reward, done, info, time, energy

    def reset(self):
        """Reset episode counters and return the initial observation.

        NOTE(review): the worker rows of the returned state alias the
        ``self.hosts_info`` lists, so ``step()`` mutations show up there
        until the next reset. Preserved as-is since the loads are zeroed
        here anyway.
        """
        self.p = 0
        self.actions = []
        # ============== state ==================
        observation = []
        for row in self.hosts_info:
            row[2] = 0  # clear the load accumulated in the previous episode
            observation.append(row)
        # Final row: the first pending task.
        observation.append([self.tasks[0][0], self.tasks[0][1], 0])
        self.state = observation
        return observation  # reward, done, info can't be included

    def close(self):
        """No resources to release; required by the gym interface."""
        pass

    def Run(self, tasks, actions):
        """Simulate ``tasks`` mapped onto workers according to ``actions``.

        The master actor is (re)created and the engine run; workers report
        their finish times to './log.txt' as ``<host>: <time>`` lines,
        which are parsed here and the file truncated for the next run.

        Returns:
            Dict mapping host name to its reported finish time (seconds,
            presumably — TODO confirm against the worker implementation).
        """
        res = {}
        Actor.create('Tremblay', Host.by_name('Tremblay'), master, tasks, actions)
        self.e.run()
        with open('./log.txt', 'r') as fp:
            for raw in fp:
                raw = raw.strip('\n')
                if not raw:
                    # The original parser stopped at the first blank line.
                    break
                parts = raw.split(': ')
                res[parts[0]] = float(parts[1])
        # Truncate the log so the next simulation starts clean.
        with open('./log.txt', 'w'):
            pass
        return res

    def render(self, mode='human'):
        """Visualisation is not implemented."""
        pass

    def CreateTasks(self, n):
        """Return ``n`` identical [compute_cost, communicate_cost] tasks.

        Kept as an alternative to TaskGenerator for debugging.
        """
        tasks = []
        for _ in range(n):
            compute_cost = 5000000000
            communicate_cost = 100000
            tasks.append([compute_cost, communicate_cost])
        return tasks

    def RR_Sim(self):
        """Precompute the round-robin baseline used to scale rewards.

        For every prefix of the task list, simulate a round-robin
        assignment and record its [makespan, total energy].
        """
        res = []
        for i in range(len(self.tasks)):
            tasks = [self.tasks[j] for j in range(i + 1)]
            actions = [j % len(self.hosts_info) for j in range(i + 1)]
            dict_t = self.Run(tasks, actions)
            time = 0
            energy = 0
            for key, value in dict_t.items():
                time = max(time, value)
                energy += value * self.POWER[key]
            res.append([time, energy])
        return res



if __name__ == '__main__':
    # Smoke test: run one full episode, cycling over the workers.
    env = LHPCSEnv(20)
    x = env.reset()
    # Derive the worker count from the env instead of hard-coding 6, so the
    # test stays correct if the platform file defines a different host set.
    n_actions = env.action_space.n
    ep_r = 0
    i = 0
    o, r, done, _, time, e = env.step(i)
    ep_r += r
    while not done:
        i += 1
        o, r, done, _, time, e = env.step(i % n_actions)
        ep_r += r
        print(r, time, e)
    print(ep_r)
    print(env.actions)