from task_generator import *
from app_masterworkers import master, worker
from simgrid import Actor, Engine, Host, Mailbox, this_actor, Mutex
import gym
from gym import spaces
import app_masterworkers
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# When True, observations returned by TestEnv.reset()/step() are min-max
# normalized via TestEnv.State_Normalize; otherwise raw state rows are returned.
NORMALIZED_STATE_FLAG = True

def min_max_normalize(lst):
    """Min-max normalize a sequence into the [0, 1] range.

    Args:
        lst: non-empty sequence of numbers (plain list or 1-D numpy slice).

    Returns:
        A list of the elements scaled as ``(x - min) / (max - min)``.
        If all elements are equal (zero range) the values are returned
        unchanged, mirroring the original fallback.
    """
    max_val = max(lst)
    min_val = min(lst)
    span = max_val - min_val
    # Guard BEFORE dividing: the original only checked the zero-range case
    # after the division, which raises ZeroDivisionError for plain lists
    # (and produces NaNs with a warning for numpy input).
    if span == 0:
        return list(lst)
    return [(x - min_val) / span for x in lst]

# Client-side gym environment: drives the SimGrid master/worker application.
class TestEnv(gym.Env):
    """Gym environment for scheduling a batch of tasks onto SimGrid workers.

    One episode assigns ``tasks_size`` generated tasks, one per ``step``,
    to worker hosts.  The observation is a matrix with one row per worker
    (``[cpu speed, wattage, accumulated workload]``) plus a trailing row
    describing the next pending task (``[flops, bytes, task index]``).

    NOTE(review): ``step`` returns ``(observation, done, info)`` — it omits
    the reward required by the standard gym 4-tuple API.  Callers in this
    project consume the 3-tuple, so the signature is kept unchanged; the
    per-step 'time' (makespan) and 'energy' live in ``info``.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, tasks_size=20):
        """Build the SimGrid engine, the gym spaces and the host table.

        Args:
            tasks_size: number of tasks generated per episode.
        """
        super(TestEnv, self).__init__()
        #================ SimGrid ==========================
        argv = ['app_masterworkers.py', '../platform/platform.xml', 'deployment.xml']
        self.e = Engine(argv)
        self.e.load_platform(argv[1])
        #============= Env ======================
        hosts = self.e.all_hosts
        # Rows: (#hosts - master + 1 task row) x 3 columns.
        self.observation_space = spaces.Box(low=0, high=np.inf,
                                    shape=(len(hosts) - 1 + 1, 3), dtype=np.float64)
        # One discrete action per worker host (the master is excluded).
        self.action_space = spaces.Discrete(len(hosts) - 1)
        self.tasks_size = tasks_size
        self.tasks = TaskGenerator(500e6, 800e6, 10e6, 100000, 1e6, 10000, tasks_size)
        self.normalized_tasks = None  # lazy cache, built in State_Normalize
        #============ information ===============
        # Static per-worker data: [cpu speed, wattage, accumulated workload].
        self.workers_size = len(hosts) - 1
        self.hosts_info = []
        self.POWER = {'Tremblay': 70, 'Jupiter': 55, 'Fafard': 50, 'Ginette': 35, 'Bourassa': 30, 'Jacquelin': 100, 'Boivin': 70}
        for host in hosts:
            if host.name == 'Tremblay':
                continue  # 'Tremblay' acts as the master, not a worker
            self.hosts_info.append([host.speed, self.POWER[host.name], 0])
        self.p = 0         # index of the next task to schedule
        self.actions = []  # actions taken so far this episode
        self.state = None  # set by reset()

    def step(self, action):
        """Assign the current task to worker ``action`` and re-simulate.

        Args:
            action: int index of the chosen worker host.

        Returns:
            ``(observation, done, info)`` — no reward (see class docstring).
            ``info`` is ``{'time': makespan, 'energy': total energy}`` for
            the schedule built so far.
        """
        done = False
        self.actions.append(action)
        task = self.tasks[self.p]
        #========= apply action: add the task's flops to that worker ======
        self.state[action][2] += task[0]
        #========= advance to the next pending task =======================
        self.p += 1
        if self.p < self.tasks_size:
            self.state[-1] = [self.tasks[self.p][0], self.tasks[self.p][1], self.p]
        else:
            self.state[-1] = [0, 0, self.p]  # no task left to describe
        # Re-run the whole partial schedule from scratch each step.
        tasks = [self.tasks[i] for i in range(len(self.actions))]
        dict_t = self.Run(tasks, self.actions)
        #======== makespan: the slowest host determines the time ==========
        time = max(dict_t.values())
        #======== reward signal: per-host runtime x wattage ===============
        energy = sum(value * self.POWER[key] for key, value in dict_t.items())
        #======== done: episode ends when every task is scheduled =========
        if self.p >= len(self.tasks):
            done = True
        if NORMALIZED_STATE_FLAG:
            observation = self.State_Normalize(self.state)
        else:
            observation = self.state
        # ========= info ==============
        info = {'time': time, 'energy': energy}
        return observation, done, info

    def reset(self):
        """Start a new episode with a fresh task batch; return the state."""
        self.tasks = TaskGenerator(500e6, 800e6, 10e6, 100000, 1e6, 10000, self.tasks_size)
        self.p = 0
        self.actions = []
        self.normalized_tasks = None  # invalidate the normalization cache
        # ============== state ==================
        observation = []
        for i in range(len(self.hosts_info)):
            self.hosts_info[i][2] = 0  # clear workload from the last episode
        for info in self.hosts_info:
            # NOTE: rows are shared by reference with hosts_info, so the
            # in-place update in step() also mutates hosts_info; the zeroing
            # above restores them each episode, keeping the aliasing benign.
            observation.append(info)
        # Trailing row describes the first pending task.
        observation.append([self.tasks[0][0], self.tasks[0][1], 0])
        self.state = observation
        if NORMALIZED_STATE_FLAG:
            observation = self.State_Normalize(observation)
        return observation # reward, done, info can't be included

    def close(self):
        """Nothing to release explicitly; SimGrid owns its resources."""
        pass

    def Run(self, tasks, actions):
        """Simulate the schedule and parse per-host runtimes from log.txt.

        Args:
            tasks: the tasks scheduled so far.
            actions: worker index chosen for each task.

        Returns:
            dict mapping host name -> simulated runtime (seconds).
        """
        res = {}
        Actor.create('Tremblay', Host.by_name('Tremblay'), master, tasks, actions)
        self.e.run()
        # The SimGrid app writes lines of the form "<host>: <seconds>".
        with open('./log.txt', 'r') as fp:
            line = fp.readline().strip('\n')
            while line:  # stop at the first blank line / EOF
                parts = line.split(': ')
                res[parts[0]] = float(parts[1])
                line = fp.readline().strip('\n')
        # Truncate the log so the next run starts from an empty file.
        with open('./log.txt', 'w'):
            pass
        return res

    def render(self, mode='human'):
        """Visualization is not implemented."""
        pass

    def State_Normalize(self, state):
        """Return a min-max normalized copy of ``state``.

        Worker rows are normalized column-wise across workers; the trailing
        task row is replaced by the cached normalized task description plus
        the episode progress ``p / len(tasks)``.
        """
        arr = np.array(state)
        # Normalize only the worker rows (all but the last, task, row).
        col0 = min_max_normalize(arr[0:len(arr) - 1, 0])
        col1 = min_max_normalize(arr[0:len(arr) - 1, 1])
        col2 = min_max_normalize(arr[0:len(arr) - 1, 2])
        # =========== normalized tasks (built once per episode) ==========
        if self.normalized_tasks is None:  # bugfix: was '== None'
            tasks = np.array(self.tasks)
            # Bugfix: normalize over ALL tasks.  The original sliced
            # [0:len-1] (copied from the state-row slicing above) and
            # silently dropped the last task, which was then always
            # reported to the agent as [0, 0].
            t_col0 = min_max_normalize(tasks[:, 0])
            t_col1 = min_max_normalize(tasks[:, 1])
            self.normalized_tasks = [[t_col0[i], t_col1[i]]
                                     for i in range(len(t_col0))]
        # ========== assemble the normalized observation =================
        p = arr[-1, -1]
        res = [[col0[i], col1[i], col2[i]] for i in range(len(col0))]
        if p < len(self.normalized_tasks):
            # Copy the cached entry: appending the progress value below must
            # not mutate the cache (bugfix: the original appended in place).
            task = list(self.normalized_tasks[int(p)])
        else:
            task = [0, 0]
        task.append(p / len(self.tasks))
        res.append(task)
        return res




if __name__ == '__main__':
    # Smoke test: run one full episode, cycling actions round-robin over
    # the workers.  Uses the environment's own action count instead of the
    # hard-coded constant 6, so it stays correct if the platform changes.
    env = TestEnv(20)
    x = env.reset()
    # print(x)
    # print(torch.FloatTensor(x))
    n_actions = env.action_space.n
    i = 0
    o, done, info = env.step(i)
    while not done:
        i += 1
        o, done, info = env.step(i % n_actions)
        print(o)
        print(info)
        print("======================")