import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
import matplotlib.pyplot as plt
from env_test import TestEnv
import random
#=============== Hyper Parameters ====================================
HIDDEN_SIZE= 32  # width of both hidden layers in Net
N_ACTIONS = 6 #workers — one discrete action per worker the dispatcher can pick
N_STATES = 7 * 3  # flattened state-vector length fed to Net (presumably 7 features x 3 — TODO confirm against TestEnv)
#====================== Net Class ==========================================
class Net(nn.Module):
    """Q-network: two ReLU hidden layers mapping a flattened state vector
    of length N_STATES to one Q-value per action (N_ACTIONS workers)."""

    def __init__(self, ):
        super(Net, self).__init__()
        # NOTE: attribute names fc1/fc2/out must stay fixed — the saved
        # state_dict loaded in Test.__init__ keys its tensors on them.
        self.fc1 = nn.Linear(N_STATES, HIDDEN_SIZE)
        self.fc2 = nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)
        self.out = nn.Linear(HIDDEN_SIZE, N_ACTIONS)

    def forward(self, x):
        """Return the per-action Q-values for state tensor *x*."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.out(hidden)

#==================== Test ================================================
class Test():
    """Evaluation harness: runs the trained DQN greedily on TestEnv, logs the
    generated task traces, then replays round-robin (RR), random (RAND) and
    shortest-job-first (SJF) baseline schedulers over the same traces."""

    def __init__(self):
        self.model = Net()
        self.model.load_state_dict(torch.load('./model/dqn_model.pth'))
        self.model.eval()  # inference only — disable training-mode behavior
        self.env = TestEnv(20)
        self.path = './test_data/DQN/'

    def run(self):
        """Simulate one episode: greedily dispatch each task to the worker
        with the highest predicted Q-value, then persist tasks/actions/info."""
        s = self.env.reset()
        done = False
        info = None  # guard: avoids NameError if the episode ends immediately
        while not done:
            #======= get action ===================
            # Flatten the state into a 1-D tensor; greedy argmax over Q-values.
            state = torch.FloatTensor(s).flatten()
            with torch.no_grad():  # no autograd graph needed at test time
                q_values = self.model(state)
            action = int(torch.argmax(q_values))
            s, done, info = self.env.step(action)
        print(info)
        print(self.env.actions)
        self.dump_data(self.path+'tasks', self.env.tasks)
        self.dump_data(self.path+'actions', self.env.actions)
        self.dump_data(self.path+'info', info)

    def dump_data(self, save_path, data):
        """Append repr(data) as one line to *save_path*."""
        # with-block guarantees the handle is closed even if print() raises
        with open(save_path, 'a') as fp:
            print(data, file=fp)

    def get_info(self, dict_t):
        """Summarize a {worker: busy_time} dict (output of self.env.Run) into
        {'time': makespan, 'energy': total energy}.

        time   = max busy time over workers (the schedule's makespan)
        energy = sum of busy_time * POWER[worker] over workers
        """
        time = max(dict_t.values())
        energy = sum(t * self.env.POWER[worker] for worker, t in dict_t.items())
        return {'time': time, 'energy': energy}

    def _replay(self, out_dir, make_actions, transform=None):
        """Shared baseline driver: for each task line logged by the DQN runs,
        build an action list, simulate it with env.Run, and log the results.

        out_dir      -- directory prefix for the 'actions' and 'info' logs.
        make_actions -- callable() -> list of worker indices, one per task.
        transform    -- optional callable(tasks) -> tasks, applied pre-run.
        """
        # NOTE(security): eval() trusts the task log. Acceptable here because
        # the file is self-generated by run(), but never point this at
        # untrusted input (ast.literal_eval would be the safe alternative).
        with open(self.path + 'tasks') as fp:
            for line in fp:
                tasks = eval(line)
                if transform is not None:
                    tasks = transform(tasks)
                actions = make_actions()
                info = self.get_info(self.env.Run(tasks, actions))
                # save data
                self.dump_data(out_dir + 'actions', actions)
                self.dump_data(out_dir + 'info', info)

    def RR(self):
        """Round-robin baseline: task i goes to worker i % 6."""
        self._replay('./test_data/RR/', lambda: [i % 6 for i in range(20)])

    def RAND(self):
        """Random baseline: each task goes to a uniformly random worker."""
        self._replay('./test_data/RAND/',
                     lambda: [random.randint(0, 5) for i in range(20)])

    def SJF(self):
        """Shortest-job-first baseline: sort tasks ascending, then dispatch
        round-robin over the sorted order."""
        def _sort(tasks):
            tasks = sorted(tasks)
            print(tasks)  # debug trace kept from the original implementation
            return tasks
        self._replay('./test_data/SJF/',
                     lambda: [i % 6 for i in range(20)], transform=_sort)

if __name__ == '__main__':
    # Generate 20 DQN episodes (logging their task traces), then replay the
    # three baseline schedulers over those logged traces.
    tester = Test()
    for _ in range(20):
        tester.run()
    for baseline in (tester.RR, tester.RAND, tester.SJF):
        baseline()
