import os
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import copy
from replay_memory import ReplayMemory
from grid_world_v2 import GridWorld
import torch.nn as nn
import torch
import math
import random

## os.environ["THEANO_FLAGS"] = 'device=gpu1'
# from IPython.display import clear_output
## DQN Class
class Target_model(nn.Module):
    """Convolutional Q-network: maps a (N, 4, 10, 10) state tensor to
    (N, 5) action values.

    NOTE: the layer ordering inside ``conv_cls`` (including the ReLU that
    sits between Flatten and the final Linear) is kept exactly as-is so the
    ``nn.Sequential`` state_dict keys stay compatible with existing
    checkpoints.
    """

    def __init__(self):
        super(Target_model, self).__init__()
        stack = []
        channels_in = 4
        # Four identical 3x3 conv + ReLU stages (indices 0-7 in the Sequential).
        for _ in range(4):
            stack.append(nn.Conv2d(channels_in, 32, kernel_size=3, padding=1))
            stack.append(nn.ReLU(inplace=False))
            channels_in = 32
        stack.append(nn.Flatten())
        stack.append(nn.ReLU(inplace=False))
        # 32 channels * 10 * 10 spatial = 3200 flattened features -> 5 actions.
        stack.append(nn.Linear(3200, 5))
        self.conv_cls = nn.Sequential(*stack)

    def forward(self, x):
        """Return Q-values for each of the 5 actions."""
        return self.conv_cls(x)

#grid_world = GridWorld()
#grid_world.print_grid()

class DQN(object):
    """Deep Q-Network trainer for the multi-car GridWorld environment.

    Combines a convolutional Q-model (``Target_model``), an experience
    replay buffer and an epsilon-greedy exploration schedule, and runs the
    standard DQN loop: play episodes, store transitions, fit on sampled
    minibatches.
    """

    def __init__(self, gamma=0.975, epsilon=1, epsilon_decay=.99, terminal_reward=10, env=None,
                 memory=None, reward_discount_factor=0.0, model_name=None, total_cars=3, grid_size=6):
        # gamma: discount factor applied to the bootstrapped max Q-value.
        # epsilon / epsilon_decay: epsilon-greedy schedule; epsilon decays
        #   multiplicatively once per epoch after the replay buffer fills.
        # env: GridWorld-like object (reset/stepAll/isTerminal/t are used).
        # memory: ReplayMemory-like object (addToMemory/isFull/getMinibatch).
        # model_name: None for a fresh network, 'Naive' for the baseline
        #   sentinel, or a checkpoint path to restore weights from.
        self.env = env
        self.losses = []          # per-fit loss history consumed by graphLoss()
        self.all_games_history = []
        self.replay = memory
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.reward_discount_factor = reward_discount_factor
        self.terminal_reward = terminal_reward
        self.grid_size = grid_size
        self.total_cars = total_cars
        self.model = self.loadModel(model_name)

    def loadModel(self, model_name):
        """Return the Q-model: the 'Naive' sentinel string, a fresh
        Target_model, or one restored from a checkpoint file."""
        if model_name == 'Naive':
            return 'Naive'
        model = Target_model()
        # FIX: identity comparison with None (was `model_name != None`).
        if model_name is not None:
            checkpoint = torch.load(model_name, map_location=torch.device('cpu'))
            model.load_state_dict(checkpoint['state_dict'])
        return model

    def createTraining(self, minibatch):
        """Build one (X, y) training pair from a replay minibatch.

        Each minibatch row is (old_state, action, reward, new_state,
        terminal). The target y equals the network's current Q-values
        everywhere except the taken action, which is replaced by the
        Bellman update r + gamma * max_a' Q(s', a') (just r on terminal
        transitions).
        """
        minibatch_arr = np.asarray(minibatch)
        old_state_m = minibatch_arr[:, 0]
        action_m = minibatch_arr[:, 1]
        reward_m = minibatch_arr[:, 2]
        new_state_m = minibatch_arr[:, 3]
        terminal_m = minibatch_arr[:, 4]

        # States are stored as (4, 10, 10) planes; stack and reshape into
        # a (batch, 4, 10, 10) float tensor.
        old_trm = np.vstack(old_state_m)
        old_state_m_tenor = torch.from_numpy(old_trm).view(-1, 4, 10, 10).to(torch.float32)
        old_qval = self.model(old_state_m_tenor)

        new_trm = np.vstack(new_state_m)
        new_state_m_tenor = torch.from_numpy(new_trm).view(-1, 4, 10, 10).to(torch.float32)
        newq = self.model(new_state_m_tenor)
        new_max_value = torch.max(newq, 1)
        maxq = new_max_value.values * self.gamma
        # The (1 - terminal) mask zeroes the bootstrapped term on terminal
        # transitions, leaving only the raw reward.
        update = reward_m + maxq.detach().numpy() * (1 - terminal_m)

        # (row, action) index pairs selecting the taken action in each row.
        aux_col = np.arange(len(action_m))
        aux_action_m = np.column_stack((aux_col, action_m)).astype(int)

        # FIX: .copy() — detach().numpy() shares storage with old_qval, so
        # without it the in-place target write below would silently mutate
        # the network's output tensor as well.
        y_old_qval = old_qval.detach().numpy().copy()
        y_old_qval[(aux_action_m[:, 0], aux_action_m[:, 1])] = update
        y_old_qval_tenor = torch.from_numpy(y_old_qval).view(-1, 5).to(torch.float32)

        return old_state_m_tenor, y_old_qval_tenor

    def graphLoss(self):
        """Plot every recorded per-fit loss against its index."""
        epochs = []
        losses_A = []
        for count, y in enumerate(self.losses):
            losses_A.append(y[0])
            epochs.append(count)
        plt.plot(epochs, losses_A)

    def train(self, epochs, episodes, max_episode_length, output_weights_name):
        """Run the DQN training loop and return the last minibatch loss.

        Plays `episodes` games per epoch (each capped at
        `max_episode_length` timesteps), pushing every agent transition
        into the replay buffer. Once the buffer is full, the network is fit
        on one sampled minibatch after every episode, epsilon decays once
        per epoch, and a checkpoint is saved every 100 epochs under
        weights/.

        Returns None if the replay buffer never filled (no fit happened).
        """
        criterion1 = torch.nn.MSELoss()  # .cuda() # for Global loss
        optimizer = torch.optim.Adam(self.model.parameters())
        torch.autograd.set_detect_anomaly(True)
        # FIX: pre-initialize so the return below cannot raise
        # UnboundLocalError when the replay buffer never fills.
        losses = None
        for i in range(epochs + 1):
            start_time = time.time()
            for j in range(episodes):
                terminal = False
                self.env.reset()
                while (not terminal and self.env.t < max_episode_length):
                    self.env.t += 1
                    # Step all agents one timestep (epsilon-greedy policy).
                    all_agents_step = self.env.stepAll(self.model, self.epsilon)
                    # Episode ends once enough customers have been picked up.
                    terminal = self.env.isTerminal()
                    # Record every agent's transition in the replay buffer.
                    for memory in all_agents_step:
                        self.replay.addToMemory(memory, terminal)
                # After each episode, fit on a minibatch sampled from replay.
                if self.replay.isFull():
                    minibatch = self.replay.getMinibatch()
                    X_train, y_train = self.createTraining(minibatch)
                    y_output = self.model(X_train)
                    losses = criterion1(y_output, y_train)

                    # compute gradient and do Optimization step
                    optimizer.zero_grad()
                    losses.backward()
                    optimizer.step()
                    # FIX: record the loss so graphLoss() has data to plot
                    # (self.losses was never appended to before).
                    self.losses.append([losses.item()])
            end_time = time.time()
            print("Epoch #: %s" % (i,), end_time - start_time)

            # Exponentially decay epsilon and periodically checkpoint.
            if self.replay.isFull():
                self.epsilon = self.epsilon * self.epsilon_decay
                if np.mod(i, 100) == 0:
                    log_state = {'epoch': i + 1, 'state_dict': self.model.state_dict(), 'optimizer': optimizer.state_dict()}
                    # FIX: ensure the checkpoint directory exists before saving.
                    os.makedirs('weights', exist_ok=True)
                    filepath = 'weights/' + output_weights_name + '_' + str(i) + '.h5'
                    torch.save(log_state, filepath)
        return losses


if __name__ == '__main__':
    ## Train Model
    # 10x10 map: two 4x4 obstacle blocks occupying columns 3-6, one over
    # rows 0-3 and one over rows 6-9, separated by an open two-row
    # corridor at rows 4-5.
    obstacle_grid = np.zeros((10, 10), dtype=int)
    obstacle_grid[0:4, 3:7] = 1
    obstacle_grid[6:10, 3:7] = 1

    memory = ReplayMemory(buffer=5000, batchSize=100)
    env = GridWorld(num_cars=2, grid_size=10, terminal_reward=5, demand_limit=2, obstacle_grid=obstacle_grid)

    dqn = DQN(memory=memory, env=env) ##, model_name = './weights/Gridworld_10x10_4Layer_Conv_5.h5')

    losses = dqn.train(
        epochs=4000,
        episodes=100,
        max_episode_length=200,
        output_weights_name='Gridworld_10x10_4Layer_Conv',
    )
    print(losses)


