from IPython.display import clear_output
import random
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import copy
from grid_world_v2 import GridWorld
from replay_memory import ReplayMemory
from GridWorld_Game_Formulation_Final import DQN,Target_model
import torch.nn as nn
import torch
import math
import render


class Tester(object):
    """Evaluates a trained DQN policy against a naive baseline on GridWorld.

    For each game a customer-demand episode is generated once and then replayed
    by both policies on deep copies of the same environment, so the comparison
    is move-for-move on identical demand.
    """

    def __init__(self, env, weights_path):  ###network,
        """
        Args:
            env: GridWorld instance used as the template environment.
            weights_path: path to a torch checkpoint holding a 'state_dict'.
        """
        self.env = env
        # The architecture is fixed by the project; only weights are loaded.
        self.network = Target_model()
        self.loadedModel = self.loadWeights(weights_path, self.network)
        # Demand episode shared by the naive and model runs of one game.
        self.cust_episode = None
        self.Naive_Game_Histories = []
        self.Model_Game_Histories = []

    def loadNetwork(self, netowrk_name):
        """Load a model architecture from a JSON file under ./models/.

        NOTE(review): `model_from_json` is a Keras API that is never imported
        in this PyTorch file, so calling this raises NameError — it looks like
        dead code from an earlier Keras version (its only caller is commented
        out in __init__). Kept for interface compatibility; the file handle is
        now closed via a context manager.
        """
        with open('models/' + str(netowrk_name), 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        return loaded_model

    def loadWeights(self, weights_path, network):
        """Load checkpoint['state_dict'] from `weights_path` into `network`.

        Weights are mapped to CPU so checkpoints trained on GPU still load.
        Returns the same network object for convenience.
        """
        checkpoint = torch.load(weights_path, map_location=torch.device('cpu'))
        network.load_state_dict(checkpoint['state_dict'])
        return network

    def loopPolicy(self, games, max_test_len):
        """Play `games` paired games and report average moves per policy.

        Each game: reset the template env, generate a fresh demand episode of
        length max_test_len + 1, then replay it with the naive policy
        (epsilon=1, fully random) and with the loaded model (epsilon=0, greedy).

        Returns:
            (naive_avg_moves, model_avg_moves): mean timesteps to terminate.
        """
        naive_moves, model_moves = 0, 0
        for i in range(games):
            print('Game: ', i)
            self.env.reset()
            # +1 so the episode always covers the final transition.
            self.generateDemandEpisode(max_test_len + 1)

            naive_moves += self.runEpisodeFixedCustomer('Naive', 1)
            model_moves += self.runEpisodeFixedCustomer(self.loadedModel, 0)

            clear_output(wait=True)

        naive_avg_moves = naive_moves / float(games)
        model_avg_moves = model_moves / float(games)

        print ('Naive avg moves: ', naive_avg_moves)
        print ('Model avg moves: ', model_avg_moves)
        return (naive_avg_moves, model_avg_moves)

    def _record_step(self, env_copy, model, epsilon):
        """Advance env_copy one timestep and append a bookkeeping record.

        Records t, the pre-step grids, the per-agent step events, and the
        post-step grids into env_copy.history. Returns the step events.
        """
        curr_history = {
            't': env_copy.t,
            'curr_state': (env_copy.cars_grid.copy(), env_copy.cust_grid.copy()),
        }
        all_agents_step = env_copy.stepAll(model, epsilon)
        curr_history['actions'] = all_agents_step
        curr_history['next_state'] = (env_copy.cars_grid.copy(), env_copy.cust_grid.copy())
        env_copy.history.append(curr_history)
        return all_agents_step

    def _store_history(self, model, history):
        """File an episode history under the naive or the model bucket."""
        if model == 'Naive':
            self.Naive_Game_Histories.append(history)
        else:
            self.Model_Game_Histories.append(history)

    def runEpisodeFixedCustomer(self, model, epsilon):
        """Replay the current demand episode until terminal (or demand runs out).

        Returns:
            Number of timesteps used — lower is better for the policy.
        """
        env_copy = copy.deepcopy(self.env)
        env_copy.reset(cust_popup_episode=self.cust_episode, cars_list=self.env.cars_list)
        terminal = False
        while not terminal and env_copy.t < len(self.cust_episode) - 1:
            self._record_step(env_copy, model, epsilon)
            terminal = env_copy.isTerminal()
            env_copy.t += 1

        self._store_history(model, env_copy.history)
        return env_copy.t

    def runEpisodeFixedTime(self, model, epsilon):
        """Replay the full demand episode regardless of terminal state.

        Returns:
            Total reward accumulated from 'pickup' events — higher is better.
        """
        env_copy = copy.deepcopy(self.env)
        env_copy.reset(cust_popup_episode=self.cust_episode, cars_list=self.env.cars_list)
        episode_reward = 0
        while env_copy.t < len(self.cust_episode) - 1:
            all_agents_step = self._record_step(env_copy, model, epsilon)
            # Only pickup events carry reward worth counting here.
            episode_reward += sum(mem['reward'] for mem in all_agents_step
                                  if mem['event'] == 'pickup')
            env_copy.t += 1

        self._store_history(model, env_copy.history)
        return episode_reward

    def generateDemandEpisode(self, timesteps):
        """Pre-generate `timesteps` customer-popup grids into self.cust_episode.

        Each cell spawns a customer with probability env.demand_init_prob[i, j];
        a spawned cell holds a random wait time in [minWaitTime, maxWaitTime].
        """
        customer_episodes = []
        size = self.env.grid_size
        for t in range(timesteps):
            cust_popup = np.zeros((size, size))
            for i in range(size):
                for j in range(size):
                    if random.random() < self.env.demand_init_prob[i, j]:
                        random_waittime = np.random.randint(self.env.minWaitTime, self.env.maxWaitTime + 1)
                        cust_popup[i, j] = random_waittime
            customer_episodes.append(cust_popup)
        self.cust_episode = customer_episodes

    def _format_state(self, cars_grid, cust_grid):
        """Build the 2-chars-per-cell printable grid for one state.

        Legend: 'De' demand, '@@' obstacle, 'Cn' car n, 'CD' car on demand.
        """
        size = self.env.grid_size
        state_to_print = np.zeros((size, size), dtype='<U2')
        state_to_print[state_to_print == ''] = '  '
        state_to_print[cust_grid > 0] = 'De'
        state_to_print[self.env.obstacle_grid > 0] = '@@'
        for car in self.env.cars_list:
            state_to_print[cars_grid == car.carID] = 'C{0}'.format(car.carID)
        state_to_print[(cust_grid > 0) & (cars_grid > 0)] = 'CD'
        return state_to_print

    def _print_state(self, state_to_print):
        """Print a formatted grid between horizontal rules."""
        size = self.env.grid_size
        print(" " + "------" * (size - 1))
        print('| ' + '\n| '.join([' . '.join(i) + ' |' for i in state_to_print]))
        print(" " + "------" * (size - 1))

    def print_history(self, history):
        """Pretty-print every timestep of an episode, then the final state.

        BUG FIX: an empty history previously raised NameError (the loop
        variable was referenced after the loop); it is now handled explicitly.
        """
        action_options = {0: "stay", 1: "left", 2: "up", 3: "right", 4: "down"}
        if not history:
            print("(empty history)")
            return
        for hist in history:
            print("==================================")
            print("At t = {0}".format(hist["t"]))
            self._print_state(self._format_state(*hist["curr_state"]))
            for car_event in hist["actions"]:
                print("C{0} chose {1}, {2} -> {3}. And its event type is {4}".format(
                    car_event['carID'], action_options[car_event['action']],
                    car_event['original_location'], car_event['new_location'], car_event['event']))
        # Final state comes from the last record's post-step grids.
        print("==================================")
        print("At final")
        self._print_state(self._format_state(*hist["next_state"]))

    def pygame_history(self, history):
        """Animate an episode with the pygame renderer, one frame per timestep.

        Render planes: 0 = cars, 3 = obstacles, and planes 1/2/4/5/6 hold
        customers whose remaining wait time is 1..5 respectively. Grid
        coordinates are transposed (matrix[plane][x, y] = grid[y, x]) to match
        the renderer's axes, consistently for all planes.

        GENERALIZED: obstacles are now read from self.env.obstacle_grid instead
        of a hard-coded 10x10 layout, so any grid size/layout renders correctly
        (identical output for the original 10x10 environment).
        """
        size = self.env.grid_size
        # Customer wait-time value -> render plane index.
        wait_plane = {1: 1, 2: 2, 3: 4, 4: 5, 5: 6}
        dra = render.Draw(np.zeros([7, size, size]))
        for hist in history:
            matrix = np.zeros([7, size, size])

            for ob in np.argwhere(self.env.obstacle_grid > 0):
                matrix[3][ob[1], ob[0]] = 1

            cars_grid, cust_grid = hist["curr_state"]
            for ca in np.argwhere(cars_grid > 0):
                matrix[0][ca[1], ca[0]] = 1
            for wait, plane in wait_plane.items():
                for cu in np.argwhere(cust_grid == wait):
                    matrix[plane][cu[1], cu[0]] = 1

            dra.update(matrix)
            time.sleep(0.2)


    def plot(self, games, max_game_length, saved_weights_freq, max_epochs, file_name):
        """Evaluate checkpoints across training epochs and plot both curves.

        BUG FIX: previously called the module-level global `tester.loopPolicy`
        instead of `self.loopPolicy`, which broke any instance not bound to a
        global named `tester`.

        NOTE(review): `file_name` is loaded unchanged every iteration, so all
        points currently evaluate the same checkpoint; `index` is computed but
        never used in the path. Parameterizing the filename by `index` looks
        intended — TODO confirm the checkpoint naming scheme before changing.
        """
        epoch = []
        naive_list = []
        model_list = []
        for i in range(1, int(max_epochs / saved_weights_freq) + 1):
            index = i * saved_weights_freq
            self.loadedModel = self.loadWeights(file_name, self.network)
            print ('Loaded: ', file_name)
            epoch.append(index)
            naive_avg_moves, model_avg_moves = self.loopPolicy(games, max_game_length)
            naive_list.append(naive_avg_moves)
            model_list.append(model_avg_moves)

        plt.plot(epoch, naive_list, epoch, model_list)

if __name__ == '__main__':

    # Test Single Model
    # 1 = obstacle cell, 0 = free cell: two 4-column obstacle slabs (cols 3-6)
    # split by a two-row open corridor through the middle of the 10x10 grid.
    obstacle_grid = np.array([[0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],
                              [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]])
    env = GridWorld(num_cars=2, grid_size=10, terminal_reward=500, demand_limit=2, obstacle_grid=obstacle_grid)
    ###env = GridWorld(num_cars=2, grid_size=7, terminal_reward=5,customer_rate=0.04, demand_limit=1, cust_wait_time=10, obstacle_grid=obstacle_grid)

    # Load the pretrained checkpoint into the tester.
    # NOTE(review): assumes this weights file exists on disk — verify the path.
    tester = Tester(env, './weights/Gridworld_10x10_4Layer_Conv_1500.h5')

    # Play 10 paired games (naive vs. model), each capped at 500 timesteps,
    # and print the average moves for both policies.
    tester.loopPolicy(10, 500)

    ## Plot Performance Over Epochs
    # tester.plot(10, 50, 20, 30, './weights/Gridworld_10x10_4Layer_Conv_50.h5')


    ###### Show Game History
    #tester.pygame_history(tester.Naive_Game_Histories[4])
    #for data01 in tester.Naive_Game_Histories:
    #    tester.pygame_history(data01)

    # Replay the fifth model-policy game in the pygame renderer
    # (requires a display; indices 0-9 are valid after 10 games above).
    tester.pygame_history(tester.Model_Game_Histories[4])






    """
    ## Cluster Situations
    a = np.array(tester.Model_Game_Histories[2][0]['curr_state'])
    b = a.reshape(1, int(tester.env.observation_shape / 2))
    print(a)
    print(b)
    
    starting_states = []
    for game in tester.Model_Game_Histories:
        state = np.array(game[0]['curr_state'])
        starting_states.append(state.reshape(1, int(tester.env.observation_shape / 2) )[0])
    print(len(starting_states))
    
    from sklearn.cluster import KMeans
    print(starting_states[0])
    
    kmeans = KMeans(n_clusters=4, random_state=0).fit(np.array(starting_states))
    ## kmeans.labels_
    indices = [i for i, x in enumerate(kmeans.labels_) if x == 3]
    # tester.print_history(tester.Model_Game_Histories[-1])
    """


