from collections import deque
from os import read
from tensorflow import keras
import tensorflow as tf
import numpy as np
from MimicryHoneypot import *
from Attacker import *
from MimicryNetEnv import *
import joblib
class DoubleDQNTrainer:
    """Double-DQN trainer for the mimicry honeypot environment.

    Maintains an online Q-network (``model``) that selects actions and a
    target network (``target``) that evaluates them (the Double-DQN
    decoupling of van Hasselt et al., 2016). Transitions are stored in a
    bounded replay buffer and sampled uniformly for gradient updates; the
    target network is re-synchronised with the online model every 50
    episodes.
    """

    # Class-level defaults; the hyper-parameters below are overridden per
    # instance in __init__.
    __input_shape = 0
    __n_outputs = 0
    __explore_steps = 200
    __episode = 5
    __train_start = 50
    __batch_size = 32
    __discount_rate = 0.95

    __env = None
    __optimizer = None
    __loss_fn = None
    __replay_memory = None

    model = None      # online Q-network (trained every step)
    target = None     # target Q-network (synced with model periodically)
    model_path = "/etc/DQNModel.h5"

    @staticmethod
    def createDQN(input_shape=(5,), n_outputs=32):
        """Build the Q-network.

        Args:
        ------
            input_shape (sequence of int, optional): shape of one
                environment state. Defaults to (5,), matching the
                five-feature honeypot state used by this trainer.
            n_outputs (int, optional): number of discrete actions, i.e.
                Q-value outputs. Defaults to 32.

        Returns:
        ---------
            keras.Sequential: uncompiled network mapping state -> Q-values.
        """
        dqn = keras.models.Sequential([
            keras.layers.Dense(100, activation="elu",
                               input_shape=list(input_shape)),
            keras.layers.Dense(50, activation="elu"),
            keras.layers.Dense(50, activation="elu"),
            # Linear head: one raw Q-value per action.
            keras.layers.Dense(n_outputs)
            ])
        return dqn

    def __init__(self, episode=600, explore_steps=200, train_start=100):
        """Set up networks, environment, replay buffer and optimizer.

        Args:
        ------
            episode (int, optional): total number of training episodes.
            explore_steps (int, optional): max environment steps / episode.
            train_start (int, optional): episode index after which gradient
                updates begin (lets the replay buffer fill up first).
        """
        self.__episode = episode
        self.__explore_steps = explore_steps
        self.__train_start = train_start
        self.__input_shape = [5]
        self.__n_outputs = 32

        # Initialize the online DQN and an identical target network.
        self.model = self.createDQN(self.__input_shape, self.__n_outputs)
        self.target = keras.models.clone_model(self.model)
        self.target.set_weights(self.model.get_weights())

        # Set up the mimicry honeypot environment.
        self.__env = MimicryNetEnv()

        # Experience replay buffer; oldest transitions are evicted first.
        self.__replay_memory = deque(maxlen=2000)

        # `lr` was deprecated and later removed from Keras optimizers;
        # `learning_rate` is the supported keyword.
        self.__optimizer = keras.optimizers.Adam(learning_rate=1e-3)

        # Set loss function.
        self.__loss_fn = keras.losses.mean_squared_error

        # Fix seeds for reproducibility.
        tf.random.set_seed(32)
        np.random.seed(32)

    def explore_env(self, state, epsilon=0):
        """Choose the next action with an epsilon-greedy policy.

        With probability ``epsilon`` the agent acts randomly; otherwise it
        picks the action with the highest predicted Q-value.

        Args:
        ------
            state (mimicry honeypot state): current environment state.
            epsilon (float, optional): exploration probability. Defaults to 0
                (fully greedy).

        Returns:
        ---------
            action (int): the next action used to explore the environment.
        """
        if np.random.rand() < epsilon:
            # Use the configured action count rather than a hard-coded 32
            # so it stays consistent with the network's output layer.
            return np.random.randint(self.__n_outputs)
        else:
            Q_values = self.model.predict(state[np.newaxis])
            return np.argmax(Q_values[0])

    def sample_experiences(self, batch_size):
        """Sample a batch of experience tuples from the replay memory.

        Sampling is uniform with replacement.

        Args:
        ------
            batch_size (int): number of experiences to draw.

        Returns:
        -------
            states (np.ndarray): states of the sampled experiences
            actions (np.ndarray of int): actions taken in each experience
            rewards (np.ndarray): rewards received in each experience
            next_states (np.ndarray): successor states
            dones (np.ndarray of bool): whether each episode had finished
        """
        indices = np.random.randint(len(self.__replay_memory), size=batch_size)
        batch = [self.__replay_memory[index] for index in indices]
        states, actions, rewards, next_states, dones = [
            np.array([experience[field_index] for experience in batch])
            for field_index in range(5)]
        return states, actions, rewards, next_states, dones

    def play_one_step(self, state, epsilon):
        """Take one epsilon-greedy step and record the transition.

        Args:
        ------
            state: current environment state.
            epsilon (float): exploration probability.

        Returns:
        ---------
            next_state, reward, done: result of the environment step.
        """
        action = self.explore_env(state, epsilon)
        next_state, reward, done = self.__env.step(action)
        self.__replay_memory.append((state, action, reward, next_state, done))
        return next_state, reward, done

    def training_step(self, batch_size):
        """Run one Double-DQN gradient update on a sampled minibatch.

        The online model selects the best next action; the target network
        evaluates it, which reduces the overestimation bias of vanilla DQN.

        Args:
        ------
            batch_size (int): minibatch size drawn from replay memory.
        """
        experiences = self.sample_experiences(batch_size)
        states, actions, rewards, next_states, dones = experiences
        # Action selection with the online network ...
        next_Q_values = self.model.predict(next_states)
        best_next_actions = np.argmax(next_Q_values, axis=1)
        next_mask = tf.one_hot(best_next_actions, self.__n_outputs).numpy()
        # ... value estimation with the target network.
        next_best_Q_values = (self.target.predict(next_states) * next_mask).sum(axis=1)
        target_Q_values = rewards + (1 - dones) * self.__discount_rate * next_best_Q_values
        # Reshape to (batch, 1): Q_values below is (batch, 1) because of
        # keepdims=True, and a (batch,) target would broadcast the MSE to a
        # (batch, batch) matrix, silently corrupting the loss and gradients.
        target_Q_values = target_Q_values.reshape(-1, 1)
        mask = tf.one_hot(actions, self.__n_outputs)
        with tf.GradientTape() as tape:
            all_Q_values = self.model(states)
            Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
            loss = tf.reduce_mean(self.__loss_fn(target_Q_values, Q_values))
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.__optimizer.apply_gradients(zip(grads, self.model.trainable_variables))

    def train(self):
        """Run the full training loop.

        Returns:
        ---------
            min_rewards (list): per-episode minimum reward
            mean_rewards (list): per-episode harmonic-mean reward
            mean_rewards2 (list): per-episode arithmetic-mean reward
        """
        min_rewards = []
        mean_rewards = []
        mean_rewards2 = []
        for episode in range(self.__episode):
            min_score = 200
            score_sum = 0       # sum of 1/reward, for the harmonic mean
            score_sum2 = 0      # sum of rewards, for the arithmetic mean
            steps_played = 0    # actual steps (episodes may end early)
            state = self.__env.reset()
            for step in range(self.__explore_steps):
                # Linearly decay epsilon from 1 to 0.01 over 300 episodes.
                epsilon = max(1 - episode / 300, 0.01)
                state, reward, done = self.play_one_step(state, epsilon)
                # NOTE(review): assumes reward != 0 — a zero reward would
                # raise ZeroDivisionError here; confirm with MimicryNetEnv.
                score_sum += (1 / reward)
                score_sum2 += reward
                steps_played += 1
                # Update the minimum before the termination check so the
                # terminal step's reward is not silently dropped.
                if reward < min_score:
                    min_score = reward
                if done:
                    break

            # Average over the steps actually played, not the nominal
            # episode length, so early-terminating episodes are not skewed.
            harmonic_mean = steps_played / score_sum
            print("episode{},min_reward:{},mean_reward:{}".format(
                episode, min_score, harmonic_mean))
            min_rewards.append(min_score)
            mean_rewards.append(harmonic_mean)
            mean_rewards2.append(score_sum2 / steps_played)

            if episode > self.__train_start:
                self.training_step(batch_size=self.__batch_size)
            if episode % 50 == 0:
                # Periodically sync the target network with the online model.
                self.target.set_weights(self.model.get_weights())
        return min_rewards, mean_rewards, mean_rewards2
    
if __name__ == "__main__":
    # Smoke-training run: 10 episodes of up to 200 exploration steps each,
    # with gradient updates starting after episode 5.
    dqn_trainer = DoubleDQNTrainer(10, 200, 5)
    training_history = dqn_trainer.train()
    # Persist the learned weights for later deployment/loading.
    dqn_trainer.model.save_weights(dqn_trainer.model_path)