import random
import tensorflow as tf
# from tensorflow.keras import layers

from keras.layers import Conv1D,Dense,Flatten,Input,Activation,Reshape
from keras.layers import RepeatVector, Dense, Activation, Lambda,Concatenate
from keras.models import Model
import keras.backend as K
from keras.optimizers import Adam
from collections import deque
import numpy as np
from agent_utils import *

class DQNAgent(object):
    """Dueling Deep-Q-Network agent with an attention-based advantage head.

    The Q-network takes three inputs (worker observation, task-slot
    observation, graph input), produces per-(group, slot) attention
    advantages plus per-group "move" actions, and combines them with a
    scalar state value via the dueling formula Q = V + A - mean(A).
    A separate target network is soft-updated at rate ``tau``.
    """

    def __init__(self, obs_shape, action_size):
        """Create the online and target networks and the replay buffer.

        Args:
            obs_shape: sequence of three Keras input shapes —
                obs_shape[0] worker observation, obs_shape[1] task-slot
                observation, obs_shape[2] graph input.  The second axis
                of the first two is assumed to be a multiple of 4
                (4 features per group / slot) — TODO confirm with caller.
            action_size: number of discrete actions.
        """
        self.observe_shape = obs_shape
        self.action_size = action_size
        self.memory = deque(maxlen=2000)  # experience replay buffer
        self.gamma = 0.95    # discount rate
        self.epsilon = 1.0   # exploration rate
        self.epsilon_min = 0.02
        self.epsilon_max = 1.0
        # Kept for compatibility with external readers; act() actually uses
        # the linear schedule driven by eps_decay_steps, not this factor.
        self.epsilon_decay = 0.9997
        self.eps_decay_steps = 2000
        self.learning_rate = 0.001
        self.model = self._build_model()
        self.target_model = self._build_model()
        self.tau = 0.3       # soft-update rate for the target network
        self.update_target_model()
        # Built lazily by update_best_model().  Previously this attribute
        # was never created, so update_best_model() raised AttributeError.
        self.best_model = None

    def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
        """Huber loss for Q-learning (defined but the model compiles 'mse').

        References:
            https://en.wikipedia.org/wiki/Huber_loss
            https://www.tensorflow.org/api_docs/python/tf/losses/huber_loss
        """
        error = y_true - y_pred
        cond = K.abs(error) <= clip_delta

        squared_loss = 0.5 * K.square(error)
        # Linear branch of the Huber loss (the original mislabeled this
        # variable "quadratic_loss"; the formula is linear in |error|).
        linear_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)

        return K.mean(tf.where(cond, squared_loss, linear_loss))

    def _attention_on_task(self, group_input, task_input):
        """Attention from the task-slot features onto the action space.

        For each of the GROUP worker groups, attends over the M task
        slots and also produces a per-group "move" logit.  The flattened
        attention weights and the sigmoid move actions are concatenated
        into one advantage vector of length GROUP * M + GROUP.

        Encodings (as wired below): the task-slot input is encoded to
        shape (M, 4) and the group input to (GROUP, 4), where
        M = obs_shape[1][1] // 4 and GROUP = obs_shape[0][1] // 4.
        """
        M = self.observe_shape[1][1] // 4
        GROUP = self.observe_shape[0][1] // 4

        # Shared layer components, reused for every group.
        repeator = RepeatVector(n=M)
        concatenator = Concatenate(axis=2)
        densor1 = Dense(6, activation='tanh')
        densor2 = Dense(1, activation='relu')
        # `softmax`, `encode` and `slice` come from agent_utils (star import).
        activitor = Activation(activation=softmax, name='attention_weights')

        group_encode = Lambda(encode)(group_input)
        group_encode = Reshape((GROUP, 4))(group_encode)

        task_encode = Lambda(encode)(task_input)
        task_encode = Reshape((M, 4), name='task_encoder')(task_encode)

        move_densor1 = Dense(1, activation='tanh', name='move_tanh')
        move_densor2 = Dense(1, activation='sigmoid', name='move_action')

        moves = []
        outputs = []

        for j in range(GROUP):
            # One-step attention for group j.
            group_j = Lambda(slice, output_shape=(1, 4), arguments={'i': j})(group_encode)
            group_j_r = repeator(group_j)  # broadcast group j to shape (M, 4)

            concat = concatenator([group_j_r, task_encode])

            e = densor1(concat)
            energies = densor2(e)
            group_move = Reshape(target_shape=(M,))(energies)
            moves.append(move_densor1(group_move))

            # alphas: attention weights between the tasks and this group.
            alphas = activitor(energies)
            outputs.append(alphas)

        out = Concatenate(axis=1)(outputs)
        out1 = Reshape(target_shape=(M * GROUP,))(out)

        move_on = Concatenate(axis=1)(moves)
        move_on = move_densor2(move_on)

        out_all = Concatenate(axis=1, name='all_actions')([out1, move_on])
        return out_all

    def _build_model(self):
        """Build and compile the dueling Q-network.

        The attention head supplies the action advantages; a small
        Conv1D/Dense stack over all three (transposed/reshaped) inputs
        produces the state value V; the two streams are combined with
        the dueling aggregation Q = V + A - mean(A).
        """
        transposer = Lambda(transpose, name='transpose')  # from agent_utils
        reshapor = Reshape(target_shape=(self.observe_shape[2][0], 16), name='reshape_G')

        W_input = Input(self.observe_shape[0], name="worker_obs")
        S_input = Input(self.observe_shape[1], name="slot_obs")
        G_input = Input(self.observe_shape[2], name="graph_input")

        # Attention model defines the advantage function over actions.
        advantages = self._attention_on_task(W_input, S_input)

        W_t = transposer(W_input)
        S_t = transposer(S_input)
        G_t = reshapor(G_input)

        conv1 = Conv1D(filters=2, kernel_size=4, strides=4, padding='valid')(W_t)
        conv1_s = Conv1D(filters=2, kernel_size=4, strides=4, padding='valid')(S_t)
        conv1_g = Conv1D(filters=2, kernel_size=1, strides=1, padding='valid')(G_t)
        concat1 = Concatenate(axis=1)([conv1, conv1_s, conv1_g])
        X1 = Activation('relu', name='concat_relu')(concat1)  # (?, group+M+planes, 2)

        X = Flatten()(X1)  # (?, (group+M+plane_num)*2)
        X = Dense(units=10, activation='tanh')(X)

        V = Dense(units=1, name="Value_function")(X)

        # Dueling aggregation: Q = V + A - mean(A).
        Q = Lambda(lambda a: V + a - K.mean(a, keepdims=True),
                   output_shape=(self.action_size,))(advantages)

        model = Model(inputs=[W_input, S_input, G_input], outputs=Q, name='Schedule_model')
        # NOTE(review): _huber_loss is available but 'mse' is what was shipped.
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model

    def update_target_model(self):
        """Soft-update the target network: target <- tau*online + (1-tau)*target."""
        W = self.model.get_weights()
        tgt_W = self.target_model.get_weights()
        for i in range(len(W)):
            tgt_W[i] = self.tau * W[i] + (1 - self.tau) * tgt_W[i]
        self.target_model.set_weights(tgt_W)

    def update_best_model(self):
        """Snapshot the online network's weights into best_model.

        The snapshot network is built lazily on first use; the original
        code referenced self.best_model without ever creating it.
        """
        if getattr(self, 'best_model', None) is None:
            self.best_model = self._build_model()
        self.best_model.set_weights(self.model.get_weights())

    def memorize(self, obs, action, reward, next_obs, done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((obs, action, reward, next_obs, done))

    def act(self, obs_list, step):
        """Epsilon-greedy action selection with a linear epsilon schedule.

        Bug fix: epsilon is now annealed on every call.  Previously the
        decay statement sat AFTER the random-exploration early return, so
        with epsilon starting at 1.0 the schedule could never advance and
        the agent explored forever.

        Args:
            obs_list: model inputs in the format expected by model.predict.
            step: global step used to drive the linear epsilon schedule.

        Returns:
            int: the chosen action index.
        """
        self.epsilon = max(self.epsilon_min,
                           self.epsilon_max -
                           (self.epsilon_max - self.epsilon_min) * step / self.eps_decay_steps)
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.model.predict(obs_list)
        return np.argmax(act_values[0])  # greedy action

    def replay(self, batch_size):
        """Train the online network on a random minibatch of transitions.

        Targets are bootstrapped from the target network (vanilla DQN
        target: r + gamma * max_a' Q_target(s', a'), zeroed on terminal).
        Returns silently when the buffer does not yet hold batch_size
        transitions (previously random.sample raised ValueError).
        """
        if len(self.memory) < batch_size:
            return
        mini_batch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in mini_batch:
            target = self.model.predict(state)
            t = self.target_model.predict(next_state)[0]
            # Bootstrap only for non-terminal transitions.
            target[0][action] = reward + self.gamma * np.amax(t) * (1 - int(done))
            self.model.fit(state, target, epochs=1, verbose=0)

    def load(self, name):
        """Load network weights from file `name`."""
        self.model.load_weights(name)

    def save(self, name):
        """Save network weights to file `name`."""
        self.model.save_weights(name)