import gym
from PPO.agent import *
from configs.PPO_DNN import *
import tensorflow as tf
from util.utils import save_reward


def get_graph_features(env, copyGraph):
    """Build a flat per-edge feature tensor for *env* from a graph snapshot.

    Args:
        env: environment exposing ``numEdges``, ``listofDemands``,
            ``firstTrueSize``, ``between_feature``, ``first`` and ``second``.
        copyGraph: array-like of shape (numEdges, 2); column 0 holds the
            edge capacity, column 1 the bandwidth allocated on that edge
            (0, 8, 32 or 64).

    Returns:
        1-D torch tensor of length ``numEdges * 5`` concatenating, per edge:
        normalized capacity, betweenness, and a 3-way one-hot encoding of
        the allocated bandwidth.
    """
    # BUG FIX: use the `env` parameter, not the module-level `env_training`,
    # so the function works for any environment it is handed.
    # One-hot of the allocated bandwidth per edge (np.zeros is already 0.0;
    # the original redundant .fill(0.0) is dropped).
    bw_allocated_feature = np.zeros((env.numEdges, len(env.listofDemands)))
    # Normalize capacity; capacities presumably lie in [100, 300] — the tiny
    # offset keeps the value strictly off 0.0 (TODO confirm range).
    capacity_feature = (copyGraph[:, 0] - 100.00000001) / 200.0

    for edge_idx, bw in enumerate(copyGraph[:, 1]):
        if bw == 8:
            bw_allocated_feature[edge_idx][0] = 1
        elif bw == 32:
            bw_allocated_feature[edge_idx][1] = 1
        elif bw == 64:
            bw_allocated_feature[edge_idx][2] = 1

    sample = {
        'num_edges': env.numEdges,
        'length': env.firstTrueSize,
        'betweenness': tf.convert_to_tensor(value=env.between_feature, dtype=tf.float32),
        'bw_allocated': tf.convert_to_tensor(value=bw_allocated_feature, dtype=tf.float32),
        'capacities': tf.convert_to_tensor(value=capacity_feature, dtype=tf.float32),
        'first': tf.convert_to_tensor(env.first, dtype=tf.int32),
        'second': tf.convert_to_tensor(env.second, dtype=tf.int32)
    }

    # Trim to the true edge count and make both features column vectors.
    sample['capacities'] = tf.reshape(sample['capacities'][0:sample['num_edges']], [sample['num_edges'], 1])
    sample['betweenness'] = tf.reshape(sample['betweenness'][0:sample['num_edges']], [sample['num_edges'], 1])

    # (numEdges, 5): capacity | betweenness | one-hot bandwidth.
    hiddenStates = tf.concat([sample['capacities'], sample['betweenness'], sample['bw_allocated']], axis=1)

    # Flatten edge-major to a single row, then return that row as a 1-D tensor.
    neg_numpy = np.array(hiddenStates).reshape(1, -1)
    neg_tensor = torch.from_numpy(neg_numpy)
    return neg_tensor[0]


def get_state_feature(state, env, source, destination, demand):
    """Compute per-edge features for every candidate path of a demand.

    For each allowed path between *source* and *destination*, the demand is
    tentatively written into the bw_allocated column (column 1) of a copy of
    *state*, and the resulting graph features are extracted.

    Args:
        state: per-edge state array; column 1 is the allocated bandwidth.
        env: environment exposing ``allPaths`` and ``edgesDict``.
        source, destination: endpoints keying ``env.allPaths``.
        demand: bandwidth demand to allocate along each candidate path.

    Returns:
        torch tensor of shape (num_paths, numEdges * features), one row per
        candidate path (num_paths equals ACTION_LEN in this setup).
    """
    # Per-path feature vectors, later stacked into one action*features tensor.
    list_k_features = []
    pathList = env.allPaths[str(source) + ':' + str(destination)]
    for currentPath in pathList:
        state_copy = np.copy(state)
        # Allocate the demand on every edge (node pair) along the path.
        for u, v in zip(currentPath[:-1], currentPath[1:]):
            state_copy[env.edgesDict[str(u) + ':' + str(v)]][1] = demand
        list_k_features.append(get_graph_features(env, state_copy))
    # Derive the row count from the data instead of the hard-coded ACTION_LEN
    # global, so the function generalizes to any number of candidate paths.
    neg_numpy = np.array(list_k_features).reshape(len(list_k_features), -1)
    return torch.from_numpy(neg_numpy)


def train(agent, n_episode, n_update=4, scale=1):
    """Run PPO train/eval iterations and log the mean evaluation reward.

    Args:
        agent: PPO agent exposing ``act``, ``process_data`` and ``learn``.
        n_episode: number of outer iterations (collect + update + evaluate).
        n_update: gradient updates per iteration on the collected batch.
        scale: multiplier applied to training rewards before storage.

    Relies on the module-level ``env_training`` / ``env_eval`` environments
    and the ``train_episodes`` / ``EVALUATION_EPISODES`` / ``graph_topology``
    configuration constants.
    """
    for iters in range(n_episode):
        state_history = []
        action_history = []
        done_history = []
        reward_history = []
        for i in range(train_episodes):
            tf.random.set_seed(1)
            state, demand, source, destination = env_training.reset()
            state_hidden = get_graph_features(env_training, state)
            done = False
            # First episode seeds the history; later episodes overwrite the
            # trailing next-state so transitions stay contiguous across
            # episodes (one extra state per episode boundary).
            if len(state_history) == 0:
                state_history.append(list(state_hidden))
            else:
                state_history[-1] = list(state_hidden)
            while not done:
                action = agent.act(state_hidden)
                new_state, reward, done, new_demand, new_source, new_destination = env_training.make_step(state, action,
                                                                                                          demand,
                                                                                                          source,
                                                                                                          destination)
                state_hidden = get_graph_features(env_training, new_state)
                action_history.append(action)
                done_history.append(done)
                reward_history.append(reward * scale)
                state = new_state
                demand = new_demand
                source = new_source
                destination = new_destination
                state_history.append(list(state_hidden))

        states, actions, log_probs, rewards, dones = agent.process_data(state_history, action_history,
                                                                        reward_history, done_history, 64)
        for _ in range(n_update):
            agent.learn(states, actions, log_probs, rewards, dones)

        # Evaluation pass: rollouts on the held-out environment.
        rewards_log = []
        for i in range(EVALUATION_EPISODES):
            tf.random.set_seed(1)
            state, demand, source, destination = env_eval.reset()
            done = False
            episodic_reward = 0
            while not done:
                # BUG FIX: features must come from env_eval — the environment
                # that produced *state* — not env_training.
                state_hidden = get_graph_features(env_eval, state)
                action = agent.act(state_hidden)
                new_state, reward, done, new_demand, new_source, new_destination = env_eval.make_step(state, action,
                                                                                                      demand,
                                                                                                      source,
                                                                                                      destination)
                episodic_reward += reward
                state = new_state
                demand = new_demand
                source = new_source
                destination = new_destination
            rewards_log.append(episodic_reward)
        save_reward('PPO_DNN-' + str(graph_topology), iters, np.mean(rewards_log))
        print(iters, np.mean(rewards_log))


if __name__ == '__main__':
    # Build the training environment; ENV_NAME / SEED / graph_topology /
    # listofDemands come from configs.PPO_DNN (wildcard import).
    env_training = gym.make(ENV_NAME)
    np.random.seed(SEED)
    env_training.seed(SEED)
    env_training.generate_environment(graph_topology, listofDemands)

    # Separate evaluation environment with the same topology and seed.
    env_eval = gym.make(ENV_NAME)
    np.random.seed(SEED)
    env_eval.seed(SEED)
    env_eval.generate_environment(graph_topology, listofDemands)

    # Module-level globals read elsewhere: ACTION_LEN is used by
    # get_state_feature (number of candidate paths per demand) and
    # STATE_SIZE is edges * 5 features (capacity, betweenness, 3-way
    # one-hot bandwidth — see get_graph_features).
    ACTION_LEN = 4
    STATE_SIZE = env_training.numEdges*5

    # Discrete-action PPO agent; hyperparameters come from configs.PPO_DNN.
    agent = Agent_discrete(state_size=STATE_SIZE,
                           action_size=ACTION_LEN,
                           lr=LEARNING_RATE,
                           beta=BETA,
                           eps=EPS,
                           tau=TAU,
                           gamma=GAMMA,
                           device=DEVICE,
                           hidden=HIDDEN_DISCRETE,
                           share=SHARE,
                           mode=MODE,
                           use_critic=CRITIC,
                           normalize=NORMALIZE)
    train(agent=agent,
          n_episode=RAM_NUM_EPISODE,
          n_update=N_UPDATE,
          scale=SCALE)
