import sys
import os
import logging

# Make the current working directory importable so the project-local
# packages (network, environment, learning, config) resolve when the
# script is launched from the repository root.
sys.path.append(os.getcwd())
# Emit INFO-level log records for the whole process.
logging.basicConfig(level=logging.INFO)

from network.dqn_network import DQN
from network.per_dqn_network import PERDQN
from environment.env import Environment
from environment.state import State
from learning.eval import measure_result
from environment.trace_sample import Trace
from config import DRLParameters
import random


def define_network(training: bool, parameter_space, points=999):
    """Build the environment and the PER-DQN model.

    :param training: whether the environment runs in training mode
    :param parameter_space: hyper-parameter container (DRLParameters)
    :param points: number of points handed to the environment
    :return: tuple ``(env, model)``
    """
    # Fixed seed so the initial trace choice is reproducible across runs.
    random.seed(0)
    env = Environment(parameter_space=parameter_space,
                      trace=Trace(parameter_space=parameter_space,
                                  case_id=random.choice(parameter_space.INITIAL_CASE_IDS)), training=training,
                      num_points=points)
    # Sample the action space exactly once: the original called sample()
    # twice, which wasted work and advanced the action-space RNG twice.
    sample_action = env.action_space.sample()
    env_a_shape = 0 if isinstance(sample_action, int) else sample_action.shape
    model = PERDQN(n_state_feature=env.observation_space, n_neurons=parameter_space.N_NEURONS,
                   n_actions=env.action_space.n, memory_capacity=parameter_space.REPLAY_SIZE,
                   lr=parameter_space.LEARNING_RATE, epsilon=parameter_space.EPSILON,
                   env_a_shape=env_a_shape,
                   target_replace_iter=parameter_space.TARGET_REPLACE_ITER, batch_size=parameter_space.BATCH_SIZE,
                   gamma=parameter_space.GAMMA, n_layer=parameter_space.N_LAYERS)
    return env, model


def learning(parameter_space):
    """Train a PER-DQN agent and persist the resulting model to disk.

    :param parameter_space: hyper-parameter container (DRLParameters)
    :return: list of rounded per-episode rewards recorded once learning
             started (previously collected but silently discarded)
    """
    env, model = define_network(training=True, parameter_space=parameter_space)

    rewards = []
    # Episode index at which learning actually began (replay buffer full);
    # the sentinel value N_EPISODES means "not started yet".
    to_learn_ep = parameter_space.N_EPISODES
    for i_episode in range(parameter_space.N_EPISODES):
        s: State = env.reset().clone()
        ep_r = 0
        while True:
            env.render()
            action = model.choose_action(s.feature)
            next_state, reward, done, info = env.step(action)
            model.store_transition(s, action, reward, next_state, done)
            ep_r += reward
            # Only start learning once the replay buffer has been filled.
            if model.memory_counter > model.memory_capacity:
                if to_learn_ep == parameter_space.N_EPISODES:
                    to_learn_ep = i_episode  # remember when learning kicked in
                model.learn()
                if done:
                    print('Ep: ', i_episode, '| Ep_r: ', round(ep_r, 2))
                    rewards.append(round(ep_r, 2))
            if done:
                break
            s = next_state.clone()
        # Stop 750 episodes after learning started.
        if i_episode == to_learn_ep + 750:
            break

    model_path = parameter_space.RESULT_PATH
    # exist_ok avoids the race between the old exists() check and makedirs().
    os.makedirs(model_path, exist_ok=True)
    model.save_model(model_path, "nn_model_0.pt")
    return rewards


if __name__ == '__main__':
    # Script entry point.

    dataset_name = "bpic2018"  # dataset name  # TODO: adjust per experiment
    n_points = 999  # TODO: adjust per experiment
    params = DRLParameters(dataset_name=dataset_name)

    # Training phase.
    learning(parameter_space=params)

    # Evaluation phase: rebuild the network in eval mode, restore the
    # trained weights, then measure on the test data.
    _, dqn = define_network(training=False, parameter_space=params, points=n_points)
    dqn.load_model(params.RESULT_PATH, "nn_model_0.pt")
    print("test measurement:")
    measure_result(dqn, params, training=False, alpha=0, points=n_points)
