import numpy as np
import os
import torch
import torch.optim as optim
import sys

from torch.utils.tensorboard import SummaryWriter

from basic_models.config import ConfigSet
from basic_models.utils import save_model

from pathlib import Path

# Restrict training to the first visible GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Parse hyper-parameters from the command line.
configSet = ConfigSet()
args = configSet.execute_args_from_cmd(sys.argv[1:])
print(args)

# Environment rollout settings.
env_param_dict = args.env_param_dict
max_step = env_param_dict['max_step']
n_episode = env_param_dict['n_episode']

# Training hyper-parameters.
n_epoch = args.n_epoch
hidden_dim = args.hidden_dim
epsilon = args.epsilon
GAMMA = args.GAMMA
SEED = args.SEED
i_episode = args.i_episode
batch_size = args.batch_size
score = args.score
l_rate = args.l_rate

# Experiment identification.
env_difficulty_level = args.env_difficulty_level
model_name = args.model_name
env_name = args.env

# Per-run log directory: ./ablation_exps/<env>/<difficulty>/<model>/run<N>_SEED_<seed>
model_dir = Path('./ablation_exps') / env_name / env_difficulty_level / model_name
if not model_dir.exists():
    current_run = 'run1'
else:
    # BUGFIX: the old code used int(name.split('run')[1][0]), which keeps only
    # the FIRST digit after 'run', so 'run12_SEED_x' counted as run 1 and the
    # numbering stalled after run9.  Parse the whole number up to the next
    # underscore instead, skipping folders with a non-numeric suffix.
    exist_run_nums = []
    for folder in model_dir.iterdir():
        folder_name = str(folder.name)
        if folder_name.startswith('run'):
            num_part = folder_name[3:].split('_')[0]
            if num_part.isdigit():
                exist_run_nums.append(int(num_part))
    if len(exist_run_nums) == 0:
        current_run = 'run1'
    else:
        current_run = 'run%i' % (max(exist_run_nums) + 1)
run_dir = model_dir / (current_run + '_SEED_{0}'.format(SEED))

# TensorBoard logging (SummaryWriter also creates run_dir on disk).
writer = SummaryWriter(str(run_dir))
writer.add_text('train info', str(args), 0)
writer.add_scalar(
    f'map_Surviving_level_{env_difficulty_level}_model_{model_name}_seed_{SEED}',
    999, 0)

# Plain-text score log; closed at the very end of the script.
f = open(Path(run_dir) / 'info.txt', 'w')

# Prefer the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Seed every RNG for reproducibility (CUDA seeding is a no-op without a GPU).
for seed_fn in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
    seed_fn(SEED)

# Build the environment plus the online and target networks from the config.
env, model, model_tar = configSet.get_env_and_model()
detached_input_dim = env.detached_obs_one_length

model = model.to(device)
model_tar = model_tar.to(device)

# Experience replay buffer.
buff = configSet.get_buffer()

# Adam optimizes the online network only; the target network is refreshed
# later by copying the online weights.
optimizer = optim.Adam(model.parameters(), lr=l_rate)

# Environment dimensions.
n_ant = env.n_agent
observation_space = env.len_obs
n_actions = env.n_action

# Pre-allocated mini-batch arrays, refilled in place each training epoch
# (initial values are irrelevant — every slot is overwritten before use).
obs_batch_shape = (batch_size, n_ant, 3, detached_input_dim)
adj_batch_shape = (batch_size, n_ant, n_ant)
current_O = np.ones(obs_batch_shape)
Next_O = np.ones(obs_batch_shape)
Matrix = np.ones(adj_batch_shape)
Next_Matrix = np.ones(adj_batch_shape)

best_reward = -100
used_adj = None
all_adj = None
# Main training loop: one iteration per episode.
while i_episode < n_episode:

    # Linearly anneal epsilon after a 100-episode warm-up, floored at 0.1.
    if i_episode > 100:
        epsilon -= 0.0004
        if epsilon < 0.1:
            epsilon = 0.1
    i_episode += 1
    steps = 0
    obs, adj = env.reset()

    # Roll out at most max_step steps in the current episode.
    while steps < max_step:
        steps += 1
        action = []
        # Add a leading batch dimension of 1 for the forward pass.
        if isinstance(obs, list):
            obs = [obs]
        elif isinstance(obs, torch.Tensor):
            obs = torch.unsqueeze(obs, 0)
        else:
            raise TypeError(f'Unknown type {type(obs)}')

        pre_obs = torch.Tensor(obs).to(device)
        q, used_adj = model(pre_obs, torch.Tensor(adj).to(device))
        q = q[0]
        # Epsilon-greedy action selection, independently per agent.
        for i in range(n_ant):
            if np.random.rand() < epsilon:
                a = np.random.randint(n_actions)
            else:
                a = q[i].argmax().item()
            action.append(a)

        next_obs, next_adj, reward, terminated = env.step(action)
        # env.show_frame()
        if terminated:
            # NOTE(review): the reset observations are discarded here, so the
            # next step reuses the stale obs/adj and the terminal transition is
            # never stored in the buffer — confirm this is intentional.
            _, _ = env.reset()
        else:
            buff.add(np.array(obs), action, reward, np.array(next_obs), adj, next_adj, terminated)
            obs = next_obs
            all_adj = adj
            adj = next_adj
            score += sum(reward)

    # Report, log, and checkpoint every 20 episodes.
    if i_episode % 20 == 0:
        print('i_episode {0}: score {1}; epsilon {2}; lr {3}'.format(i_episode, score / 2000, epsilon, l_rate))
        # BUGFIX: was `score / 20000`, inconsistent with the printed value,
        # the TensorBoard scalar and the best-reward check (all use / 2000).
        f.write(f'{i_episode} {score / 2000}\n')
        f.flush()
        writer.add_scalar('score', score / 2000, i_episode)
        writer.add_scalar('epsilon', epsilon, i_episode)
        # Fraction of the available adjacency the model actually used.
        writer.add_scalar('adj', used_adj.sum() / all_adj.sum(), i_episode)
        if score / 2000 > best_reward:
            save_model(model, optimizer, i_episode, run_dir / 'best.pth')
            best_reward = score / 2000
        score = 0

    # Warm-up: no loss / backprop until 100 episodes have been collected.
    if i_episode < 100:
        continue

    # After each episode, train for n_epoch epochs, each on a fresh mini-batch
    # sampled from the replay buffer (batch_size * n_epoch samples in total).
    epoch_loss = []
    for e in range(n_epoch):

        batch = buff.getBatch(batch_size)
        # Refill the pre-allocated batch arrays in place.
        for j in range(batch_size):
            sample = batch[j]
            current_O[j] = sample[0]
            Next_O[j] = sample[3]
            Matrix[j] = sample[4]
            Next_Matrix[j] = sample[5]

        processed_O = torch.Tensor(current_O).to(device)
        q_values, _ = model(processed_O, torch.Tensor(Matrix).to(device))

        tar_processed_O = torch.Tensor(Next_O).to(device)
        target_q_values, _ = model_tar(tar_processed_O, torch.Tensor(Next_Matrix).to(device))

        # Max over the action dimension for the bootstrap target.
        target_q_values = target_q_values.max(dim=2)[0]
        target_q_values = np.array(target_q_values.cpu().data)
        expected_q = np.array(q_values.cpu().data)

        # One-step TD target: r + (1 - done) * GAMMA * max_a' Q_tar(s', a'),
        # written only into the entries of the actions actually taken.
        for j in range(batch_size):
            sample = batch[j]
            for i in range(n_ant):
                expected_q[j][i][sample[1][i]] = sample[2][i] + (1 - sample[6]) * GAMMA * target_q_values[j][i]

        # MSE between predicted and TD-target Q-values.
        loss = (q_values - torch.Tensor(expected_q).to(device)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar('loss/loss_{0}'.format(e), loss, i_episode)
        epoch_loss.append(loss.detach().cpu())
    # scheduler.step()
    writer.add_scalar('loss/loss_mean', np.array(epoch_loss).mean(), i_episode)

    # Every 5 episodes, copy the online weights into the target network.
    # This is an off-policy scheme: the buffer is kept across updates rather
    # than cleared after each parameter step.
    if i_episode % 5 == 0:
        model_tar.load_state_dict(model.state_dict())

f.close()
