import torch

from torch.utils.tensorboard import SummaryWriter

import RL_agents.Mult_PPO as PPO
import Models.agent_models as models

import os
import argparse
import yaml

from RL_agents.Mult_env import Mult_Envs_Stepper, sub_train_env



def get_args():
    """Parse command-line hyper-parameters for PPO training.

    Fixes over the naive argparse setup:
    - ``--pic_size`` / ``--end_points`` take comma/semicolon-separated
      numbers (``type=tuple`` would split the raw string into characters).
    - ``--keep`` accepts true/false spellings (``type=bool`` treats any
      non-empty string, including ``"False"``, as ``True``).
    Defaults are unchanged, so running with no flags behaves as before.
    """
    def int_pair(text):
        # "96,96" -> (96, 96)
        return tuple(int(v) for v in text.split(","))

    def point_list(text):
        # "497,204;402,159" -> ((497, 204), (402, 159))
        return tuple(int_pair(p) for p in text.split(";"))

    def str2bool(text):
        # argparse's bool() is unusable on the command line; accept common spellings.
        value = text.strip().lower()
        if value in ("1", "true", "yes", "y", "on"):
            return True
        if value in ("0", "false", "no", "n", "off"):
            return False
        raise argparse.ArgumentTypeError(f"invalid boolean value: {text!r}")

    parser = argparse.ArgumentParser(
        """Implementation of model described in the paper: Proximal Policy Optimization Algorithms""")

    parser.add_argument('--gpu', type=int, default=0, help='CUDA device index')
    parser.add_argument('--lr', type=float, default=5e-5)
    parser.add_argument('--lr_c', type=float, default=1.0)
    parser.add_argument('--gamma', type=float, default=0.99, help='discount factor for rewards')
    parser.add_argument('--lambda_', type=float, default=0.95, help='parameter for GAE')
    parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
    parser.add_argument('--epsilon', type=float, default=0.1, help='parameter for Clipped Surrogate Objective')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_epochs', type=int, default=int(1e6))
    parser.add_argument('--pic_size', type=int_pair, default=(96, 96),
                        help='observation image size, e.g. "96,96"')
    # Average number of learning passes per sampled batch of data.
    parser.add_argument('--train_times', type=int, default=5)
    parser.add_argument("--num_processes", type=int, default=4)
    parser.add_argument("--save_interval", type=int, default=50, help="Number of steps between savings")
    parser.add_argument("--plot_interval", type=int, default=10, help="Number of steps between plot result")
    parser.add_argument("--max_actions", type=int, default=400, help="Maximum repetition steps in test phase")
    parser.add_argument("--name", type=str, default="MINI_RESNET18")
    parser.add_argument("--map", type=str, default="real.json")
    parser.add_argument("--end_points", type=point_list,
                        default=((497, 204), (402, 159), (423, 290)),
                        help='target points, e.g. "497,204;402,159;423,290"')
    parser.add_argument("--radius", type=float, default=3)
    # Whether the guidewire keeps its position during environment training.
    parser.add_argument("--keep", type=str2bool, default=False)
    args = parser.parse_args()
    return args



# Load the number of epochs an existing model has already been trained for.
def load_epo(PATH):
    """Read the saved epoch counter from a YAML file.

    Returns the stored value, or 0 (fresh start) when the file is
    missing or unreadable.
    """
    try:
        with open(PATH, "r") as f:
            epo = yaml.load(f, Loader=yaml.SafeLoader)
        print("epo:", epo)
    except Exception as e:
        # BUG FIX: the original printed "/n" (literal slash-n) instead of
        # the "\n" newline escape.
        print(f"{e}\nepo: 0")
        epo = 0
    return epo


def write_epo(PATH, epo):
    """Persist the current epoch counter to *PATH* as YAML."""
    with open(PATH, "w", encoding="utf-8") as outfile:
        yaml.dump(epo, outfile)


def eval_onece(opt, agent,):
    """Run one evaluation episode and measure performance.

    Returns a tuple of (cumulative reward, steps taken, final observation).
    The episode ends on a done signal or after ``opt.max_actions`` steps.
    """
    env = sub_train_env(opt)
    state = env.reset()
    total_reward = 0
    steps_taken = 0
    while steps_taken < opt.max_actions:
        steps_taken += 1
        action, _, _ = agent.select_action_return_v(state)
        next_state, reward, done, _ = env.step(action, render=False)
        total_reward += reward
        state = next_state
        if done:
            break
    return total_reward, steps_taken, state


def main():
    """Train a PPO agent on multiple parallel environments.

    Rollouts are gathered from ``opt.num_processes`` sub-environments over
    pipe connections, the agent is updated once per epoch, and losses,
    evaluation rewards, and checkpoints are written periodically.
    """
    opt = get_args()
    writer = SummaryWriter(log_dir=f"runs/{opt.name}")

    device = torch.device(f"cuda:{opt.gpu}" if torch.cuda.is_available() else "cpu")
    envs = Mult_Envs_Stepper(num_envs=opt.num_processes, opt=opt)
    model = models.Resnet_18(act_num=5).to(device)
    agent = PPO.PPO_agent(opt)
    agent.load_model(model)

    # Single source of truth for checkpoint paths; os.path.join keeps this
    # portable (the original mixed "torch_weights/" and ".\\torch_weights\\").
    weights_dir = "torch_weights"
    weights_path = os.path.join(weights_dir, f"{opt.name}.pt")
    epo_path = os.path.join(weights_dir, f"{opt.name}.yaml")

    try:
        agent.load_weights(weights_path)
        print("load Model")
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        print("New Model")
    begin_epo = load_epo(epo_path)

    # Tell every environment to reset, then collect the initial states.
    for agent_conn in envs.agent_conns:
        agent_conn.send(("reset", None))
    curr_states = [agent_conn.recv() for agent_conn in envs.agent_conns]

    for epo in range(begin_epo, begin_epo + opt.num_epochs):
        for _ in range(opt.max_actions):
            # One (action, probability, value) triple per environment.
            apv = [agent.select_action_return_v(s) for s in curr_states]
            # Step every sub-environment once.
            for agent_conn, (a, _, _) in zip(envs.agent_conns, apv):
                agent_conn.send(("step", a))
            # Receive (next_state, reward, done, info) from every sub-environment.
            step_results = [agent_conn.recv() for agent_conn in envs.agent_conns]
            # Store each transition in its environment's rollout buffer.
            # (The original inner loop reused the outer loop variable name.)
            for k, env_data in enumerate(envs.env_datas):
                next_s, r, d, _ = step_results[k]
                a, p, v = apv[k]
                env_data.pack_step_data(curr_states[k], a, p, r, v, d)
            curr_states = [result[0] for result in step_results]

        # Collect the rollout data and run the PPO update.
        states, acts, rewards, probs, values, Gs, advantages = envs.collect_all_datas()
        losses = agent.learn(states, acts, rewards, probs, Gs, advantages)
        # Drop the consumed rollout data.
        for datas in envs.env_datas:
            datas.clear()

        if losses is not None:
            writer.add_scalar('Loss/actor', losses[0], epo)
            writer.add_scalar('Loss/critic', losses[1], epo)
            writer.add_scalar('Loss/entropy', losses[2], epo)
            writer.add_scalar('Loss/all', losses[3], epo)

        if epo % opt.plot_interval == 0:
            # Reset every training environment before the next rollout.
            for agent_conn in envs.agent_conns:
                agent_conn.send(("reset", None))
            curr_states = [agent_conn.recv() for agent_conn in envs.agent_conns]
            total, steps, img = eval_onece(opt, agent)
            writer.add_scalar('Reward/reward', total, epo)
            writer.add_scalar('Reward/Spend Steps', steps, epo)
            # BUG FIX: the original logged the undefined name `s` (NameError);
            # the final observation returned by eval_onece is `img`.
            writer.add_image("Last_result Image",
                             img.reshape(opt.pic_size[0], opt.pic_size[1]),
                             global_step=epo, dataformats="HW",)

            os.makedirs(weights_dir, exist_ok=True)
            torch.save(agent.model.state_dict(), weights_path)
            print("weights saved")
            write_epo(epo_path, epo)
            print(f"# of episode :{epo}, score : {total}, steps :{steps}")

        if epo % opt.save_interval == 0:
            capsule_dir = os.path.join(weights_dir, "time_capsule")
            os.makedirs(capsule_dir, exist_ok=True)
            torch.save(agent.model.state_dict(),
                       os.path.join(capsule_dir, f"{opt.name}_{epo}.pt"))
 

# Script entry point: train only when executed directly, not when imported.
if __name__ == '__main__':
	main()
