# Training loop for the Q (critic) and actor networks: saving, updating, and
# light evaluation/plotting of the networks.
#
# Update procedure:
# - The critic's Q-value loss is a single scalar; it is smoothed for display
#   (accumulated as loss / num_of_display).
# - next_state is fed through the actor and critic to obtain q_target; this is
#   blended with the recorded q value to form the regression target, and the
#   squared error is backpropagated through the critic.
# - The actor is then updated from the critic's input gradient (chained back
#   through the actor's outputs), DDPG-style: the critic learns which actions
#   yield high Q, and the actor learns to emit them.
# - Feeding a state through the actor and then the critic yields a Q value
#   that is currently only used for plotting.
#
from model import *
import torch, os
import torch.optim as optim
import torch.nn.functional as F
from utils import plot, make_time_major

def dqn(data, args):
    """DDPG-style off-policy training loop for the actor-critic pair.

    Pulls trajectories from ``data`` (a queue-like object with ``.get()``),
    batches them, regresses the critic toward a beta-blended target of the
    recorded q values and one-step bootstrapped returns, then updates the
    actor by chaining the critic's input gradient back through the actor's
    outputs. Runs forever; checkpoints both networks every 100 batches.

    Args:
        data: trajectory source; ``data.get()`` returns one trajectory
            tensor/struct with a ``.cuda()`` method (project type).
        args: namespace with lr, epsilon, decay, momentum, batch_size,
            gamma, beta, action_space, save_path (and possibly more).
    """
    critic = Critic()
    actor = Actor()
    optimizer = optim.RMSprop(critic.parameters(), lr=args.lr, eps=args.epsilon,
                              weight_decay=args.decay,
                              momentum=args.momentum)
    # BUG FIX: Actor has no `.params` attribute — optimizers take `.parameters()`.
    optim_actor = optim.Adam(actor.parameters(), lr=0.0001)

    # Only move to GPU when one exists (the original called .cuda()
    # unconditionally, crashing on CPU-only hosts).
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        actor.cuda()
        critic.cuda()

    batch_size = args.batch_size
    save_path = args.save_path
    # BUG FIX: critic_save_path / actor_save_path were referenced but never
    # defined; derive both checkpoint paths from the configured save path.
    critic_save_path = save_path + ".critic"
    actor_save_path = save_path + ".actor"
    batch = []
    loss_history = []
    idx = 0
    if os.path.exists(critic_save_path):
        critic.load_state_dict(torch.load(critic_save_path))
    if os.path.exists(actor_save_path):
        actor.load_state_dict(torch.load(actor_save_path))

    while True:
        trajectory = data.get()
        # TODO: replace this wait-for-a-full-batch scheme with a fixed-size
        # replay buffer so training does not block on trajectory arrival.
        if use_cuda:
            # BUG FIX: .cuda() returns a copy; the original discarded it.
            trajectory = trajectory.cuda()
        batch.append(trajectory)
        if len(batch) < batch_size:
            continue

        # Unpack the batch into time-major tensors.
        states, next_states, actions, rewards, q, dones = make_time_major(batch)

        # Next-state action from the actor drives the bootstrap target.
        # (BUG FIX: the original fed the undefined name `next_state`.)
        actor_act, actor_param = actor(next_states)
        # Targets must not carry gradient; the actor outputs keep their own
        # graph for the policy update below.
        with torch.no_grad():
            target_q_values = critic(actor_act, actor_param, next_states)

        optimizer.zero_grad()
        optim_actor.zero_grad()

        # Blended regression target: beta-weighted mix of the recorded q value
        # and the one-step bootstrapped return.
        # BUG FIXES: `targets` was never initialized; `dones.size` is a method
        # on tensors (TypeError in range); `target_q_values[i+1]` overran the
        # tensor on the last step — target_q_values is already computed from
        # next_states, so index i is the aligned bootstrap value.
        n = len(dones)
        targets = torch.zeros_like(rewards)
        for i in range(n):
            if dones[i]:
                off_policy_target = rewards[i]
            else:
                off_policy_target = rewards[i] + args.gamma * target_q_values[i]
            targets[i] = args.beta * q[i] + (1 - args.beta) * off_policy_target

        # BUG FIX: actions[args.action_space:-1] silently dropped the last
        # element of the parameter slice.
        q_value = critic(actions[:args.action_space],
                         actions[args.action_space:], states)
        loss = (q_value - targets).pow(2).sum()
        loss.backward()
        optimizer.step()

        # Chain the critic's gradient w.r.t. its (action, param) input back
        # into the actor (deterministic policy gradient). BUG FIX: the
        # original indexed critic.x.grad[0,4] / [4,10] — single scalars, not
        # slices — and assigned them to .grad before calling .backward() on
        # non-leaf tensors; the supported form is output.backward(gradient=g).
        # NOTE(review): assumes critic.x is the concatenated (action, param)
        # input with action dims first — confirm the layout in model.Critic.
        actor_act.backward(critic.x.grad[:, :args.action_space])
        actor_param.backward(critic.x.grad[:, args.action_space:])
        optim_actor.step()

        # NOTE(review): the original also computed a `params_loss` from the
        # undefined names `action_param_target` / `params`, re-ran
        # loss.backward() on a freed graph, and double-stepped the optimizer;
        # that dead/crashing code is removed here.
        if idx % 100 == 0:
            # BUG FIX: the original saved the undefined name `model`.
            torch.save(critic.state_dict(), critic_save_path)
            torch.save(actor.state_dict(), actor_save_path)
            loss_history.append(loss.item())
            plot("q_loss", idx, loss_history)

        batch = []
        idx += 1
