import torch.optim
import numpy as np
import torch.nn.functional as F
from agent.AgentBase import AgentBase
from agent.Nets import Qnet
from agent.VisionNet import ConvolutionQnet
import os


class DoubleDQN(AgentBase):
    """Double DQN agent.

    Decouples action *selection* (done by the online ``q_net``) from action
    *evaluation* (done by the slowly-updated ``target_q_net``) to reduce the
    Q-value overestimation of vanilla DQN.
    """

    def __init__(self, state_dim, hidden_dim, action_dim, hidden_layers, learning_rate, gamma,
                 device, isConv=False, input_shape=None, args=None):
        """
        :param state_dim: dimension of the vector state space (ignored when isConv)
        :param hidden_dim: width of each hidden layer of Qnet
        :param action_dim: number of discrete actions
        :param hidden_layers: number of hidden layers of Qnet
        :param learning_rate: Adam learning rate
        :param gamma: reward discount factor
        :param device: torch device the networks and tensors live on
        :param isConv: if True, use the convolutional Q-network for image input
        :param input_shape: image input shape, with channels first
                            (in_channels is taken from input_shape[0]); required when isConv
        :param args: run configuration; must provide ``sampling_strategy`` and,
                     depending on it, ``epsilon`` or ``temperature``
        """
        super(DoubleDQN, self).__init__(args)
        self.action_dim = action_dim
        if isConv:  # image observations
            self.q_net = ConvolutionQnet(input_shape, action_dim, in_channels=input_shape[0]).to(device)
            self.target_q_net = ConvolutionQnet(input_shape, action_dim, in_channels=input_shape[0]).to(device)
        else:
            self.q_net = Qnet(state_dim, hidden_dim, action_dim, hidden_layers).to(device)
            self.target_q_net = Qnet(state_dim, hidden_dim, action_dim, hidden_layers).to(device)

        # Start the target network with exactly the online network's weights.
        self.target_q_net.load_state_dict(self.q_net.state_dict())

        # Only the online network is trained; the target net follows via soft updates.
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=learning_rate)
        self.gamma = gamma
        # Exploration strategy: "epsilon" (epsilon-greedy) or Boltzmann sampling.
        self.st = args.sampling_strategy
        if args.sampling_strategy == "epsilon":
            # epsilon-greedy: probability of taking a random (exploratory) action
            self.epsilon = args.epsilon
        else:
            # Boltzmann sampling temperature (higher -> more uniform action distribution)
            self.temperature = args.temperature
        self.device = device

        # Number of gradient updates performed; used as the logging step.
        self.count = 0
        self.args = args

    def epsilon_policy(self, state, prediction):
        """Epsilon-greedy action selection.

        :param state: batched state tensor accepted by q_net
        :param prediction: if True, always act greedily (no exploration)
        :return: chosen action index (int)
        """
        if np.random.random() < self.epsilon and not prediction:
            action = np.random.randint(self.action_dim)
        else:
            # Exploit: pick the action with the largest predicted Q-value.
            action = self.q_net(state).argmax().item()
        return action

    def boltzmann(self, state):
        """Sample an action from a Boltzmann (softmax) distribution over Q-values.

        :param state: batched state tensor accepted by q_net (batch size 1)
        :return: sampled action index (int)
        """
        action_value = self.q_net(state)
        # Divide by the temperature: low temperature sharpens towards argmax.
        scaled = action_value * (1 / self.temperature)
        probs = torch.softmax(scaled, dim=1)
        action = torch.distributions.Categorical(probs).sample().cpu().numpy()
        return action[0]

    def numpy_to_tensor_gpu(self, data):
        """Convert array-like data to a tensor on the agent's device."""
        return torch.tensor(data).to(self.device)

    def max_q_values(self, state):
        """Return the maximum Q-value of a single state (for monitoring).

        Double DQN uses the trained q_net here, not the target network.
        """
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        return self.q_net(state).max().item()

    def update(self, transition_dict):
        """Perform one gradient step on the online Q-network.

        :param transition_dict: batch of transitions with keys
            'states', 'actions', 'rewards', 'next_states', 'dones'
        """
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions'], dtype=torch.int64).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)

        with torch.no_grad():
            # Double DQN: the ONLINE network selects the next action ...
            max_action = self.q_net(next_states).max(1)[1].view(-1, 1)
            # ... and the TARGET network evaluates it.
            max_next_q_values = self.target_q_net(next_states).gather(1, max_action)
            # TD target: r + gamma * Q_target(s', argmax_a Q(s', a)); zero for terminal states.
            q_targets = rewards + self.gamma * max_next_q_values * (1 - dones)

        # Q(s, a) for the actions actually taken.
        q_values = self.q_net(states).gather(1, actions)

        # F.mse_loss already reduces to the mean; no extra torch.mean needed.
        dqn_loss = F.mse_loss(q_values, q_targets)
        self.writer.add_scalar("DoubleDQN/dqn_loss", dqn_loss, self.count)
        self.optimizer.zero_grad()
        dqn_loss.backward()
        self.optimizer.step()

        # Soft-update the target network towards the online network.
        self.soft_update(self.q_net, self.target_q_net)
        self.count += 1

        if self.st == "epsilon":
            # Anneal epsilon towards 0.01 (less exploration over time).
            self.epsilon = self.linear_decay(self.epsilon, finish=0.01)
            self.writer.add_scalar("DoubleDQN/epsilon", self.epsilon, self.count)
        else:
            # Anneal the temperature towards 0.5 (sharper action distribution).
            self.temperature = self.linear_decay(self.temperature, finish=0.5)
            self.writer.add_scalar("DoubleDQN/temperature", self.temperature, self.count)

    def save(self):
        """Persist the online Q-network weights to args.model_path."""
        # makedirs(exist_ok=True) creates missing parents and avoids the
        # check-then-mkdir race of the previous os.path.exists/os.mkdir pair.
        os.makedirs(self.args.model_path_pre, exist_ok=True)
        torch.save(self.q_net.state_dict(), self.args.model_path)

    def load(self, args):
        """Restore Q-network weights previously written by save().

        Bug fix: the original called torch.load(self.q_net, path), which
        passes the module as torch.load's file argument and never loads
        any weights. The state dict must be loaded from the path and then
        applied via load_state_dict.
        """
        state_dict = torch.load(self.args.model_path, map_location=self.device)
        self.q_net.load_state_dict(state_dict)
        # Keep the target network consistent with the restored weights.
        self.target_q_net.load_state_dict(state_dict)

if __name__ == '__main__':
    from buffer.ReplayBuffer import ReplayBuffer
    from run.train import Trainer
    import gym
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--env_name", type=str, default="CartPole-v0")
    parser.add_argument("--agent_name", type=str)
    parser.add_argument("--learning_rate", type=float, default=2e-3)
    parser.add_argument("--buffer_size", type=int, default=2 ** 14)
    parser.add_argument("--internal_log", type=int, default=50, help="log every N episodes")
    parser.add_argument("--evaluate_internal", type=int, default=10, help="evaluate the model every N episodes")
    parser.add_argument("--record_path_pre", type=str, default="../run/logs/", help="directory for tensorboard log files")
    parser.add_argument("--agent_type", type=str, default="offPolicy", help="agent type")
    parser.add_argument("--minimal_size", type=int, default=1000, help="minimum buffer size before off-policy updates start")
    parser.add_argument("--model_path_pre", type=str, default="../run/models/", help="directory and name prefix for saved models")
    parser.add_argument("--sampling_strategy", type=str, default="boltzmann", help="sampling strategy: epsilon-greedy or Boltzmann")
    # Bug fix: the default must be an int literal -- argparse's type= only
    # converts command-line strings, so default=1e6 would stay a float.
    parser.add_argument("--maxCount", type=int, default=1_000_000, help="number of steps over which to linearly decay to the final value")
    parser.add_argument("--epsilon", type=float, default=0.1)
    parser.add_argument("--temperature", type=float, default=5, help="Boltzmann distribution temperature coefficient")
    parser.add_argument("--tau", type=float, default=0.005, help="soft-update coefficient")
    parser.add_argument("--num_episodes", type=int, default=500)
    args = parser.parse_args()

    env_names = ['CartPole-v0']
    for env_name in env_names:
        # Train once with each sampling strategy for comparison.
        for st in ['epsilon', 'boltzmann']:
            args.sampling_strategy = st
            args.agent_name = "DoubleDQN"
            args.model_path = args.model_path_pre + args.agent_name + "_" + env_name + "_" + args.sampling_strategy + ".pt"
            args.record_path = args.record_path_pre + args.agent_name + "_" + env_name + "_" + args.sampling_strategy
            args.env_name = env_name
            args.batch_size = 64

            env = gym.make(env_name)
            state_dim = env.observation_space.shape[0]
            action_dim = env.action_space.n
            hidden_dim = 128
            hidden_layers = 1
            gamma = 0.99
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            args.device = device
            replay_buffer = ReplayBuffer(args.buffer_size)

            agent = DoubleDQN(state_dim, hidden_dim, action_dim, hidden_layers, args.learning_rate, gamma, device,
                              args=args)

            trainer = Trainer(env, agent, replay_buffer, args)
            trainer.train()
