import argparse
import os
import random

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as Fn
from scipy.optimize import fmin_l_bfgs_b
from torch.autograd import Variable
from torch.distributions import Normal
from torch.optim import Adam

import config
from env.turtlebot3 import Turtlebot3
from ounoise import OUNoise


def soft_update(target, source, tau):
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)

def hard_update(target, source):
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(param.data)

class LayerNorm(nn.Module):
    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps

        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)

        y = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            y = self.gamma.view(*shape) * y + self.beta.view(*shape)
        return y

nn.LayerNorm = LayerNorm

class Actor(nn.Module):
    def __init__(self, input_dims, output_dims, hidden_dims, action_gain=None):
        super(Actor, self).__init__()
        self.action_gain = action_gain

        self.linear1 = nn.Linear(input_dims, hidden_dims)
        self.ln1 = nn.LayerNorm(hidden_dims)

        self.linear2 = nn.Linear(hidden_dims, hidden_dims)
        self.ln2 = nn.LayerNorm(hidden_dims)

        self.ln3 = nn.Linear(hidden_dims, output_dims)
        self.ln3.weight.data.mul_(0.1)
        self.ln3.bias.data.mul_(0.1)

    def forward(self, x, g):
        x = torch.cat((x, g), dim=-1)
        x = self.linear1(x)
        x = self.ln1(x)
        x = Fn.relu(x)
        x = self.linear2(x)
        x = self.ln2(x)
        x = Fn.relu(x)
        u = self.action_gain*torch.tanh(self.ln3(x))
        return u

class Critic(nn.Module):
    def __init__(self, input_dims, hidden_dims, horizon):
        super(Critic, self).__init__()

        self.linear1 = nn.Linear(input_dims, hidden_dims)
        self.ln1 = nn.LayerNorm(hidden_dims)

        self.linear2 = nn.Linear(hidden_dims, hidden_dims)
        self.ln2 = nn.LayerNorm(hidden_dims)

        self.Q = nn.Linear(hidden_dims, 1)
        self.Q.weight.data.mul_(0.1)
        self.Q.bias.data.mul_(0.1)

        self.q_init = -0.067
        self.q_limit = -horizon
        self.q_offset = -np.log(self.q_limit / self.q_init - 1)

    def forward(self, x, g, u):
        xg = torch.cat((x, g, u), 1)
        x = self.linear1(xg)
        x = self.ln1(x)
        x = Fn.relu(x)
        x = self.linear2(x)
        x = self.ln2(x)
        x = Fn.relu(x)
        # q = torch.sigmoid(self.Q(x)+self.q_offset)*self.q_limit
        q = self.Q(x)
        return q

from collections import namedtuple

Transition = namedtuple(
    'Transition', ('state', 'goal', 'action', 'next_state', 'reward', 'done'))
class ReplayMemory(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self, *args):
        """Saves a transition."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)

        self.memory[self.position] = Transition(*args)
        self.position = int((self.position + 1) % self.capacity)

    def sample(self, batch_size):
        samples = random.sample(self.memory, batch_size)
        batch = Transition(*zip(*samples))
        return batch

    def __len__(self):
        return len(self.memory)

class Layer(object):
    def __init__(self, layer_number, args):
        self.layer_number = layer_number
        self.args = args

        self.dim_x = args.dim_x
        self.dim_g = args.dim_g
        self.dim_u = args.dim_u
        self.gamma = args.gamma
        self.tau = args.tau
        self.horizon = args.horizon
        self.mem = ReplayMemory(args.replay_size)

        if layer_number == 0:
            self.action_gain = torch.ones(self.dim_u)
            self.actor = Actor(input_dims=self.dim_x + self.dim_g, output_dims=self.dim_u, hidden_dims=128, action_gain=self.action_gain)
            self.actor_target = Actor(input_dims=self.dim_x + self.dim_g, output_dims=self.dim_u, hidden_dims=128, action_gain=self.action_gain)
            self.actor_optim = Adam(self.actor.parameters(), lr=1e-4)

            self.critic = Critic(input_dims=self.dim_x + self.dim_g + self.dim_u, hidden_dims=128, horizon=self.horizon)
            self.critic_target = Critic(input_dims=self.dim_x + self.dim_g + self.dim_u, hidden_dims=128, horizon=self.horizon)
            self.critic_optim = Adam(self.critic.parameters(), lr=1e-3)

            self.noise_perc = 0.2
        else:
            self.action_gain = torch.ones(self.dim_g)
            self.action_gain[0] *= np.pi
            self.action_gain[1] *= 15
            self.actor = Actor(input_dims=self.dim_x + self.dim_g, output_dims=self.dim_g, hidden_dims=128, action_gain=self.action_gain)
            self.actor_target = Actor(input_dims=self.dim_x + self.dim_g, output_dims=self.dim_g, hidden_dims=128, action_gain=self.action_gain)
            self.actor_optim = Adam(self.actor.parameters(), lr=1e-3)

            self.critic = Critic(input_dims=self.dim_x + self.dim_g + self.dim_g, hidden_dims=128, horizon=self.horizon)
            self.critic_target = Critic(input_dims=self.dim_x + self.dim_g + self.dim_g, hidden_dims=128, horizon=self.horizon)
            self.critic_optim = Adam(self.critic.parameters(), lr=1e-3)
            self.noise_perc = 0.1

        hard_update(self.actor_target, self.actor)  # Make sure target is with the same weight
        hard_update(self.critic_target, self.critic)

    def forward(self, state, goal, action_noise=None):
        state = torch.Tensor([state])
        goal = torch.Tensor([goal])
        # If testing mode or testing subgoals, action is output of actor network without noise
        action = self.actor(state, goal).detach().numpy()[0]

        if action_noise is not None:
            action += action_noise.noise()

        return np.clip(action, -1, 1)


    # Create action replay transition by evaluating hindsight action given original goal
    def add_memory(self, state, goal, action, next_state, reward, done):
        state = torch.Tensor([state])
        goal = torch.Tensor([goal])
        action = torch.Tensor([action])
        next_state = torch.Tensor([next_state])
        reward = torch.Tensor([reward])
        done = torch.Tensor([done])
        self.mem.push(state, goal, action, next_state, reward, done)

    # Update actor and critic networks
    def learn(self, batch_size=10, opt_iters=10):
        if len(self.mem) > batch_size:
            for _ in range(opt_iters):
                batch = self.mem.sample(batch_size)
                state_batch = torch.cat(batch.state)
                goal_batch = torch.cat(batch.goal)
                action_batch = torch.cat(batch.action)
                next_state_batch = torch.cat(batch.next_state)
                reward_batch = torch.cat(batch.reward)
                done_batch = torch.cat(batch.done)

                next_action_batch = self.actor_target(next_state_batch, goal_batch)
                expected_q_value = reward_batch + self.gamma * (1-done_batch) * self.critic_target(next_state_batch, goal_batch, next_action_batch) # * (1-done_batch)
                # expected_q_value = torch.clamp(expected_q_value, -self.horizon, 0) # -horizon
                # update the parameters of actor network
                self.critic_optim.zero_grad()
                q_value = self.critic(state_batch, goal_batch, action_batch)
                q_value_loss = Fn.mse_loss(q_value, expected_q_value.detach())
                # update the parameters of critic network
                q_value_loss.backward()
                self.critic_optim.step()

                self.actor_optim.zero_grad()
                _action_batch = self.actor(state_batch, goal_batch)
                policy_loss = -self.critic(state_batch, goal_batch, _action_batch)
                policy_loss = policy_loss.mean()
                policy_loss.backward()
                self.actor_optim.step()

                soft_update(self.actor_target, self.actor, self.tau)
                soft_update(self.critic_target, self.critic, self.tau)
        else:
            print('The size of memory is less than batch size!')

class Agent(object):
    def __init__(self, env, ounoise, args):
        self.env = env
        self.ounoise = ounoise
        self.args = args
        self.dim_g = args.dim_g
        self.num_layers = args.num_layers
        self.horizon = args.horizon
        self.goal_array = [None for _ in range(self.num_layers)]
        # x, v, theta, theta_dot
        self.goal_space_train = [[np.deg2rad(-16), np.deg2rad(16)], [-0.6, 0.6]]#,
        self.goal_space_test = [[0, 0], [0, 0]] #
        self.pol = Layer(0, args)

        self.test = False

    def train(self, episode_num, test=False):
        state_t = self.env.reset()
        self.goal_array[self.num_layers - 1] = self.get_goal()
        sum_reward = 0
        time_step = 0

        if args.ou_noise:
            ounoise.scale = (args.noise_scale - args.final_noise_scale) * max(0, args.exploration_end -
                                                                              episode_num) / args.exploration_end + args.final_noise_scale
            ounoise.reset()

        while True:
            time_step += 1

            goal_t = self.goal_array[-1]
            action_t = self.pol.forward(state_t, goal_t, action_noise=ounoise)
            next_state_t, reward_t, done_t = self.env.step(action_t)
            sum_reward += reward_t

            self.pol.add_memory(state_t, goal_t, action_t, next_state_t, reward_t, done_t)

            if not test:
                self.pol.learn(batch_size=args.batch_size, opt_iters=50)

            state_t = next_state_t

            if time_step > args.timesteps:
                break

        print('Epoch: %i, Accumulated rewards: %f' % (episode_num, sum_reward))

        return

    def get_goal(self, test=False):
        end_goal = self.env.get_goal()
        return end_goal

    def get_intrinsic_reward(self, state, subgoal):
        # old_reps = self.env.project_state_to_subgoal(state)
        new_goal = self.env.project_state_to_subgoal()
        reward = -np.linalg.norm(new_goal-subgoal, ord=2)
        if reward > -0.01:
            return reward, True
        return reward, False

    def check_goal(self, state, hindsight_goal, goal_thresholds):
        new_goal = self.env.project_state_to_subgoal()
        for i in range(len(new_goal)):
            if np.absolute(new_goal[i] - hindsight_goal[i]) > goal_thresholds[i]:
                return False
        return True

parser = argparse.ArgumentParser(description='PyTorch DDPG Learning')

parser.add_argument('--dim_x', type=int, default=config.state_dim, metavar='G',
                    help='Dimension of observations')
parser.add_argument('--dim_g', type=int, default=config.goal_dim, metavar='G',
                    help='Dimension of motivation')
parser.add_argument('--dim_u', type=int, default=config.action_dim, metavar='G',
                    help='Dimension of control')

parser.add_argument('--n_epochs', type=int, default=500, metavar='G',
                    help='Number of epochs')
parser.add_argument('--timesteps', type=int, default=200, metavar='G',
                    help='Horizon of each epoch')

parser.add_argument('--num_layers', type=int, default=2, metavar='G',
                    help='Number of layers')
parser.add_argument('--horizon', type=int, default=20, metavar='G',
                    help='Horizon of attempts')

parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
                    help='delay rate for weight copy (deflaut: 0.001)')

parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--noise_scale', type=float, default=0.2, metavar='G',
                    help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.2, metavar='G',
                    help='final noise scale (default: 0.3)')
parser.add_argument('--exploration_end', type=int, default=100, metavar='N',
                    help='number of episodes with noise (default: 100)')


parser.add_argument('--batch_size', type=int, default=64, metavar='G',
                    help='Size of each batch')
parser.add_argument('--replay_size', type=int, default=1e7, metavar='N',
                    help='size of replay buffer (default: 1000000)')
parser.add_argument('--seed', type=int, default=4, metavar='N',
                    help='random seed (default: 4)')

args = parser.parse_args()

torch.manual_seed(args.seed)
np.random.seed(args.seed)

env = Turtlebot3(config)
env.connect()

ounoise = OUNoise(args.dim_u) if args.ou_noise else None

agent = Agent(env, ounoise, args)


for n in range(args.n_epochs):
    # apply policy
    agent.train(n)
