import torch.utils
import torch.utils.cpp_extension

from utils.functions import *
from network.encoder import Encoder
from network.decoder import Decoder
from network.lstm import LSTM
from network.discriminator import ShortMotionDiscriminator, LongMotionDiscriminator



def reparameterize(mu, logvar):
    """Sample from N(mu, exp(logvar)) using the reparameterization trick.

    :param mu: mean tensor of the Gaussian.
    :param logvar: log-variance tensor, same shape as ``mu``.
    :return: a differentiable sample ``mu + sigma * eps`` with eps ~ N(0, I).
    """
    sigma = (0.5 * logvar).exp()
    noise = torch.randn_like(sigma)
    return sigma * noise + mu


def mix_reparameterize(mu, logvar, t_mu, t_logvar, ratio):
    """Blend two Gaussian posteriors and sample from the blend.

    The target distribution (``t_mu``/``t_logvar``) is weighted by ``ratio``
    and the state distribution (``mu``/``logvar``) by ``1 - ratio``, for both
    the means and the log-variances; the blended Gaussian is then sampled
    with the reparameterization trick.

    :return: (sample, blended_mean, blended_logvar).
    """
    state_weight = 1 - ratio
    blended_mean = t_mu * ratio + mu * state_weight
    blended_logvar = logvar * state_weight + t_logvar * ratio
    sigma = (0.5 * blended_logvar).exp()
    sample = blended_mean + torch.randn_like(sigma) * sigma
    return sample, blended_mean, blended_logvar


class CVAE(nn.Module):
    """Conditional VAE for autoregressive motion generation / in-betweening.

    Each forward() call advances one frame: the current pose state, the
    offset to the target, and the target pose are encoded separately, a
    latent code is sampled from a blend of the state posterior and a
    target-conditioned posterior (see mix_reparameterize), and a decoder
    LSTM emits the next frame's local quaternions, root velocity and foot
    contacts.  Per-frame L1 losses (and the KL term during training) are
    accumulated on the instance and read back via get_loss().

    NOTE(review): every sub-module is moved to CUDA in __init__, so this
    class hard-requires a GPU — confirm that is acceptable for all callers.
    """

    def __init__(self, conf, skeleton):
        """Build the encoders, LSTMs, latent heads and the decoder.

        :param conf: configuration dict with "data" and "model" sections
            (encoder/lstm/decoder dimensions, latent_size, noise_theta,
            sequence_length, ...).
        :param skeleton: skeleton object exposing forward_kinematics(),
            used to recover global joint positions from local quaternions.
        """
        super().__init__()
        self.data_conf = conf["data"]
        self.model_conf = conf["model"]
        self.skeleton = skeleton

        # build separate encoder network
        # separate: state, offset, target
        self.state_encoder = Encoder(self.model_conf["encoder"]["state_dim"], dropout=0).cuda()
        self.offset_encoder = Encoder(self.model_conf["encoder"]["offset_dim"], dropout=0).cuda()
        self.target_encoder = Encoder(self.model_conf["encoder"]["target_dim"], dropout=0).cuda()

        # build lstm network
        # self.lstm encodes the concatenated embeddings into a hidden state;
        # self.d_lstm decodes [latent z, target embedding, state embedding].
        self.lstm = LSTM(self.model_conf["lstm"]["lstm_dim"], self.model_conf["lstm"]["layer_num"]).cuda()
        self.d_lstm = LSTM([self.model_conf["latent_size"]
                            + self.model_conf["encoder"]["target_dim"][-1]
                            + self.model_conf["encoder"]["state_dim"][-1],  # 16 + 256 + 256
                            self.model_conf["lstm"]["lstm_dim"][-1]], self.model_conf["lstm"]["layer_num"]).cuda()

        # mean & var heads of the state posterior (fed by self.lstm output)
        self.l_mn = torch.nn.Linear(self.model_conf["lstm"]["lstm_dim"][-1], self.model_conf["latent_size"]).cuda()
        self.l_var = torch.nn.Linear(self.model_conf["lstm"]["lstm_dim"][-1], self.model_conf["latent_size"]).cuda()

        # target mean & var heads of the target-conditioned posterior
        self.l_t_mn = torch.nn.Linear(self.model_conf["lstm"]["lstm_dim"][-1], self.model_conf["latent_size"]).cuda()
        self.l_t_var = torch.nn.Linear(self.model_conf["lstm"]["lstm_dim"][-1], self.model_conf["latent_size"]).cuda()

        # build decoder network
        self.decoder = Decoder(self.model_conf["decoder"]["decoder_dim"], self.model_conf["decoder"]["out_dim"],
                               self.model_conf["decoder"]["contact_dim"]).cuda()

        # pred: last predicted frame, fed back as the next step's state input
        self.pred_contact = None
        self.pred_local_quaternion = None
        self.pred_root_velocity = None
        self.pred_root_position = None
        self.pred_global_position = None

        # loss accumulators, each divided by the effective step count per frame
        self.root_loss = 0
        self.contact_loss = 0
        self.position_loss = 0
        self.quaternion_loss = 0
        self.KLD = 0

    def reset(self, bs):
        """Reset LSTM hidden states, cached predictions and loss accumulators.

        Call once per sequence before the first forward() step.

        :param bs: batch size used to initialize the LSTM hidden states.
        """
        self.lstm.init_hidden(bs)
        self.d_lstm.init_hidden(bs)
        # pred
        self.pred_contact = None
        self.pred_local_quaternion = None
        self.pred_root_velocity = None
        self.pred_root_position = None
        self.pred_global_position = None

        self.root_loss = 0
        self.contact_loss = 0
        self.position_loss = 0
        self.quaternion_loss = 0
        self.KLD = 0

    def sample(self, batch_size):
        """Draw a standard-normal latent of shape (1, batch_size, latent_size).

        NOTE(review): this returns a numpy array, not a torch tensor —
        confirm callers convert (and move to CUDA) before feeding the decoder.
        """
        sampled_embedding = np.random.randn(1,
                                            batch_size,
                                            self.model_conf["latent_size"])

        return sampled_embedding

    def get_opposite_target(self):
        """
        output as the opposite network's target input
        :return: None before the first prediction, else
            [local_quaternion, root_velocity, contact, root_position];
            the [0] index presumably drops a leading sequence dimension of
            size 1 from the LSTM outputs — TODO confirm shapes.
        """
        if self.pred_local_quaternion is None:
            return None
        else:
            return [self.pred_local_quaternion[0],
                    self.pred_root_velocity[0],
                    self.pred_contact[0],
                    self.pred_root_position[0]]

    def get_loss(self):
        """Return the accumulated losses as
        [root, contact, position, quaternion, KLD]."""
        return [self.root_loss,
                self.contact_loss,
                self.position_loss,
                self.quaternion_loss,
                self.KLD]

    def forward(self, batch_sample, t, target_index, target_info, seq_len, z_tta, std, positive=True, train=True):
        """Run one autoregressive generation step and accumulate losses.

        :param batch_sample: dict of tensors ("root_position",
            "global_position", "local_quaternion", "root_velocity",
            "contact"), each indexed [batch, frame, ...]; moved to CUDA here.
        :param t: current frame index into the batch sequences.
        :param target_index: target frame index, used for the target noise
            schedule while the opposite network's target is in use.
        :param target_info: target tensors from the opposite-direction
            network (layout of get_opposite_target, plus root position at
            index 3) — only read while step < seq_len // 2 - 1.
        :param seq_len: length of the sub-sequence being generated.
        :param z_tta: time-to-arrival embeddings, indexed per step below.
        :param std: per-joint normalization std for the position losses.
        :param positive: True for the forward-direction pass; when False,
            indices are shifted to the tail of the full sequence.
        :param train: adds the KLD term; also switches the return signature.
        :return: (global_position[0], contact[0]) when train is True, else
            (global_position[0], contact[0], local_quaternion reshaped to
            (batch, joints, 4), root_position[0]).
        """
        # NOTE(review): effective step count used to average per-step losses —
        # confirm it matches the caller's actual loop length.
        cur_sequence_length = seq_len // 2 + (seq_len // 10) * 2

        # root position
        root_position = batch_sample["root_position"].cuda()

        # global_position
        global_position = batch_sample["global_position"].cuda()

        # state info
        local_quaternion = batch_sample["local_quaternion"].cuda()
        root_velocity = batch_sample["root_velocity"].cuda()
        contact = batch_sample["contact"].cuda()

        # step: position within this sub-sequence; for the negative direction
        # t counts from the end of the full sequence, so shift it back.
        step = t if positive else t - self.data_conf["sequence_length"] + seq_len
        # target: first half uses the opposite network's prediction as the
        # target, second half uses the ground-truth final frame.
        if step < seq_len // 2 - 1:
            target = target_info[0].cuda()

            # time to arrival: 0 ; target[target, target_root_velocity, target_contact, target_global_position]
            target_state = torch.cat(target_info[0:3], -1).cuda()

            # offset info
            root_position_offset = target_info[3].cuda()
            local_quaternion_offset = target_info[0].cuda()
        else:
            offset = 0 if positive else self.data_conf["sequence_length"] - seq_len
            target = local_quaternion[:, offset + seq_len - 1]
            target = target.view(target.size(0), -1)
            target_contact = contact[:, offset + seq_len - 1]
            # NOTE(review): double "- 1" — velocity of the frame before the
            # target frame; confirm this off-by-one is intentional.
            target_root_velocity = root_velocity[:, offset + seq_len - 1 - 1]
            target_state = torch.cat([target, target_root_velocity, target_contact], -1)

            # offset info
            root_position_offset = batch_sample["global_position"][:, offset + seq_len - 1, 0, :].cuda()
            local_quaternion_offset = batch_sample["local_quaternion"][:, offset + seq_len - 1, :, :].cuda()
            local_quaternion_offset = local_quaternion_offset.view(local_quaternion_offset.size(0), -1)

        # state: ground truth on the first step, own predictions afterwards
        if step == 0:
            cur_contact = contact[:, t]
            cur_local_quaternion = local_quaternion[:, t]
            cur_local_quaternion = cur_local_quaternion.view(cur_local_quaternion.size(0), -1)
            cur_root_velocity = root_velocity[:, t]
            cur_root_position = root_position[:, t]
        else:
            cur_contact = self.pred_contact[0]
            cur_local_quaternion = self.pred_local_quaternion[0]
            cur_root_velocity = self.pred_root_velocity[0]
            cur_root_position = self.pred_root_position[0]

        # offset: remaining distance from the current pose to the target
        cur_root_position_offset = root_position_offset - cur_root_position
        cur_local_quaternion_offset = local_quaternion_offset - cur_local_quaternion

        state_input = torch.cat([cur_local_quaternion, cur_root_velocity, cur_contact], -1)
        offset_input = torch.cat([cur_root_position_offset, cur_local_quaternion_offset], -1)
        target_input = target

        # encode
        state_embedding = self.state_encoder(state_input)
        offset_embedding = self.offset_encoder(offset_input)
        target_embedding = self.target_encoder(target_input)

        target_state_embedding = self.state_encoder(target_state)
        # keep a copy of the target embedding before Ztta is added in-place
        target_before_embedding = target_embedding

        # add Ztta (time-to-arrival positional embedding)
        # NOTE(review): this condition keys on `t` while the target branch
        # above keys on `step` — confirm the asymmetry is intentional for the
        # negative (positive=False) direction.
        if t < seq_len // 2 - 1:
            index = 2 * step if positive else 2 * step + 1
            state_embedding += z_tta[:, index]
            offset_embedding += z_tta[:, index]
            target_embedding += z_tta[:, index]
        else:
            state_embedding += z_tta[:, step + 1]
            offset_embedding += z_tta[:, step + 1]
            target_embedding += z_tta[:, step + 1]

        robust_embedding = torch.cat([offset_embedding, target_embedding], -1)

        # generate target noise embedding (scheduled by distance to target);
        # NOTE(review): keys on `t`, same asymmetry as the Ztta branch above.
        if t < seq_len // 2 - 1:
            lambda_target = gen_ztarget_by_tta(target_index - t)
        else:
            lambda_target = gen_ztarget(t, seq_len)

        z_target = self.model_conf["noise_theta"] * lambda_target * torch.FloatTensor(
            robust_embedding.size()).normal_().cuda()
        robust_embedding += z_target

        # concat separate embeddings
        input_embedding = torch.cat([state_embedding, robust_embedding], -1).unsqueeze(0)

        # target - concat separate embeddings (offset slot zeroed: the target
        # has no remaining offset to itself)
        target_concat_embedding = torch.cat([target_state_embedding,
                                             torch.zeros(tuple(offset_embedding.shape)).cuda(),
                                             target_before_embedding], -1).unsqueeze(0)
        hidden_target_state = self.lstm(target_concat_embedding)
        t_mean, t_var = self.l_t_mn(hidden_target_state), self.l_t_var(hidden_target_state)

        blend_ratio = gen_ratio(t)

        # Train
        # NOTE(review): the train and inference branches below are currently
        # identical; presumably kept separate for future divergence (e.g.
        # sampling from the prior at inference) — confirm.
        if train:
            # lstm
            hidden_state = self.lstm(input_embedding)
            # cal distribution
            mean, var = self.l_mn(hidden_state), self.l_var(hidden_state)
            # sample
            sampled_embedding, mean, var = mix_reparameterize(mean, var, t_mean, t_var, blend_ratio)

        # Inference
        else:
            hidden_state = self.lstm(input_embedding)
            mean, var = self.l_mn(hidden_state), self.l_var(hidden_state)
            sampled_embedding, mean, var = mix_reparameterize(mean, var, t_mean, t_var, blend_ratio)

        # z, target embedding and state embedding
        concat_sampled_embedding = torch.cat([sampled_embedding,
                                              target_embedding.unsqueeze(0),
                                              state_embedding.unsqueeze(0)], -1)

        # decode
        hidden_state = self.d_lstm(concat_sampled_embedding)
        output, self.pred_contact = self.decoder(hidden_state)

        # update quaternion: decoder output is a residual on the current pose;
        # the normalized copy is used only for forward kinematics
        self.pred_local_quaternion = cur_local_quaternion + output[:, :, :self.data_conf["target_input_dim"]]
        pred_local_quaternion_norm = self.pred_local_quaternion.view(self.pred_local_quaternion.size(0),
                                                                     self.pred_local_quaternion.size(1), -1, 4)
        pred_local_quaternion_norm = pred_local_quaternion_norm / torch.norm(pred_local_quaternion_norm,
                                                                             dim=-1,
                                                                             keepdim=True)

        # update root: remaining decoder channels are the root velocity,
        # integrated into the root position
        self.pred_root_velocity = output[:, :, self.data_conf["target_input_dim"]:]
        self.pred_root_position = cur_root_position + self.pred_root_velocity
        self.pred_global_position = self.skeleton.forward_kinematics(pred_local_quaternion_norm,
                                                                     self.pred_root_position)

        # quaternion loss: L1 against the ground-truth next frame (t + 1)
        actual_local_quaternion = local_quaternion[:, t + 1].view(local_quaternion[:, t + 1].size(0), -1)
        self.quaternion_loss += torch.mean(torch.abs(
            self.pred_local_quaternion[0] - actual_local_quaternion)) / cur_sequence_length
        # root loss (normalized by the root channel of std)
        self.root_loss += torch.mean(torch.abs(self.pred_root_position[0] - root_position[:, t + 1])
                                     / std[:, :, 0]) / cur_sequence_length
        # contact loss
        self.contact_loss += torch.mean(
            torch.abs(self.pred_contact[0] - contact[:, t + 1])) / cur_sequence_length
        # global position loss (normalized per joint by std)
        self.position_loss += torch.mean(torch.abs(
            self.pred_global_position[0] - global_position[:, t + 1]) / std) / cur_sequence_length

        if train:
            # KL(q || N(0, I)) on the blended posterior returned by
            # mix_reparameterize, averaged per step
            self.KLD += -0.5 * torch.mean(1 + var - mean.pow(2) - var.exp()) / cur_sequence_length

        if not train:
            return self.pred_global_position[0], \
                   self.pred_contact[0], \
                   self.pred_local_quaternion.view(self.pred_local_quaternion.shape[1], -1, 4), \
                   self.pred_root_position[0]

        return self.pred_global_position[0], self.pred_contact[0]