import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from torch.autograd import Variable
import numpy as np
import unittest


class EditEncoder(nn.Module):
    """Encode insert/delete word sets into a single fixed-size edit vector.

    The edit vector is the concatenation of a projected sum of the inserted
    word embeddings and a projected sum of the deleted word embeddings.
    Optionally the vector is perturbed with von Mises-Fisher (vMF) noise
    (``draw_samples=True``) or replaced entirely by a draw from the prior
    (``draw_p=True``), following the vMF reparameterization used by
    neural-editor style models.
    """

    def __init__(self, word_dim, edit_dim, kappa_init, norm_eps, norm_max):
        """
        Args:
            word_dim (int): dimension of the input word embeddings.
            edit_dim (int): dimension of the produced edit vector.
            kappa_init (float): vMF concentration parameter used when sampling.
            norm_eps (float): width of the uniform noise added to the norm in
                ``add_norm_noise``.
            norm_max (float): maximum allowed norm of an edit vector.
        """
        super(EditEncoder, self).__init__()
        # NOTE(review): self.linear is never used inside this class; kept in
        # case external code accesses it.
        self.linear = nn.Linear(edit_dim, edit_dim)
        self.linear_prenoise = nn.Linear(word_dim, edit_dim // 2, bias=False)
        self.noise_scalar = kappa_init
        self.norm_eps = norm_eps
        self.norm_max = norm_max
        # Clips a norm into [0, norm_max - norm_eps] so that adding up to
        # norm_eps of noise keeps the final norm below norm_max.
        self.norm_clip = nn.Hardtanh(0, norm_max - norm_eps)

    def forward(self, insert_embeds: PackedSequence, insert_embeds_exact: PackedSequence,
                delete_embeds: PackedSequence, delete_embeds_exact: PackedSequence,
                draw_samples=False, draw_p=False):
        """Build the edit vector from packed insert/delete embedding sequences.

        Args:
            insert_embeds, insert_embeds_exact: packed sequences of embeddings
                for the (approximate and exact-match) inserted words.
            delete_embeds, delete_embeds_exact: same, for the deleted words.
            draw_samples (bool): if True, perturb the edit vector with vMF
                noise (concentration ``self.noise_scalar``).
            draw_p (bool): if True (and ``draw_samples``), ignore the inputs'
                direction entirely and sample from the prior instead.

        Returns:
            Tensor: the edit vector(s), last dim of size ``edit_dim``.
        """
        insert_embeds, _ = pad_packed_sequence(insert_embeds)
        insert_embeds_exact, _ = pad_packed_sequence(insert_embeds_exact)
        delete_embeds, _ = pad_packed_sequence(delete_embeds)
        delete_embeds_exact, _ = pad_packed_sequence(delete_embeds_exact)

        # batch_size * word_dim (word_dim is embed_dim)
        # NOTE(review): pad_packed_sequence defaults to (T, B, D) layout, so
        # summing dim 1 sums over the batch axis, not the time axis — confirm
        # callers pack with the layout this expects.
        insert_embeds = torch.sum(insert_embeds, 1) + torch.sum(insert_embeds_exact, 1)
        delete_embeds = torch.sum(delete_embeds, 1) + torch.sum(delete_embeds_exact, 1)

        insert_set = self.linear_prenoise(insert_embeds)
        delete_set = self.linear_prenoise(delete_embeds)

        combined_map = torch.cat([insert_set, delete_set], 1)

        if draw_samples:
            if draw_p:
                batch_size, edit_dim = combined_map.size()
                combined_map = self.draw_p_noise(batch_size, edit_dim)
            else:
                combined_map = self.draw_vmf(combined_map, self.noise_scalar)
        edit_embed = combined_map
        return edit_embed

    def draw_p_noise(self, batch_size, edit_dim):
        """Sample edit vectors from the prior: uniform random direction with
        norm drawn uniformly from [0, norm_max)."""
        rand_draw = Variable(torch.randn(batch_size, edit_dim))
        # BUG FIX: the reduced norm has shape (batch_size,); keepdim=True keeps
        # it (batch_size, 1) so the expand to (batch_size, edit_dim) is valid.
        rand_draw = rand_draw / torch.norm(rand_draw, p=2, dim=1, keepdim=True).expand(batch_size, edit_dim)
        rand_norms = (torch.rand(batch_size, 1) * self.norm_max).expand(batch_size, edit_dim)
        return rand_draw * Variable(rand_norms)

    def draw_vmf(self, mu, kappa):
        """Draw one vMF sample per row of ``mu`` (concentration ``kappa``),
        scaled by the row's (clipped, noised) norm.

        Rows with (near-)zero norm get a tiny uniform random vector instead,
        since a vMF direction is undefined there.
        """
        batch_size, id_dim = mu.size()
        result_list = []
        for i in range(batch_size):
            mu_norm = mu[i].norm().expand(id_dim)
            mu_noise = self.add_norm_noise(mu_norm, self.norm_eps)
            if float(mu[i].norm().data.cpu().numpy()) > 1e-10:
                # sample offset from center (on sphere) with spread kappa
                w = self._sample_weight(kappa, id_dim)
                wtorch = Variable(w * torch.ones(id_dim))

                # sample a point v on the unit sphere that's orthogonal to mu
                v = self._sample_orthonormal_to(mu[i] / mu_norm, id_dim)

                # compute new point: unit vector on the sphere, rescaled by the
                # noised norm of mu
                scale_factr = torch.sqrt(Variable(torch.ones(id_dim)) - torch.pow(wtorch, 2))
                orth_term = v * scale_factr
                muscale = mu[i] * wtorch / mu_norm
                sampled_vec = (orth_term + muscale) * mu_noise
            else:
                # Degenerate case: direction undefined, sample a random vector
                # with norm below norm_eps.
                rand_draw = Variable(torch.randn(id_dim))
                rand_draw = rand_draw / torch.norm(rand_draw, p=2).expand(id_dim)
                rand_norms = (torch.rand(1) * self.norm_eps).expand(id_dim)
                sampled_vec = rand_draw * Variable(rand_norms)  # mu[i]
            result_list.append(sampled_vec)

        return torch.stack(result_list, 0)

    def add_norm_noise(self, mu_norm, eps):
        """
        KL loss is - log(maxvalue/eps)
        cut at maxvalue-eps, and add [0,eps] noise.
        """
        trand = torch.rand(1).expand(mu_norm.size()) * eps
        # BUG FIX: was self.normclip(...), which is undefined; the Hardtanh
        # module created in __init__ is self.norm_clip.
        return self.norm_clip(mu_norm) + Variable(trand)

    @staticmethod
    def _sample_weight(kappa, dim):
        """Rejection sampling scheme for sampling distance from center on
        surface of the sphere (Wood's algorithm for vMF sampling).
        """
        dim = dim - 1  # since S^{n-1}
        b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)  # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
        x = (1. - b) / (1. + b)
        c = kappa * x + dim * np.log(1 - x ** 2)  # dim * (kdiv *x + np.log(1-x**2))

        while True:
            z = np.random.beta(dim / 2., dim / 2.)  # concentrates towards 0.5 as d-> inf
            w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
            u = np.random.uniform(low=0, high=1)
            if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
                    u):  # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
                return w

    @staticmethod
    def _sample_orthonormal_to(mu, dim):
        """Sample a unit vector orthogonal to ``mu`` (Gram-Schmidt on a random
        Gaussian draw).
        """
        v = Variable(torch.randn(dim))
        rescale_value = mu.dot(v) / mu.norm()
        proj_mu_v = mu * rescale_value.expand(dim)
        ortho = v - proj_mu_v
        ortho_norm = torch.norm(ortho)
        return ortho / ortho_norm.expand_as(ortho)

class EditEncoderTest(unittest.TestCase):
    """Placeholder test case for EditEncoder.

    The original redundant ``__init__`` (which only forwarded its arguments to
    ``super``) has been removed; ``unittest.TestCase``'s own constructor is
    sufficient.
    """

    def test1(self):
        # TODO: add real assertions exercising EditEncoder.
        pass
