import torch
import torch.nn as nn
from .Model import Model


class SUKE(Model):
    """SUKE knowledge-graph-embedding model.

    Scores a triple (h, r, t) with a multiplicative (DistMult-style)
    interaction of the embeddings, then maps that raw score through two
    independently parameterised sigmoid gates to produce a *structure*
    confidence ``q_stru`` and an *uncertainty* confidence ``q_unce``.

    Args:
        ent_tot: number of entities (rows of the entity embedding table).
        rel_tot: number of relations (rows of the relation embedding table).
        dim: embedding dimensionality.
        margin, epsilon: when BOTH are given, embeddings are initialised
            uniformly in ``[-(margin+epsilon)/dim, (margin+epsilon)/dim]``;
            otherwise Xavier-uniform initialisation is used.
    """

    def __init__(self, ent_tot, rel_tot, dim=100, margin=None, epsilon=None):
        super(SUKE, self).__init__(ent_tot, rel_tot)

        self.dim = dim
        self.margin = margin
        self.epsilon = epsilon
        self.ent_embeddings = nn.Embedding(self.ent_tot, self.dim)
        self.rel_embeddings = nn.Embedding(self.rel_tot, self.dim)

        # Learnable scalar slope (p) and scale (b) of the two sigmoid gates.
        # nn.Parameter already implies requires_grad=True, so the previous
        # torch.randn(1, requires_grad=True) indirection is unnecessary.
        self.p_stru = nn.Parameter(torch.randn(1))
        self.b_stru = nn.Parameter(torch.randn(1))
        self.p_unce = nn.Parameter(torch.randn(1))
        self.b_unce = nn.Parameter(torch.randn(1))

        if margin is None or epsilon is None:
            nn.init.xavier_uniform_(self.ent_embeddings.weight.data)
            nn.init.xavier_uniform_(self.rel_embeddings.weight.data)
        else:
            # Fixed (non-trainable) half-width used only for uniform init;
            # kept as a Parameter so it appears in the state dict, matching
            # the original behaviour.
            self.embedding_range = nn.Parameter(
                torch.Tensor([(self.margin + self.epsilon) / self.dim]),
                requires_grad=False
            )
            nn.init.uniform_(
                tensor=self.ent_embeddings.weight.data,
                a=-self.embedding_range.item(),
                b=self.embedding_range.item()
            )
            nn.init.uniform_(
                tensor=self.rel_embeddings.weight.data,
                a=-self.embedding_range.item(),
                b=self.embedding_range.item()
            )

    def _calc(self, h, t, r, mode):
        """Return ``[q_stru, q_unce]``: two flattened per-triple confidences.

        Args:
            h, t, r: embedding tensors for heads, tails and relations.
            mode: ``'normal'``, ``'head_batch'`` or ``'tail_batch'``
                (OpenKE negative-sampling batching modes — TODO confirm
                against the sampler).
        """
        if mode != 'normal':
            # Reshape so the negative samples broadcast against the
            # relation batch: (-1, batch, dim).
            h = h.view(-1, r.shape[0], h.shape[-1])
            t = t.view(-1, r.shape[0], t.shape[-1])
            r = r.view(-1, r.shape[0], r.shape[-1])
        if mode == 'head_batch':
            score = h * (r * t)
        else:
            score = (h * r) * t
        # BUG FIX: the reduction over the embedding dimension was previously
        # inside the 'else' branch only, so 'head_batch' scores were never
        # summed and kept a trailing dim-sized axis. The two product forms
        # are associatively identical; only the reduction must differ — it
        # must apply to both branches.
        score = torch.sum(score, -1)

        # q = 1 / (1 + exp(-(p * score * b))) == sigmoid(p * score * b).
        # torch.sigmoid replaces the manual formula (which also mutated an
        # intermediate with in-place exp_) with an equivalent, numerically
        # stable builtin.
        q_stru = torch.sigmoid(self.p_stru * score * self.b_stru)
        q_unce = torch.sigmoid(self.p_unce * score * self.b_unce)

        return [q_stru.flatten(), q_unce.flatten()]

    def forward(self, data):
        """Look up batch embeddings and return ``[q_stru, q_unce]``.

        ``data`` is a dict with LongTensor-convertible 'batch_h', 'batch_t',
        'batch_r' index tensors and a string 'mode'.
        """
        batch_h = data['batch_h'].long()
        batch_t = data['batch_t'].long()
        batch_r = data['batch_r'].long()
        mode = data['mode']
        h = self.ent_embeddings(batch_h)
        t = self.ent_embeddings(batch_t)
        r = self.rel_embeddings(batch_r)
        return self._calc(h, t, r, mode)

    def regularization(self, data):
        """Mean squared-embedding regulariser over one batch of triples."""
        batch_h = data['batch_h'].long()
        batch_t = data['batch_t'].long()
        batch_r = data['batch_r'].long()
        h = self.ent_embeddings(batch_h)
        t = self.ent_embeddings(batch_t)
        r = self.rel_embeddings(batch_r)
        regul = (torch.mean(h ** 2) + torch.mean(t ** 2) + torch.mean(r ** 2)) / 3
        return regul

    def l3_regularization(self):
        """Cubed L3-norm regulariser over the full embedding tables."""
        return (self.ent_embeddings.weight.norm(p=3) ** 3 + self.rel_embeddings.weight.norm(p=3) ** 3)

    def predict(self, data):
        """Score a batch for ranking and return a NumPy array.

        BUG FIX: ``forward`` returns a list ``[q_stru, q_unce]``, so the
        previous ``-self.forward(data)`` raised TypeError (a list cannot be
        negated) — ``predict`` could never run. The two confidences are
        combined with equal weights, mirroring the ``alpha = beita = 0.5``
        weighting left in the original commented-out code, then negated so
        that lower values indicate better triples (OpenKE ranking
        convention — TODO confirm against the evaluation harness).
        """
        q_stru, q_unce = self.forward(data)
        score = -(0.5 * q_stru + 0.5 * q_unce)
        return score.cpu().data.numpy()
