import math

import torch
import torch.nn as nn

class TaggingProjector(nn.Module):
    """Per-relation tagging head.

    Projects handshaking hidden states to a 3-way tag distribution for each
    relation with an independent linear layer per relation.

    Args:
        hidden_size: int, size of the incoming hidden vectors.
        num_relations: int, number of relation types (one Linear each).
        name: str, kept for backward compatibility with older checkpoints
            that used manually registered parameter names.
    """

    def __init__(self, hidden_size, num_relations, name='proj', **kwargs):
        super().__init__()
        self.name = name
        # Use nn.ModuleList instead of a plain Python list + manual
        # register_parameter: a plain list hides the submodules from
        # .to()/.cuda()/state_dict machinery and breaks under meta-device
        # init or sharded training. NOTE: this changes state_dict keys from
        # '{name}_weights_{i}'/'{name}_bias_{i}' to 'fc_layers.{i}.weight'/
        # 'fc_layers.{i}.bias'; remap old checkpoints if any exist.
        self.fc_layers = nn.ModuleList(
            nn.Linear(hidden_size, 3) for _ in range(num_relations))

    def forward(self, hidden, **kwargs):
        """Project hiddens to tags for each relation.

        Args:
            hidden: Tensor, shape (batch_size, 1+2+...+seq_len, hidden_size)

        Returns:
            outputs: Tensor, shape (batch_size, num_relations, 1+2+...+seq_len, num_tags=3),
                softmax-normalized over the last (tag) dimension.
        """
        outputs = [fc(hidden) for fc in self.fc_layers]
        # (batch, num_relations, pairs, 3)
        outputs = torch.stack(outputs, dim=1)
        return torch.softmax(outputs, dim=-1)


class DistanceEmbedding(nn.Module):
    """Fixed sinusoidal distance embedding (Transformer-style positional table).

    Builds a non-trainable (max_positions, embedding_size) table where
    table[d, 2i]   = sin(d / 10000**(2i / embedding_size))
    table[d, 2i+1] = cos(d / 10000**(2i / embedding_size))

    Args:
        max_positions: int, number of distances covered by the table.
        embedding_size: int, dimensionality of each embedding vector.
    """

    def __init__(self, max_positions=512, embedding_size=768, **kwargs):
        super().__init__()
        self.max_positions = max_positions
        self.embedding_size = embedding_size
        # Assigning an nn.Parameter attribute auto-registers it; the old
        # extra register_parameter('distance_embedding', ...) duplicated the
        # same tensor under a second state_dict key and is dropped.
        self.dist_embedding = self._init_embedding_table()

    def _init_embedding_table(self):
        # Fix: the table must be float32, not Python `float` (=float64);
        # a float64 table cannot be added in-place to float32 hiddens.
        matrix = torch.zeros(self.max_positions, self.embedding_size,
                             dtype=torch.float32)
        position = torch.arange(self.max_positions, dtype=torch.float32).unsqueeze(1)
        # One divisor per sin/cos pair: 10000**(2i / embedding_size).
        pair_index = torch.arange(0, self.embedding_size, 2, dtype=torch.float32)
        div_term = torch.pow(10000.0, pair_index / self.embedding_size)
        matrix[:, 0::2] = torch.sin(position / div_term)
        # For odd embedding_size the cos half has one column fewer.
        n_cos = matrix[:, 1::2].size(1)
        matrix[:, 1::2] = torch.cos(position / div_term[:n_cos])
        return nn.Parameter(data=matrix, requires_grad=False)

    def forward(self, inputs, **kwargs):
        """Distance embedding for every (start, end) handshaking pair.

        Args:
            inputs: Tensor, shape (batch_size, seq_len, hidden_size); only
                batch_size and seq_len are read.

        Returns:
            embedding: Tensor, shape (batch_size, 1+2+...+seq_len, embedding_size)

        Raises:
            ValueError: if seq_len exceeds max_positions.
        """
        batch_size, seq_len = inputs.size(0), inputs.size(1)
        if seq_len > self.max_positions:
            raise ValueError(
                'seq_len {} exceeds max_positions {}'.format(seq_len, self.max_positions))
        # Distances 0..k-1 for each start token, k = seq_len, seq_len-1, ..., 1.
        segs = [self.dist_embedding[:index, :] for index in range(seq_len, 0, -1)]
        segs = torch.cat(segs, dim=0)
        embedding = segs[None, :, :].repeat(batch_size, 1, 1)
        return embedding


from .layers.handshaking import ClnHandshaking
class ClnTPLinkerHead(nn.Module):
    """TPLinker decoding head over CLN handshaking features.

    Produces head-to-tail entity scores and per-relation head-to-head /
    tail-to-tail tag scores from encoder hidden states.

    Args:
        hidden_size: int, hidden size of the upstream encoder.
        num_relations: int, number of relation types.
        max_positions: int, capacity of the optional distance embedding table.
        inner_encoder: optional module forwarded to ClnHandshaking.
        add_distance_embedding: bool, whether to add sinusoidal distance
            embeddings to the handshaking features.
    """

    def __init__(self, hidden_size, num_relations, max_positions=512,
                 inner_encoder=None, add_distance_embedding=False, **kwargs) -> None:
        super(ClnTPLinkerHead, self).__init__()
        self.handshaking = ClnHandshaking(hidden_size, inner_encoder)
        self.h2t_proj = nn.Linear(hidden_size, 2)
        self.h2h_proj = TaggingProjector(hidden_size, num_relations, name='proj4head2head')
        self.t2t_proj = TaggingProjector(hidden_size, num_relations, name='proj4tail2tail')
        self.add_distance_embedding = add_distance_embedding
        if self.add_distance_embedding:
            self.distance_embedding = DistanceEmbedding(max_positions, embedding_size=hidden_size)

    def forward(self, hiddens, **kwargs):
        """TPLinker model forward pass.

        Args:
            hiddens: Tensor, output of BERT or BiLSTM, shape (batch_size, seq_len, hidden_size)

        Returns:
            h2t_hidden: Tensor, shape (batch_size, 1+2+...+seq_len, 2),
                logits for entity recognization
            h2h_hidden: Tensor, shape (batch_size, num_relations, 1+2+...+seq_len, 3),
                logits for relation recognization
            t2t_hidden: Tensor, shape (batch_size, num_relations, 1+2+...+seq_len, 3),
                logits for relation recognization
        """
        handshaking_hidden = self.handshaking(hiddens)
        h2t_hidden = handshaking_hidden
        rel_hidden = handshaking_hidden
        if self.add_distance_embedding:
            # Bug fix: h2t_hidden and rel_hidden aliased the same tensor, and
            # the original in-place `+=` mutated it, so rel_hidden ended up
            # with the distance embedding added TWICE (and in-place mutation
            # of an autograd intermediate can raise). Compute the embedding
            # once and add out-of-place.
            dist = self.distance_embedding(hiddens)
            h2t_hidden = h2t_hidden + dist
            rel_hidden = rel_hidden + dist
        h2t_hidden = self.h2t_proj(h2t_hidden)
        h2h_hidden = self.h2h_proj(rel_hidden)
        t2t_hidden = self.t2t_proj(rel_hidden)
        return h2t_hidden, h2h_hidden, t2t_hidden
    
from transformers import BertModel
class ClnTPLinkerBert(nn.Module):
    """BERT encoder topped with a CLN TPLinker head.

    Args:
        bert_model_path: path or identifier passed to BertModel.from_pretrained.
        num_relations: int, number of relation types for the head.
        inner_encoder: optional module forwarded to the TPLinker head.
        add_distance_embedding: bool, enable sinusoidal distance embeddings.
    """

    def __init__(self, bert_model_path, num_relations, inner_encoder=None, add_distance_embedding=False, **kwargs):
        super().__init__()
        self.bert = BertModel.from_pretrained(bert_model_path)
        self.tplinker = ClnTPLinkerHead(
            hidden_size=self.bert.config.hidden_size,
            num_relations=num_relations,
            add_distance_embedding=add_distance_embedding,
            max_positions=512,
            inner_encoder=inner_encoder)

    def forward(self, input_ids, attention_mask, token_type_ids, **kwargs):
        """Encode tokens with BERT, then decode TPLinker tag scores.

        Args:
            input_ids: Tensor, shape (batch_size, seq_len)
            attention_mask: Tensor, shape (batch_size, seq_len)
            token_type_ids: Tensor, shape (batch_size, seq_len)

        Returns:
            dict with a single "logits" entry mapping tag names
            ("h2t", "h2h", "t2t") to the head's output tensors.
        """
        sequence_output = self.bert(input_ids, attention_mask, token_type_ids)[0]
        h2t, h2h, t2t = self.tplinker(sequence_output)
        return {
            "logits": {
                "h2t": h2t,
                "h2h": h2h,
                "t2t": t2t,
            }
        }