import torch
from torch import nn
from torch.nn import Embedding, Linear, LayerNorm
from torch.nn import functional as F

from exp.modules import MultiHeadSelfAttention, AdditiveAttention
from general.positionalEncoder import PositionalEncoder

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class NewsEncoder(torch.nn.Module):
    """Encode one news article into a single dense vector.

    Four views of the article -- category, subcategory, title, abstract --
    are each encoded to a vector, stacked, and fused by additive attention.
    Depending on ``config.use_title_entity`` / ``config.use_abstract_entity``,
    the title and abstract are represented either by their words or by their
    linked knowledge-graph entities.

    Flag combinations exercised by ``forward``:
      * use_title_entity=True,  use_abstract_entity=True
      * use_title_entity=False, use_abstract_entity=True
      * use_title_entity=False, use_abstract_entity=False
    NOTE(review): use_title_entity=True with use_abstract_entity=False would
    reference modules that are never constructed (``self.entity_embedding``,
    ``self.word_positional``) -- presumably an unsupported combination; confirm.
    """

    def __init__(self, config, pretrained_word_embedding=None, pretrained_entity_embedding=None):
        """
        Args:
            config: hyper-parameter namespace (num_categories, embedding dims,
                attention-head counts, the two use_*_entity flags, ...).
            pretrained_word_embedding: optional (num_words, word_embedding_dim)
                tensor; when given it is fine-tuned (freeze=False) and row 0
                stays a zero padding vector (padding_idx=0).
            pretrained_entity_embedding: optional
                (num_entities, entity_embedding_dim) tensor, same treatment.
        """
        super().__init__()
        self.config = config

        # Category and subcategory share one embedding table; the linear layer
        # projects both to word_embedding_dim on the word-based pathway.
        self.category_embedding = Embedding(num_embeddings=config.num_categories,
                                            embedding_dim=config.category_embedding_dim, padding_idx=0)
        self.category_linear = Linear(in_features=config.category_embedding_dim,
                                      out_features=config.word_embedding_dim)

        # Word-level embedding for title/abstract text. freeze=False lets the
        # pretrained vectors be fine-tuned; padding_idx=0 keeps index 0 fixed
        # at zero and excluded from the gradient.
        if pretrained_word_embedding is None:
            self.word_level_embedding = Embedding(num_embeddings=config.num_words,
                                                  embedding_dim=config.word_embedding_dim, padding_idx=0)
        else:
            self.word_level_embedding = nn.Embedding.from_pretrained(
                embeddings=pretrained_word_embedding, freeze=False, padding_idx=0
            )

        # Entity pathway: embedding -> positional encoding -> multi-head
        # self-attention -> additive-attention pooling.  (The original code
        # duplicated this construction verbatim in two branches; the creation
        # order below matches the original, so state_dict keys and parameter
        # initialisation order are unchanged.)
        if self.config.use_abstract_entity:
            if pretrained_entity_embedding is None:
                self.entity_embedding = nn.Embedding(config.num_entities,
                                                     config.entity_embedding_dim,
                                                     padding_idx=0)
            else:
                self.entity_embedding = nn.Embedding.from_pretrained(
                    pretrained_entity_embedding, freeze=False, padding_idx=0)
            # NOTE(review): num_hiddens=100 is hard-coded and presumably must
            # equal config.entity_embedding_dim -- confirm.
            self.entity_positional = PositionalEncoder(num_hiddens=100, dropout=0.2)
            self.entity_msa = MultiHeadSelfAttention(config.entity_embedding_dim,
                                                     config.entity_num_attention_heads)
            self.entity_aa = AdditiveAttention(config.entity_query_vector_dim, config.entity_embedding_dim)

            if not self.config.use_title_entity:
                # Learned affine map from entity space to word space, applied
                # in forward() as tanh(v @ W + b).
                self.transform_matrix = nn.Parameter(
                    torch.empty(self.config.entity_embedding_dim,
                                self.config.word_embedding_dim).uniform_(-0.1, 0.1))
                self.transform_bias = nn.Parameter(
                    torch.empty(self.config.word_embedding_dim).uniform_(-0.1, 0.1))
                # Unused by forward(); kept so existing checkpoints still load.
                self.entity_linear = Linear(in_features=config.entity_embedding_dim,
                                            out_features=config.word_embedding_dim)

        if not self.config.use_title_entity:
            # Word-based title/abstract encoders.
            # NOTE(review): num_hiddens=300 presumably must equal
            # config.word_embedding_dim -- confirm.
            self.word_positional = PositionalEncoder(num_hiddens=300, dropout=0.2)
            self.word_msa = MultiHeadSelfAttention(config.word_embedding_dim,
                                                   config.num_attention_heads)
            self.word_aa = AdditiveAttention(config.query_vector_dim, config.word_embedding_dim)

            self.final_additive_attention = AdditiveAttention(config.query_vector_dim, config.word_embedding_dim)
        else:
            self.final_additive_attention = AdditiveAttention(config.entity_query_vector_dim,
                                                              config.entity_embedding_dim)

        self.dropout = nn.Dropout(p=config.dropout_probability)

    def _encode_entities(self, entity_ids):
        """Encode a batch of entity-id sequences to (batch_size, entity_embedding_dim).

        Pipeline: embedding -> positional encoding -> dropout -> multi-head
        self-attention -> additive-attention pooling.
        """
        embed = self.entity_embedding(entity_ids.to(device))
        embed = self.dropout(self.entity_positional(embed))
        return self.entity_aa(self.entity_msa(embed))

    def _encode_words(self, word_ids):
        """Encode a batch of word-id sequences to (batch_size, word_embedding_dim).

        Same pipeline as ``_encode_entities`` but over the word-level modules.
        """
        embed = self.word_level_embedding(word_ids.to(device))
        embed = self.dropout(self.word_positional(embed))
        return self.word_aa(self.word_msa(embed))

    def forward(self, news):
        """Encode a batch of news articles.

        Args:
            news: dict of LongTensors, e.g.
                {
                    "category": batch_size,
                    "subcategory": batch_size,
                    "title": batch_size * num_words_title,
                }
                plus "abstract", "title_entities" and/or "abstract_entities"
                depending on the configured flags.

        Returns:
            Tensor of shape (batch_size, embed_dim): the fused news vector.
        """
        # Category / subcategory vectors: (batch_size, embed_dim).  On the
        # word-based path they are first projected to word_embedding_dim.
        category_vector = self.category_embedding(news["category"].to(device))
        subcategory_vector = self.category_embedding(news["subcategory"].to(device))
        if not self.config.use_title_entity:
            category_vector = F.relu(self.category_linear(category_vector))
            subcategory_vector = F.relu(self.category_linear(subcategory_vector))
        category_vector, subcategory_vector = self.dropout(category_vector), self.dropout(subcategory_vector)

        # Abstract vector: (batch_size, embed_dim), from one of three sources.
        if self.config.use_abstract_entity and self.config.use_title_entity:
            # Both sides live in entity space; no projection needed.
            abstract_vector = self._encode_entities(news["abstract_entities"])
        elif self.config.use_abstract_entity:
            # Project the pooled entity vector into word-embedding space so it
            # can be stacked with the word-based title/category vectors.
            entity_vector = self._encode_entities(news["abstract_entities"])
            abstract_vector = torch.tanh(
                torch.add(torch.matmul(entity_vector, self.transform_matrix),
                          self.transform_bias))
        else:
            # Plain word-level encoding of the abstract text.
            abstract_vector = self._encode_words(news['abstract'])

        # Title vector: (batch_size, embed_dim).
        if self.config.use_title_entity:
            title_vector = self._encode_entities(news["title_entities"])
        else:
            title_vector = self._encode_words(news["title"])

        # Stack the four views -> (batch_size, 4, embed_dim), then fuse with
        # additive attention -> (batch_size, embed_dim).
        news_vector = torch.stack([category_vector, subcategory_vector, title_vector, abstract_vector],
                                  dim=1)
        final_news_vector = self.final_additive_attention(news_vector)
        return final_news_vector
