import torch
from torch import nn
from torch.nn import Embedding, Linear, LayerNorm

from exp.modules import MultiHeadSelfAttention, AdditiveAttention
from general.positionalEncoder import PositionalEncoder

# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")


class UserEncoder(torch.nn.Module):
    """Encode a user's short-term interest from their clicked-news history.

    The clicked-news vectors go through positional encoding, multi-head
    self-attention, and additive attention to produce one user vector.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        # User-id embedding uses the title embedding dim so the user vector
        # can be dot-multiplied with news vectors for click prediction.
        # NOTE(review): currently unused in forward() — the long-term
        # interest path is disabled there; kept so checkpoints stay compatible.
        self.user_embedding = Embedding(num_embeddings=config.num_users,
                                        embedding_dim=config.word_embedding_dim,
                                        padding_idx=0)

        # Hyper-parameters for encoding the click history depend on whether
        # title+entity inputs are used.
        if self.config.use_title_entity:
            pe_hiddens = 100
            attn_dim = self.config.entity_embedding_dim
            n_heads = self.config.entity_num_attention_heads
            query_dim = self.config.entity_query_vector_dim
        else:
            pe_hiddens = 300
            attn_dim = self.config.word_embedding_dim
            n_heads = self.config.num_attention_heads
            query_dim = self.config.query_vector_dim

        self.positional = PositionalEncoder(num_hiddens=pe_hiddens,
                                            dropout=config.dropout_probability)
        self.multihead_self_attention = MultiHeadSelfAttention(attn_dim,
                                                               n_heads)
        self.additive_attention = AdditiveAttention(query_dim, attn_dim)

        # Projects the concatenated long+short vector back to embedding dim.
        # NOTE(review): unused while the long-term path below is disabled.
        self.final_linear = Linear(in_features=2 * config.word_embedding_dim,
                                   out_features=config.word_embedding_dim)

        # TODO: dropout is constructed but never applied in forward().
        self.dropout = nn.Dropout(p=config.dropout_probability)

    def forward(self, user, clicked_news_vector):
        """Return the user's representation vector.

        Args:
            user: user id (currently unused — the long-term interest path
                is disabled).
            clicked_news_vector: encoded vectors of the user's clicked news.

        Returns:
            The short-term user-interest vector.
        """
        # Long-term interest path (disabled):
        # user_long_vector = self.user_embedding(user.to(device))

        encoded = self.positional(clicked_news_vector)
        attended = self.multihead_self_attention(encoded)
        user_short_vector = self.additive_attention(attended)

        # When the long-term path is re-enabled, concatenate and project
        # (torch.cat in older versions, torch.concat in newer ones):
        # user_vector = torch.cat([user_long_vector, user_short_vector], dim=1)

        return user_short_vector
