import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class TitleEncoder(nn.Module):
    """Encode a batch of padded title word-id sequences into one vector each.

    Pipeline: word embedding -> 2-layer Transformer encoder -> masked mean
    pooling over non-pad positions -> linear projection.
    """

    def __init__(self, hyper_params):
        """
        Args:
            hyper_params: dict with at least
                'word_cnt'    -- vocabulary size (index 0 is the pad token),
                'feature_dim' -- embedding / model width (must be divisible
                                 by nhead=2).
        """
        super(TitleEncoder, self).__init__()

        self.embedding = nn.Embedding(
            num_embeddings=hyper_params['word_cnt'],
            embedding_dim=hyper_params['feature_dim'],
            padding_idx=0
        )

        self.trans_encoder = nn.TransformerEncoderLayer(
            d_model=hyper_params['feature_dim'],
            nhead=2,
            batch_first=True,
            dim_feedforward=64,
            dropout=0.2
        )
        self.transformer = nn.TransformerEncoder(
            self.trans_encoder,
            num_layers=2
        )
        self.linear_o = nn.Linear(hyper_params['feature_dim'], hyper_params['feature_dim'])

    def forward(self, title):
        """
            Input: b x l  (int word ids, 0 = padding)
            Output: b x d
        """
        # [b x l] -> [b x l x d]
        embed = self.embedding(title)

        # True at real tokens, False at padding.
        mask = (title != 0)
        # src_key_padding_mask expects True at positions to IGNORE.
        encoding = self.transformer(embed, src_key_padding_mask=~mask)

        # BUG FIX: the key-padding mask only stops pad positions from being
        # attended TO; the transformer still produces non-zero outputs AT
        # those positions, so they must be zeroed before mean pooling.
        encoding = encoding.masked_fill(~mask.unsqueeze(-1), 0.0)

        # Masked mean over the sequence; clamp avoids 0/0 on an all-pad row.
        denom = torch.sum(mask, dim=1, keepdim=True).clamp(min=1)
        out = torch.sum(encoding, dim=1) / denom
        out = self.linear_o(out)

        return out


class FM(nn.Module):
    """Degree-2 Factorization Machine scoring head.

    y = <w1, x> + bias + 0.5 * sum_f [ (x @ V)_f^2 - (x^2 @ V^2)_f ]
    which equals the sum of all pairwise feature interactions, computed
    in O(d * k) instead of O(d^2).
    """

    def __init__(self, total_dim, fm_dim):
        """
        Args:
            total_dim: input feature dimension d.
            fm_dim: rank k of the pairwise-interaction factor matrix.
        """
        super(FM, self).__init__()

        # First-order term (global bias comes from nn.Linear's bias).
        self.w1 = nn.Linear(total_dim, 1)
        # Second-order factor matrix V [d x k]. Assigning an nn.Parameter
        # attribute registers it automatically, so the original's explicit
        # register_parameter('w2', ...) was redundant; requires_grad=True
        # is the nn.Parameter default.
        self.w2 = nn.Parameter(torch.empty((total_dim, fm_dim)))
        nn.init.kaiming_uniform_(self.w2, a=math.sqrt(5))

    def forward(self, x):
        """
            Input: b x d
            Output: b
        """
        first_order = self.w1(x).squeeze(1)
        # Standard FM identity for the sum of pairwise interactions.
        second_order = 0.5 * torch.sum((x @ self.w2)**2 - (x**2) @ (self.w2**2), dim=1)

        y = first_order + second_order
        return y


class NewFMModel(nn.Module):
    """FM-based recommender: user/book features are embedded, fused with a
    Transformer encoder over the 6 field vectors, then scored by a
    factorization machine head."""

    def __init__(self, hyper_params):
        super(NewFMModel, self).__init__()
        self.hyper_params = hyper_params

        # Encoder that turns a padded title word-id sequence into one vector.
        self.title_encoder = TitleEncoder(hyper_params)

        # One embedding table per categorical feature, all of equal width.
        # (Creation order matches the original so seeded init is identical.)
        dim = hyper_params['feature_dim']
        self.book_id_embedding = nn.Embedding(hyper_params['book_cnt'], dim)
        self.user_id_embedding = nn.Embedding(hyper_params['user_cnt'], dim)
        self.type_embedding = nn.Embedding(hyper_params['type_cnt'], dim)
        self.grade_embedding = nn.Embedding(hyper_params['grade_cnt'], dim)
        self.school_embedding = nn.Embedding(hyper_params['school_cnt'], dim)

        # Transformer that lets the 6 field vectors attend to each other.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=dim,
            nhead=2,
            dim_feedforward=64,
            dropout=0.3,
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(
            encoder_layer=self.encoder_layer,
            num_layers=2
        )

        # FM scores the concatenation of the 6 fused field vectors.
        self.fm = FM(total_dim=6 * dim, fm_dim=hyper_params['fm_dim'])

    def forward(self, user_id, user_school, user_grade, book_id, book_title, book_type):
        """Score (user, book) pairs; returns a 1-D tensor of length b."""
        # Embed every field to [b x d].
        fields = [
            self.user_id_embedding(user_id),
            self.school_embedding(user_school),
            self.grade_embedding(user_grade),
            self.book_id_embedding(book_id),
            self.title_encoder(book_title),
            self.type_embedding(book_type),
        ]

        # Treat the 6 field vectors as a length-6 sequence: [b x 6 x d].
        tokens = torch.stack(fields, dim=1)
        fused = self.transformer(tokens)

        # Flatten to [b x 6d] and score with the FM head.
        flat = fused.reshape(fused.size(0), -1)
        return self.fm(flat)
