import torch
import torch.nn as nn
import math
import os

from utils.log_output import simple_text_log


class TitleEncoder(nn.Module):
    """Encode a batch of padded title token sequences into fixed-size vectors.

    Token id 0 is the padding index: it maps to a zero embedding, is excluded
    from attention (as a key), and is excluded from the mean pooling.
    """

    def __init__(self, hyper_params):
        """
        Args:
            hyper_params: dict with keys
                'word_cnt'    -- vocabulary size (number of embeddings)
                'feature_dim' -- embedding / transformer model dimension d
        """
        super(TitleEncoder, self).__init__()

        self.embedding = nn.Embedding(
            num_embeddings=hyper_params['word_cnt'],
            embedding_dim=hyper_params['feature_dim'],
            padding_idx=0
        )

        self.trans_encoder = nn.TransformerEncoderLayer(
            d_model=hyper_params['feature_dim'],
            nhead=2,
            batch_first=True,
            dim_feedforward=64,
            dropout=0.2
        )
        self.transformer = nn.TransformerEncoder(
            self.trans_encoder,
            num_layers=2
        )
        self.linear_o = nn.Linear(hyper_params['feature_dim'], hyper_params['feature_dim'])

    def forward(self, title):
        """
            Input: title -- b x l int tensor of token ids (0 = padding)
            Output: b x d
        """
        # [b x l] -> [b x l x d]
        embed = self.embedding(title)

        # True at real tokens, False at padding.
        mask = (title != 0)
        # src_key_padding_mask expects True at positions to IGNORE, hence ~mask.
        encoding = self.transformer(embed, src_key_padding_mask=~mask)

        # Masked mean pool. The padding mask only removes padded tokens as
        # attention *keys*; padded query positions still produce outputs, so
        # they must be zeroed out before averaging over the real tokens
        # (the original summed them in, contaminating the mean).
        encoding = encoding * mask.unsqueeze(-1)
        # Clamp prevents 0/0; note an all-padding row can still NaN inside
        # the attention itself (softmax over a fully masked row).
        lengths = mask.sum(dim=1, keepdim=True).clamp(min=1)

        # [b x l x d] -> [b x d]
        out = encoding.sum(dim=1) / lengths
        out = self.linear_o(out)

        return out


class FM(nn.Module):
    """Degree-2 Factorization Machine scoring head.

    Score = w1 . x + 0.5 * sum_f [ (sum_i v_if x_i)^2 - sum_i v_if^2 x_i^2 ],
    i.e. a linear term plus all pairwise feature interactions computed with
    the O(d*k) identity instead of the naive O(d^2) pair loop.
    """

    def __init__(self, total_dim, fm_dim, len_threshold=20):
        """
        Args:
            total_dim: input feature dimension d.
            fm_dim: latent factor dimension k of the interaction matrix.
            len_threshold: kept for interface compatibility; currently unused.
        """
        super(FM, self).__init__()
        self.len_threshold = len_threshold

        self.w1 = nn.Linear(total_dim, 1)
        # Assigning an nn.Parameter attribute registers it automatically;
        # the original's explicit register_parameter() call was redundant,
        # as was requires_grad=True (the nn.Parameter default).
        self.w2 = nn.Parameter(torch.empty((total_dim, fm_dim)))
        nn.init.kaiming_uniform_(self.w2, a=math.sqrt(5))

    def forward(self, x, user_len):
        """
            Input:  x -- b x d;  user_len -- unused (interface compatibility)
            Output: b
        """
        first_order = self.w1(x).squeeze(1)
        second_order = 0.5 * torch.sum((x @ self.w2)**2 - (x**2) @ (self.w2**2), dim=1)
        return first_order + second_order


class NewFMModel(nn.Module):
    """Transformer-over-features FM recommender.

    Embeds seven categorical id features plus a transformer-encoded book
    title, lets a transformer layer mix the resulting 8 feature embeddings,
    then scores the flattened 8*d encoding with a factorization machine.
    """

    def __init__(self, hyper_params):
        """
        Args:
            hyper_params: dict with the *_cnt vocabulary sizes, 'feature_dim',
                'fm_dim' and 'device' keys used below.
        """
        super(NewFMModel, self).__init__()
        self.hyper_params = hyper_params

        # Define book title encoder
        self.title_encoder = TitleEncoder(hyper_params)

        # One embedding table per categorical feature, all of width feature_dim.
        self.book_id_embedding = nn.Embedding(
            num_embeddings=hyper_params['book_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.user_id_embedding = nn.Embedding(
            num_embeddings=hyper_params['user_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.type_embedding = nn.Embedding(
            num_embeddings=hyper_params['type_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.grade_embedding = nn.Embedding(
            num_embeddings=hyper_params['grade_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.school_embedding = nn.Embedding(
            num_embeddings=hyper_params['school_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.date_embedding = nn.Embedding(
            num_embeddings=hyper_params['date_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )
        self.sem_embedding = nn.Embedding(
            num_embeddings=hyper_params['sem_cnt'],
            embedding_dim=hyper_params['feature_dim']
        )

        # Transformer layer that mixes the 8 per-feature embeddings.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=hyper_params['feature_dim'],
            nhead=2,
            dim_feedforward=64,
            dropout=0.3,
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(
            encoder_layer=self.encoder_layer,
            num_layers=2
        )

        # FM scoring head over the concatenated 8 feature encodings.
        self.fm = FM(
            total_dim=8*hyper_params['feature_dim'],
            fm_dim=hyper_params['fm_dim']
        )

    def load_previous_state(self, state_dict_path: str):
        """Best-effort warm start: copy a previously saved state into this
        model. Embedding tables may have grown since the save, so each saved
        weight is copied into the leading rows of the current table.
        Falls back to training from scratch if the file cannot be loaded.
        """
        print('model: Loading previous state...')
        try:
            # NOTE(review): torch.load unpickles arbitrary objects; if the
            # path could ever be untrusted, consider weights_only=True.
            state_dict = torch.load(state_dict_path, map_location=self.hyper_params['device'])
        except Exception as err:
            print(err)
            simple_text_log('error', str(err))
            simple_text_log('train', 'model: Fail to load previous state. Training a new model...')
            return

        self.title_encoder.embedding.weight.data[:state_dict['title_embedding'].size(0)] = state_dict['title_embedding']
        self.title_encoder.transformer.load_state_dict(state_dict['title_transformer'])
        self.title_encoder.linear_o.load_state_dict(state_dict['title_linear_o'])

        self.book_id_embedding.weight.data[:state_dict['book_id_embedding'].size(0)] = state_dict['book_id_embedding']
        self.user_id_embedding.weight.data[:state_dict['user_id_embedding'].size(0)] = state_dict['user_id_embedding']
        # BUG FIX: the original assigned to `.data` of a temporary slice view
        # (`...[:n].data = saved`), which rebinds the temporary's storage and
        # leaves the real type_embedding weights untouched. Use plain slice
        # assignment like the sibling lines.
        self.type_embedding.weight.data[:state_dict['type_embedding'].size(0)] = state_dict['type_embedding']
        self.grade_embedding.weight.data[:state_dict['grade_embedding'].size(0)] = state_dict['grade_embedding']
        self.school_embedding.weight.data[:state_dict['school_embedding'].size(0)] = state_dict['school_embedding']
        self.date_embedding.weight.data[:state_dict['date_embedding'].size(0)] = state_dict['date_embedding']
        self.sem_embedding.weight.data[:state_dict['sem_embedding'].size(0)] = state_dict['sem_embedding']

        self.transformer.load_state_dict(state_dict['transformer'])
        self.fm.load_state_dict(state_dict['fm'])
        simple_text_log('train', 'Previous TransFM model loaded.')

    def save_state_dict(self, state_dict_path: str):
        """Save all learned weights to `state_dict_path`, creating the parent
        directory if needed. Embedding weights are stored as raw tensors so
        load_previous_state() can copy them into tables of a different size.
        """
        print('model: Saving state_dict...')
        state_dict = {}

        state_dict['title_embedding'] = self.title_encoder.embedding.weight.detach()
        state_dict['title_transformer'] = self.title_encoder.transformer.state_dict()
        state_dict['title_linear_o'] = self.title_encoder.linear_o.state_dict()

        state_dict['book_id_embedding'] = self.book_id_embedding.weight.detach()
        state_dict['user_id_embedding'] = self.user_id_embedding.weight.detach()
        state_dict['type_embedding'] = self.type_embedding.weight.detach()
        state_dict['grade_embedding'] = self.grade_embedding.weight.detach()
        state_dict['school_embedding'] = self.school_embedding.weight.detach()
        state_dict['date_embedding'] = self.date_embedding.weight.detach()
        state_dict['sem_embedding'] = self.sem_embedding.weight.detach()

        state_dict['transformer'] = self.transformer.state_dict()
        state_dict['fm'] = self.fm.state_dict()

        # Guard against a bare filename: os.makedirs('') raises.
        state_dir = os.path.dirname(state_dict_path)
        if state_dir:
            os.makedirs(state_dir, exist_ok=True)
        torch.save(state_dict, state_dict_path)
        print('model: state_dict saved.')

    def forward(self, user_id, user_school, user_grade, user_len, date, sem, book_id, book_title, book_type):
        """
            Input: id tensors of shape b (book_title: b x l)
            Output: b (FM scores)
        """
        uid_embed = self.user_id_embedding(user_id)
        school_embed = self.school_embedding(user_school)
        grade_embed = self.grade_embedding(user_grade)
        bid_embed = self.book_id_embedding(book_id)
        title_embed = self.title_encoder(book_title)
        type_embed = self.type_embedding(book_type)
        date_embed = self.date_embedding(date)
        sem_embed = self.sem_embedding(sem)

        # [b x d] x8 -> [b x 8 x d] -> transformer -> flatten to [b x 8d]
        embed = torch.stack([uid_embed, school_embed, grade_embed, bid_embed, title_embed, type_embed, date_embed, sem_embed], dim=1)
        encoded = self.transformer(embed).reshape(embed.size(0), -1)

        y = self.fm(encoded, user_len)
        return y
