import json
import torch
import gensim
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from gensim.models.word2vec import LineSentence
from configs import Config


class Embedding_pretrained:
    """Train a Word2Vec model on the corpus and export its artifacts.

    Side effects (all driven by paths on ``config``):
      * trains gensim Word2Vec on ``config.word2vec_data`` (one sentence per line),
      * saves the binary keyed vectors to ``config.gensim_save``,
      * dumps the embedding matrix to ``config.gensim_embedding`` (``.npy``),
      * dumps the word -> index mapping to ``config.gensim_word2index`` (JSON).
    """

    def __init__(self, config):
        model = gensim.models.Word2Vec(
            LineSentence(config.word2vec_data),
            vector_size=200, window=5,
            min_count=3, workers=4,
        )
        model.wv.save_word2vec_format(config.gensim_save, binary=True)
        # Reload the vectors from disk so the exported matrix/vocab match
        # exactly what later consumers will load.
        word2vec = gensim.models.keyedvectors.KeyedVectors.load_word2vec_format(config.gensim_save, binary=True)
        np.save(config.gensim_embedding, word2vec.vectors)
        # Fix: the original passed a bare open() to json.dump and never closed
        # the handle; use a context manager so the file is flushed and closed.
        with open(config.gensim_word2index, 'w') as vocab_file:
            json.dump(word2vec.key_to_index, vocab_file)
        print('Gensim train model over!')


class LSTM_pretrained(nn.Module):
    """Next-token language model on top of frozen pretrained word vectors.

    Pipeline: token ids -> frozen embedding lookup -> single-layer LSTM ->
    linear projection to vocabulary logits. The final LSTM hidden state is
    cached so it can later be reused as a sentence embedding.
    Inputs are injected via ``update()`` rather than passed to ``forward()``.
    """

    def __init__(self, config):
        super(LSTM_pretrained, self).__init__()
        # Frozen lookup table initialised from the gensim-exported matrix.
        pretrained_vectors = torch.Tensor(np.load(config.gensim_embedding))
        self.word_embedding = nn.Embedding.from_pretrained(pretrained_vectors, freeze=True)
        self.LSTM = nn.LSTM(config.embedding_dim, config.LSTM_embedding, batch_first=True)
        self.output_layer = nn.Linear(config.LSTM_embedding, config.vocab_num)
        self.device = config.device
        self.vocab_num = config.vocab_num
        self.char_output = None  # logits from the last forward pass, [bs, len, vocab]
        self.h_n = None  # last LSTM hidden state, reused as sentence embedding
        self.input_dict = {
            'sentence_index': None,  # token ids, [bs, len]
            'target_index': None,  # next-token targets, [bs, len]
        }

    def update(self, feed_dict):
        """Copy the entries named in ``input_dict`` onto the model device."""
        target_device = self.device
        for name in self.input_dict:
            self.input_dict[name] = feed_dict[name].to(target_device)

    def forward(self):
        """Return per-position vocabulary logits, shape [bs, len, vocab_num]."""
        token_ids = self.input_dict['sentence_index']          # [bs, len]
        vectors = self.word_embedding(token_ids)               # [bs, len, feature]
        hidden_seq, (self.h_n, _) = self.LSTM(vectors)         # [bs, len, hidden]
        self.char_output = self.output_layer(hidden_seq)       # [bs, len, vocab]
        return self.char_output

    def get_sentence_embedding(self):
        """Hidden state captured by the most recent ``forward()`` call."""
        return self.h_n

    def loss_fn(self):
        """Token-level cross entropy between cached logits and targets."""
        logits = self.char_output.view(-1, self.vocab_num)
        targets = self.input_dict['target_index'].view(-1)
        return F.cross_entropy(logits, targets)


class PMF(nn.Module):
    """Probabilistic Matrix Factorization with hand-written SGD updates.

    ``user_matrix`` / ``item_matrix`` are plain tensors (not ``nn.Parameter``):
    they are updated manually in ``train_step()`` rather than by autograd, and
    must be moved between devices explicitly via ``weight_to()``.
    """

    def __init__(self, config):
        super(PMF, self).__init__()
        self.device = config.device
        # Latent factors drawn from N(0, 0.1): [num_user, K] and [num_item, K].
        self.user_matrix = torch.normal(0, 0.1, (config.num_user, config.PMF_K))
        self.item_matrix = torch.normal(0, 0.1, (config.num_item, config.PMF_K))
        self.l2_regular = config.PMF_l2regular  # L2 penalty on both factor rows
        self.lr = config.PMF_LR
        self.user_path = config.PMF_user_path  # save locations used by callers
        self.item_path = config.PMF_item_path

    def forward(self, user_id, item_id):
        """Predict one rating as the dot product of the two latent rows.

        Fix: dropped the deprecated ``.T`` on the 1-D item row — it was a
        no-op that emits a UserWarning on modern torch.
        """
        user_id, item_id = user_id.to(self.device), item_id.to(self.device)
        output = torch.dot(self.user_matrix[user_id], self.item_matrix[item_id])
        return output

    def batch_forward(self, user_id, item_id):
        """Predict a batch of ratings; returns a tensor of shape [bs].

        Vectorised row-wise dot product — replaces the original Python loop of
        per-sample ``torch.dot`` calls with a single fused operation (same values).
        """
        user_id, item_id = user_id.to(self.device), item_id.to(self.device)
        return (self.user_matrix[user_id] * self.item_matrix[item_id]).sum(dim=1)

    def train_step(self, user_id, item_id, target):  # target: Tensor, [1, ]
        """One manual SGD step on a single (user, item, rating) triple.

        Returns the absolute prediction error computed *before* the update.
        """
        user_id, item_id, target = user_id.to(self.device), item_id.to(self.device), target.to(self.device)
        error = target - torch.dot(self.user_matrix[user_id], self.item_matrix[item_id])
        # Simultaneous update: the RHS tuple is fully evaluated with the OLD
        # factor rows before either row is written back.
        self.user_matrix[user_id], self.item_matrix[item_id] = \
            self.user_matrix[user_id] + self.lr * \
            (error * self.item_matrix[item_id] - self.l2_regular * self.user_matrix[user_id]), \
            self.item_matrix[item_id] + self.lr * \
            (error * self.user_matrix[user_id] - self.l2_regular * self.item_matrix[item_id])
        return error.abs()

    def weight_to(self, device):
        """Move both factor matrices (they are not registered parameters,
        so ``nn.Module.to()`` would not move them)."""
        self.user_matrix = self.user_matrix.to(device)
        self.item_matrix = self.item_matrix.to(device)


class Embedding(nn.Module):
    """Trainable embedding implemented as one-hot encoding + linear projection.

    Equivalent to ``nn.Embedding(in_dim, out_dim)`` plus a bias term: each id
    in ``x`` selects one column of the linear layer's weight.
    """

    def __init__(self, in_dim, out_dim):
        super(Embedding, self).__init__()
        self.in_dim = in_dim  # vocabulary size = width of the one-hot encoding
        self.embedding_lin = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        """Project integer ids ``x`` (any shape) to ``out_dim`` vectors.

        Fix: pass ``num_classes=self.in_dim`` to ``F.one_hot``. Without it the
        one-hot width is inferred as ``x.max() + 1``, which crashes the Linear
        layer whenever a batch does not contain the largest id. Also removed a
        leftover debug ``print``.
        """
        one_hot = F.one_hot(x, num_classes=self.in_dim).float()
        return self.embedding_lin(one_hot)


class Review2Rating(nn.Module):
    """Predict a rating from a review's sentence embedding plus user/item ids.

    The review representation is projected by ``language_trans``; the user and
    item embeddings are fused by element-wise product; both are concatenated
    and pushed through an MLP tower to a scalar rating.
    Inputs are injected via ``update()`` rather than passed to ``forward()``.
    """

    def __init__(self, config):
        super().__init__()
        self.input_dict = {
            'sentence_embedding': None,  # torch.Tensor, [bs, LSTM_embedding]
            'user_id': None,  # torch.Tensor, [bs]
            'item_id': None,  # torch.Tensor, [bs]
            'rating_num': None,  # torch.Tensor, [bs]
        }
        self.vars = config.vars  # variance term scaling the squared-error loss
        self.device = config.device
        self.language_trans = nn.Sequential(
            nn.Linear(config.LSTM_embedding, config.hidden_dim),
            nn.ReLU(),
        )
        self.user_embedding = nn.Embedding(config.num_user, config.hidden_dim)
        self.item_embedding = nn.Embedding(config.num_item, config.hidden_dim)
        # Tower: 2*hidden -> hidden -> hidden//2 -> hidden//4.
        self.MLPs = nn.Sequential(
            nn.Linear(2 * config.hidden_dim, config.hidden_dim),
            nn.ReLU(),
            nn.Linear(config.hidden_dim, config.hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(config.hidden_dim // 2, config.hidden_dim // 4),
            nn.ReLU()
        )
        self.predict = nn.Linear(config.hidden_dim // 4, 1)
        self.rating_predict = None  # last prediction, torch.Tensor [bs, 1]

    def update(self, feed_dict):
        """Copy the entries named in ``input_dict`` onto the model device."""
        target_device = self.device
        for name in self.input_dict:
            self.input_dict[name] = feed_dict[name].to(target_device)

    def forward(self):
        """
                                             rating predict
                                                    ^
                                                    |
                                                   MLPs
                                                    |
        sentence_embedding--language_trans-->[concat, dim=1]
                                                    ^
                                            point-wise multiple
                                              ^             ^
                                     user_embedding     item_embedding
        """
        feeds = self.input_dict
        text_repr = self.language_trans(feeds['sentence_embedding'])      # [bs, hidden_dim]
        interaction = (self.user_embedding(feeds['user_id'])
                       * self.item_embedding(feeds['item_id']))           # [bs, hidden_dim]
        joint = torch.cat([text_repr, interaction], dim=1)                # [bs, 2*hidden_dim]
        preference = self.MLPs(joint)                                     # [bs, hidden_dim // 4]
        self.rating_predict = self.predict(preference)                    # [bs, 1]
        return self.rating_predict

    def loss_function(self):
        """Gaussian-style squared error against the true ratings, scaled by 2*vars."""
        targets = self.input_dict['rating_num'].unsqueeze(1)  # [bs, 1]
        return (((self.rating_predict - targets) ** 2) / (2 * self.vars)).sum()


class Rating2Review(nn.Module):
    """Decode a review (token logits) conditioned on user, item and rating.

    The (user * item) interaction embedding concatenated with a rating
    embedding forms the LSTM's initial hidden state; review tokens are then
    decoded with teacher forcing over frozen pretrained word embeddings.
    Inputs are injected via ``update()`` rather than passed to ``forward()``.
    """

    def __init__(self, config):
        super(Rating2Review, self).__init__()
        self.input_dict = {
            'sentence_index': None,  # torch.Tensor, [bs, seq len]
            'user_id': None,  # torch.Tensor, [bs]
            'item_id': None,  # torch.Tensor, [bs]
            'rating_num': None,  # torch.Tensor, [bs]
            'target_index': None,  # torch.Tensor, [bs, seq len]
        }
        # Half-width user/item embeddings: their product concatenated with the
        # rating embedding yields a full hidden_dim vector.
        self.user_embedding = nn.Embedding(config.num_user, config.hidden_dim // 2)
        self.item_embedding = nn.Embedding(config.num_item, config.hidden_dim // 2)
        self.hidden_normal = nn.BatchNorm1d(config.hidden_dim)
        self.rating_embedding = nn.Embedding(config.num_rate, config.hidden_dim // 2)
        self.LSTM_model = nn.LSTM(config.embedding_dim, config.hidden_dim, batch_first=True)
        # Fix: removed the accidental duplicated chained assignment
        # (`self.word_embedding = self.word_embedding = ...`).
        self.word_embedding = nn.Embedding.from_pretrained(
            torch.Tensor(np.load(config.gensim_embedding)), freeze=True
        )
        self.translate_model = nn.Linear(config.hidden_dim, config.vocab_num)
        self.num_rate = config.num_rate
        self.vocab_num = config.vocab_num
        self.decode_word = None  # logits from the last forward pass, [bs, seq, vocab]
        self.device = config.device

    def update(self, feed_dict):
        """Copy the entries named in ``input_dict`` onto the model device."""
        for key in self.input_dict:
            self.input_dict[key] = feed_dict[key].to(self.device)

    def forward(self, debug=False):
        """
                                                             word  word  word
                                                              ^     ^     ^
                user_item_fused_embedding------concat------>L____S____T____M
                    ^      ^                     ^            ^     ^     ^
          user_embedding item_embedding      rating_em       wem   wem   wem    word embedding
              ^                 ^                ^            ^     ^     ^
          user_id           item_id        one-hot rating   word   word  word
        """
        feed_dict = self.input_dict
        user_embedding = self.user_embedding(feed_dict['user_id'])
        item_embedding = self.item_embedding(feed_dict['item_id'])
        user_item_fused_embedding = user_embedding * item_embedding
        # Ratings are 1-based; shift to 0-based indices for the embedding.
        rating_embedding = self.rating_embedding((feed_dict['rating_num'] - 1))
        hidden_state = torch.cat([user_item_fused_embedding, rating_embedding], dim=1)
        # BatchNorm, then add the (num_layers=1) leading dim the LSTM expects.
        hidden_state = self.hidden_normal(hidden_state).unsqueeze(0)
        # Random (non-learned) initial cell state, resampled every call.
        cell_state = torch.normal(0, 0.1, hidden_state.shape, device=self.device)
        wem = self.word_embedding(feed_dict['sentence_index'])
        outputs, _ = self.LSTM_model(wem, (hidden_state, cell_state))
        self.decode_word = self.translate_model(outputs)

        if debug:
            print('rating embedding weight: ',
                  self.rating_embedding.weight,
                  self.rating_embedding.weight.shape,
                  self.rating_embedding.weight.min(),
                  self.rating_embedding.weight.max(),
                  )

        return self.decode_word

    def loss_function(self):
        """Token-level cross entropy between decoded logits and targets."""
        l2_loss = torch.nn.functional.cross_entropy(
            self.decode_word.view(-1, self.vocab_num), self.input_dict['target_index'].view(-1)
        )
        return l2_loss


class TransNet(nn.Module):
    """Map (user-mean, item-mean) review embeddings to a joint review embedding.

    The per-user and per-item mean sentence embeddings are concatenated and
    linearly transformed to approximate the embedding of the actual
    (user, item) review. Inputs are injected via ``update()``.
    """

    def __init__(self, config):
        super(TransNet, self).__init__()
        self.translayer = nn.Linear(config.LSTM_embedding * 2, config.LSTM_embedding)
        # Filled by update(); forward()/loss_function() read from here.
        self.input_dict = {
            'sentence_embedding_user_mean': None,
            'sentence_embedding_item_mean': None,
            'sentence_embedding_user_item': None,
        }
        self.device = config.device
        self.trans_embedding = None  # output of the last forward pass

    def update(self, feed_dict):
        """Copy the entries named in ``input_dict`` onto the model device."""
        target_device = self.device
        for name in self.input_dict:
            self.input_dict[name] = feed_dict[name].to(target_device)

    def forward(self):
        """Return the transformed joint embedding, shape [bs, LSTM_embedding]."""
        user_mean = self.input_dict['sentence_embedding_user_mean']
        item_mean = self.input_dict['sentence_embedding_item_mean']
        self.trans_embedding = self.translayer(torch.cat([user_mean, item_mean], dim=1))
        return self.trans_embedding

    def loss_function(self):
        """Soft-target cross entropy between the transformed embedding (as
        logits) and the target review embedding (treated as class probabilities).

        NOTE(review): cross entropy over raw embeddings is an unusual way to
        match two continuous vectors — confirm this is intended rather than MSE.
        """
        return F.cross_entropy(
            self.trans_embedding,
            self.input_dict['sentence_embedding_user_item'],
        )


if __name__ == '__main__':
    """
    Test whether the model can be fully trained.
    And give the example train code of each model.
    This part is only in cpu! If you want to use cuda, check device. 
    """
    # Shrink the config so this smoke test runs fast on CPU.
    config = Config()
    config.device = 'cpu'
    config.num_user = 5
    config.num_item = 5
    config.vocab_num = 5

    batch_size = 32
    seq_len = 50

    # Ratings are 1-based in [1, num_rate].
    random_rating = torch.randint(1, config.num_rate + 1, (batch_size,))
    print(random_rating.shape)

    random_sentence = torch.randint(0, config.vocab_num, (batch_size, seq_len,))
    print(random_sentence.shape)

    random_sentence_embedding = torch.normal(0, 0.1, (batch_size, config.LSTM_embedding))
    print(random_sentence_embedding.shape)

    random_user = torch.randint(0, config.num_user, (batch_size,))
    print(random_user.shape)

    random_item = torch.randint(0, config.num_item, (batch_size,))
    print(random_item.shape)

    # Next-token targets: the sentence shifted left by one, padded with 0.
    target_sentence = torch.cat([random_sentence[:, 1:], torch.zeros((batch_size, 1))], dim=1).long()
    print(target_sentence.shape, target_sentence.dtype)

    Re2Ra = Review2Rating(config)
    Ra2Re = Rating2Review(config)

    Re2Ra_optimizer = torch.optim.Adam(Re2Ra.parameters(), lr=1e-4)
    Ra2Re_optimizer = torch.optim.Adam(Ra2Re.parameters(), lr=1e-4)

    print('Model and Optimizer build over!')

    input_dict = {
        'sentence_embedding': random_sentence_embedding,
        'sentence_index': random_sentence,
        'user_id': random_user,
        'item_id': random_item,
        'rating_num': random_rating,
        # Fix: Rating2Review.update() reads 'target_index'; it was missing
        # here and raised a KeyError on Ra2Re.update(input_dict).
        'target_index': target_sentence,
        'vars': 1.0,  # not consumed by update(); Review2Rating reads config.vars
    }

    Re2Ra.update(input_dict)
    Ra2Re.update(input_dict)

    print('input parameters over!')

    rating_predict = Re2Ra()
    print('Review to Rating ok!')

    review_predict = Ra2Re()
    print('Rating to Review ok!')

    # Losses computed inline (same formulas as the models' own loss methods).
    loss_Re2Ra = ((random_rating.unsqueeze(1) - rating_predict) ** 2 / 2.0).sum()
    print('Loss Re2Ra is ', loss_Re2Ra, loss_Re2Ra.shape)

    loss_Ra2Re = F.cross_entropy(review_predict.view(-1, config.vocab_num), target_sentence.view(-1, ))
    print('Loss Ra2Re is ', loss_Ra2Re, loss_Ra2Re.shape)

    loss_Re2Ra.backward()
    print('Loss Re2Ra backward')

    loss_Ra2Re.backward()
    print('Loss Ra2Re backward')

    Ra2Re_optimizer.step()
    Re2Ra_optimizer.step()
    print('optimizer step over!')

    torch.save(Ra2Re.state_dict(), 'Ra2Re.pth')
    torch.save(Re2Ra.state_dict(), 'Re2Ra.pth')
    print('torch save over!')
