"""
Created on 2020/4/19 16:55

@author: zhouweixin
@note: 
"""


import os
import sys
import time
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
from core import config
from util import utils

MSELoss = nn.MSELoss()


def train(model, train_loader, eval_loader=None, hyperparas=None, output_path=config.result_output_path):
    """Jointly train the rating-prediction and review-generation model.

    Args:
        model: network whose forward returns (rating_pred, y_pred, r_loss);
            r_loss is assumed to be the (summed) rating loss for the batch.
        train_loader: DataLoader yielding (user_id, item_id, user_reviews,
            item_reviews, user_rids, item_rids, ratings, y, y_lengths).
        eval_loader: validation DataLoader, used when hyperparas['is_eval'].
        hyperparas: dict with keys 'num_epoches', 'lr', 'gpu', 'is_eval',
            'norm_lambda', 'weight_decay', 'alpha'.
        output_path: directory receiving 'log.txt' and the best 'model.pth'.
    """
    crit = LanguageModelCriterion()

    num_epoches = hyperparas['num_epoches']
    lr = hyperparas['lr']
    gpu = hyperparas['gpu']
    is_eval = hyperparas['is_eval']
    norm_lambda = hyperparas['norm_lambda']
    weight_decay = hyperparas['weight_decay']
    alpha = hyperparas['alpha']  # weight of the generation loss in the joint objective

    if gpu:
        model = model.cuda()

    optimizer = optim.Adamax(model.parameters(), lr=lr, weight_decay=weight_decay)

    utils.create_dir(output_path)
    logger = utils.Logger(os.path.join(output_path, 'log.txt'), hyperparas)
    best_both_loss = None  # best validation joint loss seen so far

    for epoch in range(num_epoches):
        torch.cuda.empty_cache()
        r_total_loss = 0.
        y_total_loss = 0.
        total_loss = 0.

        start_time = time.time()
        for i, (user_id, item_id, user_reviews, item_reviews, user_rids, item_rids, ratings, y, y_lengths) in enumerate(train_loader):
            torch.cuda.empty_cache()
            sys.stdout.write('\rtrain: %d / %d' % (i+1, len(train_loader)))

            if gpu:
                # Variable() has been a no-op since torch 0.4; move tensors directly.
                user_id = user_id.cuda()
                item_id = item_id.cuda()
                user_reviews = user_reviews.cuda()
                item_reviews = item_reviews.cuda()
                user_rids = user_rids.cuda()
                item_rids = item_rids.cuda()
                ratings = ratings.cuda()
                y = y.cuda()
                y_lengths = y_lengths.cuda()

            # Teacher forcing: feed y[:-1], predict y[1:].
            y_input = y[:, :-1]
            y_output = y[:, 1:]
            y_lengths = y_lengths - 1
            y_lengths[y_lengths <= 0] = 1  # guard against empty sequences

            rating_pred, y_pred, r_loss = model(user_id, item_id, user_reviews, item_reviews, user_rids, item_rids, ratings, y_input, y_lengths, norm_lambda)

            # mask[b, t] = 1 iff t < y_lengths[b]; masks out padded positions.
            y_mask = torch.arange(y_lengths.cpu().max().item())[None, :] < y_lengths[:, None].cpu()
            y_mask = y_mask.float()
            if gpu:
                y_mask = y_mask.cuda()

            # Truncate targets to the longest (shortened) length so the flattened
            # rows of y_pred, y_output and y_mask stay aligned in the criterion.
            y_output = y_output[:, :y_lengths.max()]
            y_loss = crit(y_pred, y_output, y_mask)

            both_loss = r_loss + alpha * y_loss * rating_pred.shape[0]

            optimizer.zero_grad()
            both_loss.backward()
            optimizer.step()

            # Accumulate via .item() so each batch's autograd graph is freed
            # instead of being kept alive by the running totals all epoch.
            r_total_loss += r_loss.item() * ratings.size(0)
            y_total_loss += y_loss.item() * rating_pred.shape[0]
            total_loss += both_loss.item()

            if (i+1) % 100 == 0:
                sys.stdout.write('\r')
                print('Epoch %d, iter %05d, loss = %.6f' % (epoch + 1, (i+1), both_loss.item()))

        r_loss = r_total_loss / len(train_loader.dataset)
        y_loss = y_total_loss / len(train_loader.dataset)
        train_mse = r_loss
        loss = total_loss / len(train_loader.dataset)

        if is_eval:
            # Validation pass.
            model.eval()
            val_mse, val_rmse, val_y_loss = evaluate(model, eval_loader, gpu)
            # Fix: score the checkpoint with the *validation* generation loss
            # (previously the training-epoch y_loss was combined by mistake).
            val_both_loss = val_mse + alpha * val_y_loss
            model.train()

            # Fix: also save on the first epoch (the old best==val comparison
            # meant model.pth was never written unless a later epoch improved).
            if best_both_loss is None or val_both_loss < best_both_loss:
                model_path = os.path.join(output_path, 'model.pth')
                torch.save(model.state_dict(), model_path)
                best_both_loss = val_both_loss

        logger.write('Epoch %d, loss = %.6f, time: %.2f' % (epoch + 1, loss, time.time() - start_time))
        logger.write('\ttrain_mse = %.6f, train_rmse = %.6f, train_y_loss = %.6f' % (train_mse, np.sqrt(train_mse), y_loss))
        if is_eval:
            logger.write('\tval_mse = %.6f, val_rmse = %.6f, best_both_loss = %.6f, val_y_loss = %.6f' % (val_mse, val_rmse, best_both_loss, val_y_loss))


def evaluate(model, eval_loader, gpu, save_att=False):
    """Evaluate the model on eval_loader.

    Args:
        model: network whose forward returns (rating_pred, y_pred, r_loss).
        eval_loader: DataLoader with the same batch layout as in train().
        gpu: move batches to CUDA when true.
        save_att: forwarded to the model (presumably dumps attention maps —
            TODO confirm against the model implementation).

    Returns:
        (mse, rmse, y_loss) as Python floats averaged over the dataset.
    """
    crit = LanguageModelCriterion()
    total_mse = 0.
    total_y_loss = 0.

    for i, (user_id, item_id, user_reviews, item_reviews, user_rids, item_rids, ratings, y, y_lengths) in enumerate(eval_loader):
        torch.cuda.empty_cache()
        sys.stdout.write('eval: %d / %d     \r' % (i + 1, len(eval_loader)))

        if gpu:
            # Variable() has been a no-op since torch 0.4; move tensors directly.
            user_id = user_id.cuda()
            item_id = item_id.cuda()
            user_reviews = user_reviews.cuda()
            item_reviews = item_reviews.cuda()
            user_rids = user_rids.cuda()
            item_rids = item_rids.cuda()
            ratings = ratings.cuda()
            y = y.cuda()
            y_lengths = y_lengths.cuda()

        # Teacher forcing: feed y[:-1], predict y[1:].
        y_input = y[:, :-1]
        y_output = y[:, 1:]
        y_lengths = y_lengths - 1
        y_lengths[y_lengths <= 0] = 1  # guard against empty sequences

        with torch.no_grad():
            rating_pred, y_pred, r_loss = model(user_id, item_id, user_reviews, item_reviews, user_rids, item_rids, ratings,
                                            y_input, y_lengths, save_att=save_att)

        # mask[b, t] = 1 iff t < y_lengths[b]; masks out padded positions.
        y_mask = torch.arange(y_lengths.cpu().max().item())[None, :] < y_lengths[:, None].cpu()
        y_mask = y_mask.float()
        if gpu:
            y_mask = y_mask.cuda()

        # Fix: truncate targets to y_lengths.max() (as train() does) so the
        # flattened rows of y_pred, y_output and y_mask stay aligned inside
        # the criterion; without this the per-row gather is shifted whenever
        # y_pred is shorter than the full target sequence.
        y_output = y_output[:, :y_lengths.max()]
        y_loss = crit(y_pred, y_output, y_mask)

        mse = MSELoss(rating_pred, ratings)
        total_mse += mse.item() * ratings.size(0)
        # .item() so the running total is a float, not a growing tensor.
        total_y_loss += y_loss.item() * ratings.size(0)

    mse = total_mse / len(eval_loader.dataset)
    rmse = np.sqrt(mse)
    y_loss = total_y_loss / len(eval_loader.dataset)
    return mse, rmse, y_loss


class MyLoss(nn.Module):
    """Rating regression loss plus an L2 penalty on the attention projections.

    forward(model, prediction, rating, norm_lambda) returns
    l2_loss(prediction, rating) + norm_lambda * sum of l2 norms of the four
    review/id linear-layer weights inside the user/item attention modules.
    """

    def __init__(self):
        super(MyLoss, self).__init__()

    def forward(self, model, prediction, rating, norm_lambda):
        # The four projection matrices that get weight-decayed explicitly.
        penalized = (
            model.user_reviews_att.review_linear.weight,
            model.user_reviews_att.id_linear.weight,
            model.item_reviews_att.review_linear.weight,
            model.item_reviews_att.id_linear.weight,
        )
        reg_term = sum(utils.l2_loss(w) for w in penalized)

        fit_term = utils.l2_loss(prediction, rating)

        return fit_term + norm_lambda * reg_term


class LanguageModelCriterion(nn.Module):
    """Masked negative log-likelihood over a batch of token sequences.

    forward(input, target, mask):
        input  [batch, length, vocab_size] — per-token log-probabilities
        target [batch, length]             — gold token ids
        mask   [batch, length]             — 1.0 for real tokens, 0.0 for padding

    Returns the sum of -log p(target) over unmasked positions, divided by
    the number of unmasked positions.
    """

    def __init__(self):
        super(LanguageModelCriterion, self).__init__()

    def forward(self, input, target, mask):
        vocab_size = input.size(2)

        # Flatten everything to [batch * length, ...] rows.
        flat_logp = input.contiguous().view(-1, vocab_size)
        flat_target = target.contiguous().view(-1, 1)
        flat_mask = mask.contiguous().view(-1, 1)

        # If the target sequence is longer than the prediction, drop the tail
        # rows so gather() lines up with the available predictions.
        if flat_logp.size(0) != flat_target.size(0):
            flat_target = flat_target[:flat_logp.size(0), :]

        # Pick the log-prob of each gold token, zero out padded positions.
        nll = -flat_logp.gather(1, flat_target) * flat_mask
        return torch.sum(nll) / torch.sum(flat_mask)