# -*- coding: utf-8 -*-
from __future__ import print_function

import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
#import models
import torchvision.models as models

from ddl_platform.ddlib import job

# for neumf
import ncf
import data_utils
import mconfig
import evaluate

class NEUMFJob(job.Job):
    """Training job for the NeuMF (Neural Collaborative Filtering) model.

    Plugs into the ddl_platform Job framework: the trainer is expected to
    call the build_* hooks once at setup, then training_step() per training
    batch and cal_eval_performance() per evaluation batch.
    """

    def build_dataset(self):
        """Load the NCF interaction data and build the train/test datasets.

        Returns:
            (trainset, testset): two data_utils.NCFData instances.

        Side effects:
            Caches user/item counts and the train interaction matrix on
            ``self`` so build_model() can size the embeddings.
        """
        config = self.config()['dataset']
        data_dir = config['data_dir']

        # load_all yields the raw interactions plus corpus statistics.
        train_data, test_data, user_num, item_num, train_mat = \
            data_utils.load_all(data_dir)
        self._user_num = user_num
        self._item_num = item_num
        self._train_mat = train_mat

        # 4 negative samples per positive interaction for training;
        # 0 for the evaluation set (negatives come with the test data).
        trainset = data_utils.NCFData(train_data, item_num, train_mat, 4, True)
        testset = data_utils.NCFData(test_data, item_num, train_mat, 0, False)

        # Pre-draw the negative samples once up front. NOTE(review): NCF
        # implementations usually re-sample per epoch — confirm the trainer
        # calls ng_sample() again between epochs.
        trainset.ng_sample()
        return trainset, testset

    def build_model(self):
        """Construct the NCF model from the cached corpus statistics.

        Hyper-parameters may be overridden via an optional 'model' section
        in the job config; the defaults reproduce the previous hard-coded
        values (factor_num=32, num_layers=3, dropout=0.0).
        """
        try:
            model_cfg = self.config()['model']
        except KeyError:
            model_cfg = {}

        # No pre-trained GMF/MLP sub-models: train NeuMF end-to-end.
        GMF_model = None
        MLP_model = None
        model = ncf.NCF(
            self._user_num,
            self._item_num,
            model_cfg.get('factor_num', 32),
            model_cfg.get('num_layers', 3),
            model_cfg.get('dropout', 0.0),
            mconfig.model,
            GMF_model,
            MLP_model,
        )
        return model

    def build_optimizer(self):
        """Build the Adam optimizer and attach a MultiStepLR scheduler.

        The learning rate comes from the 'optimizer' config section; the
        scheduler decays it by 0.2 at milestones [4, 7, 10] (stepped in
        training_step — see the NOTE there).
        """
        model = self.model()
        config = self.config()['optimizer']
        optimizer = optim.Adam(model.parameters(), betas=(0.9, 0.999),
                               lr=config['lr'])
        self._scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, [4, 7, 10], gamma=0.2)
        return optimizer

    def build_criterion(self):
        """Binary cross-entropy over logits — NCF predicts raw scores."""
        return nn.BCEWithLogitsLoss()

    def cal_eval_performance(self, batch_outputs, batch_inputs):
        """Compute hit-ratio@10 for one leave-one-out evaluation batch.

        Assumes the batch holds one ground-truth item at index 0 followed by
        sampled negatives, per the standard NCF evaluation protocol — TODO
        confirm against the data loader.

        Returns:
            HR@10 as computed by evaluate.hit().
        """
        # Only the item ids are needed; user ids and labels are unused here.
        _, item, _ = batch_inputs

        # Top-10 ranked predictions, mapped back to item ids.
        _, indices = torch.topk(batch_outputs, 10)
        recommends = torch.take(item, indices).cpu().numpy().tolist()

        gt_item = item[0].item()
        # NDCG used to be computed here too but was discarded; the framework
        # consumes a single scalar, so only HR@10 is returned.
        return evaluate.hit(gt_item, recommends)

    def training_step(self, batch, model):
        """Run one forward pass and return (loss, prediction) for a batch.

        NOTE(review): the LR scheduler is stepped once per *batch*, before
        the optimizer runs. The MultiStepLR milestones [4, 7, 10] look like
        epoch counts, and PyTorch >= 1.1 expects scheduler.step() after
        optimizer.step() — verify against the trainer loop before changing.
        """
        self._scheduler.step()

        user, item, label = batch
        user = user.cuda()
        item = item.cuda()
        label = label.float().cuda()  # BCEWithLogitsLoss needs float targets

        prediction = model(user, item)
        loss = self.criterion()(prediction, label)
        return loss, prediction


#def train():
#    conf_yaml = 'neumf.yaml'
#    job = NEUMFJob(conf_yaml)
#    t = trainer.Trainer(job)
#    t.fit()
#
#if __name__ == '__main__':
#    train()
