# -*- coding: utf-8 -*-
from __future__ import print_function

import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim

from ddl_platform.ddlib import job
import os
import models

class CifarResNetJob(job.Job):
    """Training job for a ResNet-20 classifier on CIFAR-10.

    Implements the ``job.Job`` construction hooks (dataset, model,
    optimizer, criterion).  All tunable values are read from the job
    configuration returned by ``self.config()``.
    """

    def build_dataset(self):
        """Build the CIFAR-10 train/test datasets.

        Returns:
            tuple: ``(trainset, testset)`` torchvision ``CIFAR10``
            datasets.  The training split uses standard CIFAR
            augmentation (pad-4 random crop + horizontal flip); both
            splits are normalized with per-channel statistics.

        Data is downloaded into ``config['dataset']['data_dir']`` if it
        is not already present.
        """
        config = self.config()['dataset']
        data_dir = config['data_dir']

        # Per-channel mean/std of the CIFAR-10 training images.
        normalize = transforms.Normalize(mean=[0.491, 0.482, 0.447],
                                         std=[0.247, 0.243, 0.262])
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True,
                                                download=True,
                                                transform=train_transform)
        testset = torchvision.datasets.CIFAR10(root=data_dir, train=False,
                                               download=True,
                                               transform=test_transform)

        return trainset, testset

    def build_model(self):
        """Build a ResNet-20 model sized for CIFAR-10.

        Fix: the head was previously built with ``num_classes=1000``
        (an ImageNet-sized classifier), mismatching the 10 CIFAR-10
        labels produced by :meth:`build_dataset`.
        """
        return models.__dict__['resnet20'](num_classes=10)

    def build_optimizer(self):
        """Build an SGD optimizer from ``config['optimizer']``.

        Reads ``lr`` and ``momentum`` from the optimizer section of the
        job configuration and attaches all model parameters.
        """
        model = self.model()
        config = self.config()['optimizer']
        optimizer = optim.SGD(model.parameters(),
                              lr=config['lr'],
                              momentum=config['momentum'])
        return optimizer

    def build_criterion(self):
        """Cross-entropy loss for single-label classification."""
        return nn.CrossEntropyLoss()

    def cal_eval_performance(self, batch_outputs, batch_inputs):
        """Delegate evaluation-metric computation to the base class."""
        return super().cal_eval_performance(batch_outputs, batch_inputs)


#def train():
#    conf_yaml = 'mnist.yaml'
#    job = MnistJob(conf_yaml)
#    t = trainer.Trainer(job)
#    t.fit()
#
#if __name__ == '__main__':
#    train()
