import os
import unittest

import timm
import torch
from PIL import Image
from torch import hub, nn
from torch.backends import cudnn
from torch.utils.data import dataset
from torchsummary import summary
from torchvision import transforms, datasets

from config import Config
from training.models.xception_aligned import xception65
from training.tools.model_utils import validate

CONFIG = Config()
# Route torch.hub downloads (pretrained weights) into the configured cache dir.
hub.set_dir(CONFIG['TORCH_HOME'])
# Restrict which GPUs CUDA may see; must be set before any CUDA initialization.
os.environ["CUDA_VISIBLE_DEVICES"] = CONFIG['CUDA_VISIBLE_DEVICES']

# Let cuDNN benchmark/auto-tune convolution algorithms — beneficial here since
# every batch has a fixed input size.
torch.backends.cudnn.benchmark = True


class XceptionTestCase(unittest.TestCase):
    """Summary and ImageNet-validation tests for Xception-family models.

    Covers the timm ``gluon_xception65``/``xception65`` variants and the
    locally defined ``xception65``. Requires a CUDA device and the ImageNet
    validation split at ``CONFIG['IMAGENET_HOME']/val``.
    """

    def _summarize(self, model):
        """Print a torchsummary layer summary at the model's default input size.

        :param model: a timm-style model exposing ``default_cfg['input_size']``.
        """
        self.assertTrue(torch.cuda.is_available())
        model = model.cuda()
        summary(model, input_size=model.default_cfg['input_size'])

    def _validate_on_imagenet(self, model, batch_size):
        """Evaluate ``model`` on the ImageNet validation split.

        Builds the standard eval pipeline from the model's ``default_cfg``
        (bicubic resize by crop percentage, center crop, per-channel
        normalization) and delegates the metric loop to ``validate``.

        :param model: a CUDA model exposing ``default_cfg`` with
            ``input_size``, ``crop_pct``, ``mean`` and ``std``.
        :param batch_size: validation DataLoader batch size.
        """
        criterion = nn.CrossEntropyLoss().cuda()

        valdir = os.path.join(CONFIG['IMAGENET_HOME'], 'val')
        self.assertTrue(os.path.exists(valdir))

        input_size = model.default_cfg['input_size']
        # Resize the short edge so the subsequent center crop keeps
        # crop_pct of the image, per the model's training recipe.
        resize = int(input_size[1] / model.default_cfg['crop_pct'])
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(resize, Image.BICUBIC),
                transforms.CenterCrop((input_size[1], input_size[2])),
                transforms.ToTensor(),
                transforms.Normalize(mean=model.default_cfg['mean'],
                                     std=model.default_cfg['std']),
            ])),
            batch_size=batch_size, shuffle=False,
            num_workers=1, pin_memory=True)

        validate(val_loader, model, criterion)

    def test_gluon_xception_summary(self):
        self._summarize(timm.create_model('gluon_xception65'))

    def test_gluon_xception(self):
        model = timm.create_model('gluon_xception65').cuda()
        self._validate_on_imagenet(model, batch_size=10)

    def test_xception65_summary(self):
        self._summarize(xception65(pretrained=True))

    def test_xception65(self):
        model = xception65(pretrained=True).cuda()
        self._validate_on_imagenet(model, batch_size=20)

    def test_timm_xception65_summary(self):
        self._summarize(timm.create_model('xception65'))


if __name__ == '__main__':
    # Discover and run every test case in this module.
    unittest.main()
