

import os
from sys import flags
import time
import torch

from torch.autograd import Variable
from torchvision import datasets, transforms, models
from utils.initdataset import DatasetTools
from config.config import Config


def init_dataset(config):
    """Prepare the class-folder dataset from the raw data, unless it already exists.

    Uses the presence of ``<train_dataset_path>/train/ID1`` as the marker
    that conversion has already been done; otherwise runs the full
    convert / label / 90-10 train-valid split pipeline via DatasetTools.
    """
    marker = os.path.join(config.train_dataset_path, 'train', 'ID1')
    if os.path.exists(marker):
        print('Dataset Already Init')
        return

    print('Start Init Dataset')
    tools = DatasetTools(config.train_raw_dataset_path, config.train_dataset_path)
    tools.start_convert()
    tools.get_label()
    tools.split_train_valid(train_scale=0.9)

def train(config):
    """Train a 20-class AlexNet classifier on the prepared dataset.

    Expects ``config`` to provide:
      - train_dataset_path: root dir containing 'train' and 'valid' class folders
      - batch_size: DataLoader batch size
      - learning_rate_alexnet: initial Adam learning rate
      - train_epoch_alexnet: collection of epoch indices at which to decay LR by 10x
      - model_root_path: directory for periodic checkpoints

    Saves the final model (whole module, via torch.save) to ./alexnet.pth.
    """
    data_dir = config.train_dataset_path

    # Identical deterministic preprocessing for both phases.
    data_transform = {x: transforms.Compose([
                          transforms.Resize([224, 224]),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                               std=[0.5, 0.5, 0.5])])
                      for x in ['train', 'valid']}

    image_datasets = {x: datasets.ImageFolder(root=os.path.join(data_dir, x),
                                              transform=data_transform[x])
                      for x in ['train', 'valid']}

    dataloader = {x: torch.utils.data.DataLoader(dataset=image_datasets[x],
                                                 batch_size=config.batch_size,
                                                 shuffle=True)
                  for x in ['train', 'valid']}

    # Random-init AlexNet; replace the stock 1000-way head with a 20-class one.
    model = models.alexnet(pretrained=False)
    model.classifier = torch.nn.Sequential(
        torch.nn.Dropout(),
        torch.nn.Linear(256 * 6 * 6, 4096),
        torch.nn.ReLU(inplace=True),
        torch.nn.Dropout(),
        torch.nn.Linear(4096, 4096),
        torch.nn.ReLU(inplace=True),
        torch.nn.Linear(4096, 20),
    )

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = torch.nn.DataParallel(model)
        model = model.cuda()

    loss_f = torch.nn.CrossEntropyLoss()

    epoch_n = 10

    # BUGFIX: build the optimizer ONCE so Adam's running moment estimates
    # survive across epochs. The original recreated it every epoch, silently
    # resetting the optimizer state each time.
    lr = config.learning_rate_alexnet
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epoch_n):
        # Decay LR 10x at the scheduled epochs by editing param_groups
        # in place instead of rebuilding the optimizer.
        if epoch in config.train_epoch_alexnet:
            lr /= 10
            for group in optimizer.param_groups:
                group['lr'] = lr

        print('-' * 20)
        print('Epoch [{}]/{}'.format(epoch + 1, epoch_n))

        for phase in ['train', 'valid']:
            model.train(phase == 'train')

            running_loss = 0.0
            running_corrects = 0

            # BUGFIX: disable autograd during validation — the original built
            # the full computation graph (and grad buffers) in the valid phase.
            with torch.set_grad_enabled(phase == 'train'):
                for batch, (X, y) in enumerate(dataloader[phase], 1):
                    # NOTE: deprecated Variable() wrappers removed (no-op
                    # since PyTorch 0.4); tensors are used directly.
                    if use_cuda:
                        X = X.cuda()
                        y = y.cuda()
                    y_pred = model(X)
                    _, pred = torch.max(y_pred.data, 1)
                    optimizer.zero_grad()
                    loss = loss_f(y_pred, y)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                    running_loss += loss.item()
                    # .item() keeps the accumulator a plain int (no tensor).
                    running_corrects += torch.sum(pred == y.data).item()
                    if batch % 10 == 0 and phase == 'train':
                        # {:03d} reproduces the original zero-padded index
                        # (e.g. "010") for batches below 100.
                        print('| Batch [{:03d}] | Training Loss:{:.4f} | Train Acc:{:.4f} |'
                              .format(batch, running_loss / batch,
                                      float(100 * running_corrects) / (config.batch_size * batch)))

            # Approximate per-sample loss (last batch may be smaller than batch_size).
            epoch_loss = running_loss * config.batch_size / len(image_datasets[phase])
            epoch_acc = float(100 * running_corrects) / len(image_datasets[phase])
            print('| [{} Loss] {:.4f} | [Acc] {:.4f}% |'.format(phase, epoch_loss, epoch_acc), end='')
        print()

        # NOTE(review): with epoch_n = 10 this checkpoint never fires
        # ((epoch+1) % 50 is never 0) — confirm whether the cadence or
        # epoch_n is the intended value.
        if (epoch + 1) % 50 == 0:
            torch.save(model, os.path.join(config.model_root_path,
                                           'epoch' + str(epoch + 1) + '_alexnet.pth'))

    torch.save(model, './alexnet.pth')

# Script entry point: build the dataset layout if needed, then train.
if __name__ == '__main__':
    cfg = Config()
    init_dataset(cfg)
    train(cfg)