import time

import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.cuda import amp
from torch.optim import lr_scheduler

import DataUtils
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import DataUtils
import os
from torchvision import datasets, transforms
import Models

def _str_to_bool(value):
    """argparse type helper: map CLI strings like 'true'/'1'/'yes' to bool."""
    if isinstance(value, bool):  # the default value arrives untouched
        return value
    return value.lower() in ('true', '1', 'yes')


parser = argparse.ArgumentParser(description='Training')
# BUG FIX: the original declared --use_gpu with type=str but a bool default,
# so ANY explicitly passed value (even "True") became a string that compared
# unequal to True downstream, silently disabling the GPU path.
parser.add_argument('--use_gpu', default=True, type=_str_to_bool, help='use the GPU if true')
parser.add_argument('--data_dir', default='E:/BaiduNetdiskDownload/Market1501/pytorch', type=str, help='training dir path')
parser.add_argument('--batchsize', default=5, type=int, help='batchsize')
parser.add_argument('--lr', default=0.06, type=float, help='learning rate')
opt = parser.parse_args()

# Per-phase training history; train_model appends one entry per epoch.
y_loss = {'train': [], 'val': []}  # epoch-averaged loss
y_err = {'train': [], 'val': []}   # error rate, i.e. 1 - accuracy
y_acc = {'train': [], 'val': []}   # accuracy


def save_network(network, epoch_label):
    """Save the network's state_dict to ./model/net_<epoch_label>.pth.

    Args:
        network: the nn.Module whose weights are checkpointed.
        epoch_label: epoch number or tag (e.g. 'last') used in the filename.
    """
    save_dir = './model'
    # BUG FIX: torch.save raises FileNotFoundError if ./model/ does not
    # exist yet; create it on first use.
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, 'net_%s.pth' % epoch_label)
    torch.save(network.state_dict(), save_path)

def train_model(model, criterion, optimizer, scheduler, epochs=60):
    """Run the train/val loop for `epochs` epochs and return the model.

    Each epoch runs a 'train' and a 'val' phase over the module-level
    `dataloaders`, accumulates loss/error/accuracy into the module-level
    history dicts (y_loss / y_err / y_acc), checkpoints every 4th epoch via
    save_network, dumps the history to text files at the end, and returns
    the model carrying the weights from the final validation phase.

    Args:
        model: network to optimize (already placed on the target device).
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: optimizer over model.parameters().
        scheduler: per-epoch LR scheduler; stepped after each train phase.
        epochs: number of epochs to run.
    """
    since = time.time()
    # Robust to --use_gpu arriving either as the bool default or a CLI string.
    use_gpu = opt.use_gpu is True or str(opt.use_gpu).lower() == 'true'
    # Initialize so the final load_state_dict is safe even when epochs == 0.
    last_model_wts = model.state_dict()
    for epoch in range(epochs):
        epoch_since = time.time()
        print('epoch {} / {}'.format(epoch + 1, epochs))
        for phase in ['train', 'val']:
            model.train(phase == 'train')

            running_loss = 0.0
            running_correct = 0.0
            for inputs, labels in dataloaders[phase]:
                now_batch_size = inputs.shape[0]
                # Skip the ragged final batch so per-epoch statistics are
                # accumulated over same-sized batches.
                if now_batch_size < opt.batchsize:
                    continue
                if use_gpu:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                optimizer.zero_grad()
                # Autograd is only needed while training; disabling it in the
                # val phase saves memory and time. (Replaces the deprecated
                # Variable wrappers of the original code.)
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                _, preds_idxs = torch.max(outputs.data, 1)

                if phase == 'train':
                    loss.backward()
                    # BUG FIX: the original called optimizer.step() in BOTH
                    # phases. With Adam, stepping on zeroed gradients still
                    # moves the weights through the momentum buffers, so
                    # validation silently modified the model.
                    optimizer.step()

                running_loss += loss.item() * now_batch_size
                running_correct += float(torch.sum(preds_idxs == labels.data))

            if phase == 'train':
                # Since PyTorch 1.1 the LR scheduler must step AFTER the
                # epoch's optimizer updates, not before them.
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_correct / dataset_sizes[phase]

            print('{} epoch {} / {}, loss: {:.4f} acc: {:.4f}'.format(
                phase, epoch + 1, epochs, epoch_loss, epoch_acc))

            y_loss[phase].append(epoch_loss)
            y_err[phase].append(1.0 - epoch_acc)
            y_acc[phase].append(epoch_acc)

            if phase == 'val':
                last_model_wts = model.state_dict()
                if epoch % 4 == 3:  # checkpoint every 4th epoch
                    save_network(model, epoch)

        epoch_end = time.time() - epoch_since
        print('epoch {} train complete in {:.0f}m {:.0f}s'.format(epoch + 1, epoch_end // 60, epoch_end % 60))
    end = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        end // 60, end % 60))
    # BUG FIX: the original passed the dict as a second positional print
    # argument ("loss: {} {...}"); use .format so the placeholder is filled.
    print('loss: {}'.format(y_loss))
    print('err: {}'.format(y_err))
    print('acc: {}'.format(y_acc))
    save_list('train_loss.txt', y_loss['train'])
    save_list('val_loss.txt', y_loss['val'])
    save_list('train_err.txt', y_err['train'])
    save_list('val_err.txt', y_err['val'])
    save_list('train_acc.txt', y_acc['train'])
    save_list('val_acc.txt', y_acc['val'])

    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model


def save_list(file_name, to_save):
    """Append each item of `to_save` to `file_name`, one item per line.

    Note: the file is opened in append mode, so repeated calls (or reruns
    of the script) accumulate lines rather than overwrite.

    Args:
        file_name: path of the text file to append to.
        to_save: iterable of values; each is written via str().
    """
    # BUG FIX: use a context manager so the file is closed even if a
    # write raises (the original leaked the handle on error).
    with open(file_name, 'a') as f:
        f.writelines('{}\n'.format(item) for item in to_save)

if __name__ == '__main__':
    data_dir = opt.data_dir
    # Robust to --use_gpu arriving either as the bool default or a CLI string
    # (the original `opt.use_gpu == True` fails for any string value).
    use_gpu = opt.use_gpu is True or str(opt.use_gpu).lower() == 'true'
    gpu_id = 0 if use_gpu else -1
    if gpu_id >= 0:
        torch.cuda.set_device(gpu_id)
        cudnn.benchmark = True  # autotune conv kernels for fixed input sizes

    # Load data: ImageFolder reads a class-per-subdirectory layout and applies
    # the composed transforms defined in DataUtils.
    image_datasets = {
        'train': datasets.ImageFolder(os.path.join(data_dir, 'train_all'),
                                      DataUtils.data_transforms['train']),
        'val': datasets.ImageFolder(os.path.join(data_dir, 'val'),
                                    DataUtils.data_transforms['val']),
    }

    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
                                                 shuffle=True, num_workers=8, pin_memory=True)
                  for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

    model = Models.resnet50()
    # BUG FIX: only move the model to the GPU when one is requested; the
    # original called .cuda() unconditionally and crashed on CPU-only runs
    # even with --use_gpu disabled.
    if use_gpu:
        model = model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
    criterion = nn.CrossEntropyLoss()

    model = train_model(model, criterion, optimizer, exp_lr_scheduler, 1)
