import time

import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.cuda import amp
from torch.optim import lr_scheduler

import DataUtils
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import DataUtils
import os
from torchvision import datasets, transforms
import Models
import DAM


from fp16utils import network_to_half
from fp16opt import FP16_Optimizer

import torch.cuda.amp.grad_scaler

import torch.cuda.amp.autocast_mode

def _str2bool(value):
    """Interpret a command-line string (or a bool default) as a boolean."""
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes')


parser = argparse.ArgumentParser(description='Training')
# NOTE: argparse applies `type` only to values given on the command line, so
# the bare `default=True` remains a real bool. The original `type=str` left
# passed values as strings, making "--use_gpu False" truthy and breaking the
# `opt.use_gpu == True` check downstream.
parser.add_argument('--use_gpu', default=True, type=_str2bool, help='gpu id')
parser.add_argument('--person_data_dir', default='/all_dataset/zyj/datasets/person_train_datasets/', type=str, help='personReid pre train dir path')
parser.add_argument('--bg_data_dir', default='/all_dataset/zyj/datasets/bg_train_datasets/', type=str, help='bgReid pre train dir path')
parser.add_argument('--train_data_dir', default='E:/BaiduNetdiskDownload/Market1501/pytorch/train_all/', type=str, help='train dir path')
parser.add_argument('--pre_batchsize', default=5, type=int, help='batchsize')
parser.add_argument('--batchsize', default=5, type=int, help='batchsize')
parser.add_argument('--train_batch', default=5, type=int, help='batchsize')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
opt = parser.parse_args()

def save_network(network, epoch_label, type_name):
    """Checkpoint a model to ./model/<type_name>net_<epoch_label>.pth.

    The model is moved to CPU first so the saved state_dict is
    device-agnostic, then moved back to GPU 0 when CUDA is available.

    Args:
        network: the nn.Module to checkpoint.
        epoch_label: epoch number or tag (e.g. 'last') embedded in the filename.
        type_name: prefix identifying which branch is being saved.
    """
    save_filename = type_name + 'net_%s.pth' % epoch_label
    save_dir = './model'
    # torch.save raises if the target directory is missing — create it up front.
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    if torch.cuda.is_available():
        # NOTE(review): always restores to device 0, regardless of which GPU
        # the caller selected — confirm single-GPU use is intended.
        network.cuda(0)


def pre_train(Model, dataloader, optimizer, criterion, scheduler, type_name, epochs=60):
    """Pre-train `Model` on dataloader[type_name] for `epochs` epochs.

    Runs a standard supervised loop (forward, cross-entropy-style loss,
    backward, optimizer step), logs per-epoch average loss and timing,
    checkpoints every 10 epochs and once at the end via `save_network`.

    Args:
        Model: the network to train (moved to GPU by the caller).
        dataloader: dict of DataLoaders keyed by branch name.
        optimizer: optimizer over Model's parameters.
        criterion: loss function applied to (outputs, labels).
        scheduler: LR scheduler, stepped once per epoch.
        type_name: key into `dataloader` and tag used in logs/checkpoints.
        epochs: number of training epochs.

    Returns:
        The trained Model.
    """
    since = time.time()
    Model.train(True)
    # Average over the full dataset length rather than the module-global
    # `pre_datasets_size` the original read — same value, no hidden coupling.
    dataset_size = len(dataloader[type_name].dataset)
    for epoch in range(epochs):
        epoch_since = time.time()
        print('{} epoch {} / {}'.format(type_name, epoch + 1, epochs))
        running_loss = 0.0
        for inputs, labels in dataloader[type_name]:
            now_batch_size = inputs.size(0)
            # Skip ragged final batches so every contribution to the running
            # loss covers a full batch.
            if now_batch_size < opt.pre_batchsize:
                continue
            # `Variable` is deprecated; plain tensors track gradients directly.
            inputs = inputs.cuda()
            labels = labels.cuda()

            optimizer.zero_grad()

            outputs = Model(inputs)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            # loss.item() is the batch mean; re-weight by batch size so the
            # epoch average is per-sample.
            running_loss += loss.item() * now_batch_size
        # PyTorch >= 1.1: the scheduler must step AFTER the epoch's optimizer
        # updates (the original stepped first, skipping the initial LR value).
        scheduler.step()
        epoch_loss = running_loss / dataset_size

        print('{} epoch {} / {}, loss: {:.4f}'.format(
            type_name, epoch + 1, epochs, epoch_loss))
        if epoch % 10 == 0:
            save_network(Model, epoch, 'pre_train_' + type_name)
        epoch_end = time.time() - epoch_since
        print('{} epoch {} train complete in {:.0f}m {:.0f}s'.format(type_name, epoch + 1, epoch_end // 60, epoch_end % 60))

    end = time.time() - since
    print('{} Model Training complete in {:.0f}m {:.0f}s'.format(
        type_name, end // 60, end % 60))
    save_network(Model, 'last', 'pre_train_' + type_name)
    return Model






if __name__ == '__main__':

    person_data_dir = opt.person_data_dir
    bg_data_dir = opt.bg_data_dir
    # Robust truthiness check: with `type=str` on --use_gpu, any command-line
    # value arrives as a string, so the original `opt.use_gpu == True` was
    # False for every passed value. Accept the bool default and the common
    # string spellings.
    use_gpu = str(opt.use_gpu).lower() in ('true', '1', 'yes')
    gpu_id = 0 if use_gpu else -1
    if gpu_id == 0:
        torch.cuda.set_device(gpu_id)
        # Autotune cuDNN conv kernels — beneficial for fixed input sizes.
        cudnn.benchmark = True

    # Pre-training datasets: one ImageFolder per branch (person / background),
    # both with the shared training transforms.
    pre_train_datasets = {
        'person': datasets.ImageFolder(person_data_dir, DataUtils.data_transforms['train']),
        'bg': datasets.ImageFolder(bg_data_dir, DataUtils.data_transforms['train']),
    }

    pre_dataloaders = {x: torch.utils.data.DataLoader(pre_train_datasets[x], batch_size=opt.pre_batchsize, shuffle=True, num_workers=8, pin_memory=True)
                       for x in ['person', 'bg']}
    # Module-level sizes consumed by pre_train's epoch-loss averaging.
    pre_datasets_size = {x: len(pre_train_datasets[x]) for x in ['person', 'bg']}

    pModel = Models.resnet50().cuda()
    bgModel = Models.resnet50().cuda()

    # Use the `optim` alias consistently (the file imports torch.optim as optim;
    # the original mixed `torch.optim.Adam` and the alias).
    pOptimizer = optim.Adam(pModel.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    bgOptimizer = optim.Adam(bgModel.parameters(), lr=opt.lr, betas=(0.5, 0.999))

    # Decay LR by 10x every 30 epochs for both branches.
    p_lr_scheduler = lr_scheduler.StepLR(pOptimizer, step_size=30, gamma=0.1)
    bg_lr_scheduler = lr_scheduler.StepLR(bgOptimizer, step_size=30, gamma=0.1)

    criterion = nn.CrossEntropyLoss()

    pModel = pre_train(pModel, pre_dataloaders, pOptimizer, criterion, p_lr_scheduler, 'person', 60)
    bgModel = pre_train(bgModel, pre_dataloaders, bgOptimizer, criterion, bg_lr_scheduler, 'bg', 60)