import time

import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.optim import lr_scheduler


import argparse
import numpy as np
import torch
import torch.nn as nn

import DataUtils
import os
from torchvision import datasets, transforms
import Models
import DAM

import torch.cuda.amp.grad_scaler

import torch.cuda.amp.autocast_mode

def _str2bool(value):
    """Interpret argparse input as a boolean (accepts bools and common truthy strings)."""
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes', 'y')


parser = argparse.ArgumentParser(description='Training')
# BUG FIX: '--use_gpu' previously used type=str, so any value given on the
# command line arrived as a string and the later `opt.use_gpu == True`
# check was always False. Parse it into a real bool instead.
parser.add_argument('--use_gpu', default=True, type=_str2bool, help='use the GPU (true/false)')
parser.add_argument('--person_data_dir', default='E:/datasets/person_train_datasets/', type=str, help='personReid pre train dir path')
parser.add_argument('--bg_data_dir', default='E:/datasets/bg_train_datasets/', type=str, help='bgReid pre train dir path')
parser.add_argument('--train_data_dir', default='E:/datasets/', type=str, help='train dir path')
parser.add_argument('--pre_batchsize', default=5, type=int, help='batchsize for pre-training')
parser.add_argument('--batchsize', default=5, type=int, help='batchsize for the joint train dataloaders')
parser.add_argument('--train_batch', default=5, type=int, help='minimum batch size kept during joint training')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
opt = parser.parse_args()


def save_list(file_name, to_save):
    """Write every item of `to_save` to `file_name`, one item per line.

    The file is rewritten on each call.  All callers in this script pass
    the full cumulative history list every epoch, so the previous append
    ('a') mode duplicated the earlier entries on every save; 'w' keeps the
    file an exact mirror of the list.

    Args:
        file_name: path of the text file to (over)write.
        to_save: iterable of items; each is str()-converted to one line.
    """
    with open(file_name, 'w') as out_file:
        for item in to_save:
            out_file.write(str(item))
            out_file.write('\n')
def save_network(network, epoch_label, type_name):
    """Save `network`'s state_dict to ./train_model/<type_name>net_<epoch_label>.pth.

    The model is moved to the CPU before saving so the checkpoint is
    device-agnostic, then moved back to GPU 0 when CUDA is available
    (the rest of this script trains on device 0).

    Args:
        network: the torch.nn.Module to checkpoint (moved in place).
        epoch_label: epoch number or a tag such as 'last', used in the name.
        type_name: prefix identifying the model ('person', 'bg', ...).
    """
    save_filename = type_name + 'net_%s.pth' % epoch_label
    save_dir = './train_model'
    # BUG FIX: torch.save raises if the target directory does not exist.
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    if torch.cuda.is_available():
        network.cuda(0)

def pre_train(Model, dataloader, optimizer, criterion, scheduler, type_name, epochs = 60):
    """Pre-train one classifier on its own dataset.

    Args:
        Model: network already moved to the GPU; trained in place.
        dataloader: dict of DataLoaders keyed by dataset name.
        optimizer: optimizer over Model's parameters.
        criterion: loss function (e.g. CrossEntropyLoss).
        scheduler: LR scheduler, stepped once per epoch.
        type_name: key into `dataloader`; also used in checkpoint names.
        epochs: number of training epochs (default 60).

    Returns:
        The trained Model.  Checkpoints are written every 10 epochs and at
        the end via save_network().
    """
    since = time.time()
    Model.train(True)
    # BUG FIX: the original normalized the epoch loss by the global
    # `pre_datasets_size`, which is only defined in a commented-out section
    # of __main__ and would raise NameError here; derive the size from the
    # dataloader's dataset instead.
    dataset_size = len(dataloader[type_name].dataset)
    for epoch in range(epochs):
        epoch_since = time.time()
        print('{} epoch {} / {}'.format(type_name, epoch + 1, epochs))
        running_loss = 0.0
        for inputs, labels in dataloader[type_name]:
            now_batch_size = inputs.shape[0]
            # Skip the ragged final batch so every step sees a full batch.
            if now_batch_size < opt.pre_batchsize:
                continue
            # Variable() is a deprecated no-op in modern PyTorch; moving the
            # tensors to the GPU is all that is required.
            inputs = inputs.cuda()
            labels = labels.cuda()

            optimizer.zero_grad()

            outputs = Model(inputs)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            # Weight by batch size so epoch_loss is a per-sample average.
            running_loss += loss.item() * now_batch_size
        # BUG FIX: since PyTorch 1.1, scheduler.step() must follow the
        # epoch's optimizer.step() calls, not precede them.
        scheduler.step()
        epoch_loss = running_loss / dataset_size

        print('{} epoc {} / {}, loss: {:.4f}'.format(
            type_name, epoch + 1, epochs, epoch_loss))
        if epoch % 10 == 0:
            save_network(Model, epoch, 'pre_train_' + type_name)
        epoch_end = time.time() - epoch_since
        print('{} epoch {} train complete in {:.0f}m {:.0f}s'.format(type_name, epoch + 1, epoch_end // 60, epoch_end % 60))

    end = time.time() - since
    print('{} Model Training complete in {:.0f}m {:.0f}s'.format(
        type_name, end // 60, end % 60))
    save_network(Model, 'last', 'pre_train_' + type_name)
    return Model


def train(pModel, pOptimizer, pCriterion, pscheduler, bgModel, bgOptimizer, bgCriterion, bgscheduler, dataloader, total_epochs, p_epochs, bg_epochs):
    """Alternately train the person-ReID and background-ReID models.

    Each outer epoch first trains pModel for `p_epochs` epochs, then
    bgModel for `bg_epochs` epochs.  During a model's 'train' phase, the
    *other* model's last validated weights are loaded and its outputs are
    passed to DAM.DAM together with the current model's outputs.
    NOTE(review): DAM.DAM's return value is discarded everywhere, so it
    presumably modifies the output tensors in place — confirm against
    DAM's implementation.

    Relies on module-level globals defined in __main__: `opt`,
    `dataset_sizes`, and the history lists person_train_loss/err/val_acc
    and bg_train_loss/err/val_acc.  Assumes total_epochs, p_epochs and
    bg_epochs are all >= 1 (the final load_state_dict calls fail on None
    snapshots otherwise).

    Args:
        pModel, bgModel: networks already on the GPU; trained in place.
        pOptimizer/bgOptimizer, pCriterion/bgCriterion,
            pscheduler/bgscheduler: per-model training components.
        dataloader: dict with 'train' and 'val' DataLoaders, shared by
            both models.
        total_epochs: number of alternation rounds.
        p_epochs, bg_epochs: inner epochs per round for each model.

    Returns:
        (pModel, bgModel) with their last validated weights loaded.
    """
    since = time.time()
    pModel.train(True)
    bgModel.train(True)
    # Weight snapshots captured after each model's most recent 'val' phase;
    # the other model's DAM step loads them before producing its outputs.
    # NOTE(review): state_dict() returns references to the live parameter
    # tensors, not a deep copy, so these "snapshots" track later updates of
    # the source model — confirm this aliasing is intended.
    bg_last_model_wts = None
    p_last_model_wts = None
    for total_epoch_item in range(total_epochs):
        print('Total epoch {} / {}'.format(total_epoch_item + 1, total_epochs))

        # ---- train personReid model ----
        for p_epoch in range(p_epochs):
            epoch_since = time.time()
            for phase in ['train', 'val']:
                if phase == 'train':
                    # NOTE(review): scheduler.step() before the epoch's
                    # optimizer steps is the pre-1.1 PyTorch ordering; newer
                    # versions expect it after the updates.
                    pscheduler.step()
                    pModel.train(True)
                else:
                    pModel.train(False)
                p_running_loss = 0.0
                p_running_correct = 0.0
                for data in dataloader[phase]:
                    p_inputs, p_labels = data

                    p_now_batch_size, p_c, p_h, p_w = p_inputs.shape
                    # Skip undersized (ragged) final batches entirely.
                    if p_now_batch_size < opt.train_batch:
                        continue

                    # Variable() is a deprecated no-op wrapper in modern PyTorch.
                    p_inputs = Variable(p_inputs.cuda().detach())
                    p_labels = Variable(p_labels.cuda().detach())

                    # Gradients are cleared even in the 'val' phase (harmless).
                    pOptimizer.zero_grad()

                    if phase == 'val':
                        with torch.no_grad():
                            p_outputs = pModel(p_inputs)
                    else:
                        p_outputs = pModel(p_inputs)

                    # shape: batch * class
                    # DAM with bgModel: reload the background model's last
                    # validated weights before every training batch, then
                    # combine its outputs with p_outputs via DAM.
                    if phase == 'train':
                        if bg_last_model_wts is not None:
                            bgModel.load_state_dict(bg_last_model_wts)
                        bg_outputs = bgModel(p_inputs)
                        # Return value ignored — see NOTE(review) in docstring.
                        DAM.DAM(p_outputs, bg_outputs)

                    # Predicted class = argmax over the class dimension.
                    _, p_preds = torch.max(p_outputs.data, 1)
                    p_loss = pCriterion(p_outputs, p_labels)

                    if phase=='train':
                        p_loss.backward()
                        pOptimizer.step()

                    # Weight by batch size so the epoch figures below are
                    # per-sample averages over dataset_sizes[phase]
                    # (skipped ragged batches make this slightly approximate).
                    p_running_loss += p_loss.item() * p_now_batch_size
                    p_running_correct += float(torch.sum(p_preds == p_labels.data))


                # `dataset_sizes` is a module-level global set in __main__.
                p_epoch_loss = p_running_loss / dataset_sizes[phase]
                p_epoch_acc = p_running_correct / dataset_sizes[phase]
                print('person epoc {} / {}, loss: {:.4f}, acc: {:.4f}'.format(
                p_epoch + 1, p_epochs, p_epoch_loss, p_epoch_acc))

                # History lists are module-level globals set in __main__.
                # NOTE(review): save_list opens its file in append mode while
                # receiving the full cumulative list each epoch, so these
                # files accumulate duplicated prefixes — confirm intended.
                person_train_loss.append(p_epoch_loss)
                person_train_err.append(1.0 - p_epoch_acc)
                person_val_acc.append(p_epoch_acc)

                save_list('person_train_loss.txt', person_train_loss)
                save_list('person_train_err.txt', person_train_err)
                save_list('person_val_acc.txt', person_val_acc)

                if phase == 'val':
                    # Snapshot consumed by the bg model's DAM step below.
                    p_last_model_wts = pModel.state_dict()
                    if p_epoch % 3 == 0:
                        save_network(pModel, p_epoch, 'person_train_')

            epoch_end = time.time() - epoch_since
            print('person epoch {} train complete in {:.0f}m {:.0f}s'.format(p_epoch + 1, epoch_end // 60,
                                                                         epoch_end % 60))

        # ---- train bgReid model (mirror of the person loop above) ----
        for bg_epoch in range(bg_epochs):
            epoch_since = time.time()
            for phase in ['train', 'val']:
                if phase == 'train':
                    # Same pre-1.1 step ordering as the person loop above.
                    bgscheduler.step()
                    bgModel.train(True)
                else:
                    bgModel.train(False)
                bg_running_loss = 0.0
                bg_running_correct = 0.0
                for data in dataloader[phase]:
                    bg_inputs, bg_labels = data

                    bg_now_batch_size, bg_c, bg_h, bg_w = bg_inputs.shape
                    if bg_now_batch_size < opt.train_batch:
                        continue

                    bg_inputs = Variable(bg_inputs.cuda().detach())
                    bg_labels = Variable(bg_labels.cuda().detach())

                    bgOptimizer.zero_grad()

                    if phase == 'val':
                        with torch.no_grad():
                            bg_outputs = bgModel(bg_inputs)
                    else:
                        bg_outputs = bgModel(bg_inputs)

                    # shape: batch * class
                    # DAM with pModel: reload the person model's last
                    # validated weights each training batch and feed both
                    # outputs to DAM (arguments swapped vs. the person loop).
                    if phase == 'train':
                        if p_last_model_wts is not None:
                            pModel.load_state_dict(p_last_model_wts)
                        p_outputs = pModel(bg_inputs)
                        DAM.DAM(bg_outputs, p_outputs)

                    _, bg_preds = torch.max(bg_outputs.data, 1)
                    bg_loss = bgCriterion(bg_outputs, bg_labels)

                    if phase == 'train':
                        bg_loss.backward()
                        bgOptimizer.step()

                    bg_running_loss += bg_loss.item() * bg_now_batch_size
                    bg_running_correct += float(torch.sum(bg_preds == bg_labels.data))

                bg_epoch_loss = bg_running_loss / dataset_sizes[phase]
                bg_epoch_acc = bg_running_correct / dataset_sizes[phase]
                print('bg epoc {} / {}, loss: {:.4f}, acc: {:.4f}'.format(
                    bg_epoch + 1, bg_epochs, bg_epoch_loss, bg_epoch_acc))

                bg_train_loss.append(bg_epoch_loss)
                bg_train_err.append(1.0 - bg_epoch_acc)
                bg_val_acc.append(bg_epoch_acc)

                save_list('bg_train_loss.txt', bg_train_loss)
                save_list('bg_train_err.txt', bg_train_err)
                save_list('bg_val_acc.txt', bg_val_acc)

                if phase == 'val':
                    bg_last_model_wts = bgModel.state_dict()
                    if bg_epoch % 3 == 0:
                        save_network(bgModel, bg_epoch, 'bg_train_')
            epoch_end = time.time() - epoch_since
            print('bg epoch {} train complete in {:.0f}m {:.0f}s'.format(bg_epoch + 1, epoch_end // 60,
                                                                             epoch_end % 60))


    end = time.time() - since

    print('pModel and bgModel train complete in {:.0f}m {:.0f}s'.format(
        end // 60, end % 60))

    # Restore each model's last validated weights, then save both to disk.
    pModel.load_state_dict(p_last_model_wts)
    bgModel.load_state_dict(bg_last_model_wts)
    save_network(pModel, 'last', 'last_person_train')
    save_network(bgModel, 'last', 'last_bg_train')

    return pModel, bgModel



if __name__ == '__main__':

    person_data_dir = opt.person_data_dir
    bg_data_dir = opt.bg_data_dir
    # BUG FIX: --use_gpu is declared with type=str, so a command-line value
    # arrives as a string and the original `opt.use_gpu == True` test was
    # always False for it.  Accept real bools and common truthy strings.
    use_gpu = str(opt.use_gpu).lower() in ('true', '1', 'yes', 'y')
    gpu_id = 0 if use_gpu else -1
    if gpu_id == 0:
        torch.cuda.set_device(gpu_id)
        # Let cuDNN autotune conv algorithms for the fixed input sizes.
        cudnn.benchmark = True

    # pre train (kept for reference: this is how the checkpoints loaded
    # below were produced)
    # pre_train_datasets = {}
    # pre_train_datasets['person'] = datasets.ImageFolder(person_data_dir, DataUtils.data_transforms['train'])
    # pre_train_datasets['bg'] = datasets.ImageFolder(bg_data_dir, DataUtils.data_transforms['train'])
    #
    # pre_dataloaders = {x: torch.utils.data.DataLoader(pre_train_datasets[x], batch_size=opt.pre_batchsize, shuffle=True, num_workers = 8, pin_memory=True)
    #                    for x in ['person', 'bg']}
    # pre_datasets_size = {x: len(pre_train_datasets[x]) for x in ['person', 'bg']}
    #
    # pModel = Models.resnet50().cuda()
    # bgModel = Models.resnet50().cuda()
    #
    # pOptimizer = torch.optim.Adam(pModel.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    # bgOptimizer = torch.optim.Adam(bgModel.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    #
    # p_lr_scheduler = lr_scheduler.StepLR(pOptimizer, step_size=30, gamma=0.1)
    # bg_lr_scheduler = lr_scheduler.StepLR(bgOptimizer, step_size=30, gamma=0.1)
    #
    # criterion = nn.CrossEntropyLoss()
    #
    # pModel = pre_train(pModel, pre_dataloaders, pOptimizer, criterion, p_lr_scheduler, 'person', 1)
    # bgModel = pre_train(bgModel, pre_dataloaders, bgOptimizer, criterion, bg_lr_scheduler, 'bg', 1)


    # ---- joint training ----

    # Per-epoch history lists; train() appends to these module-level globals
    # and mirrors them to text files via save_list().
    person_train_loss = []
    bg_train_loss = []

    person_train_err = []
    bg_train_err = []

    person_val_acc = []
    bg_val_acc = []

    # Load the pre-trained backbones produced by pre_train() (see the
    # commented-out section above), then move them to the GPU.
    pModel = Models.resnet50()
    pModel.load_state_dict(torch.load('E:/pre_train_models/pre_train_personnet_last.pth'))
    pModel = pModel.cuda()

    bgModel = Models.resnet50()
    bgModel.load_state_dict(torch.load('E:/pre_train_models/pre_train_bgnet_last.pth'))
    bgModel = bgModel.cuda()

    total_epochs = 2
    p_epochs = 1
    bg_epochs = 1

    data_dir = opt.train_data_dir

    # Load the shared train/val data used by both models.
    image_datasets = {}
    image_datasets['train'] = datasets.ImageFolder(data_dir + 'train_all/', DataUtils.data_transforms['train'])
    image_datasets['val'] = datasets.ImageFolder(data_dir + 'val/', DataUtils.data_transforms['val'])

    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
                                                  shuffle=True, num_workers=8, pin_memory=True)
                   for x in ['train', 'val']}

    # Used as a global by train() to normalize epoch loss/accuracy.
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

    person_optimizer = torch.optim.Adam(pModel.parameters(), lr=opt.lr, betas=(0.5, 0.999), eps=1e-3)
    bg_optimizer = torch.optim.Adam(bgModel.parameters(), lr=opt.lr, betas=(0.5, 0.999), eps=1e-3)

    person_lr_scheduler = lr_scheduler.StepLR(person_optimizer, step_size=30, gamma=0.1)
    bg_lr_scheduler = lr_scheduler.StepLR(bg_optimizer, step_size=30, gamma=0.1)

    train_criterion = nn.CrossEntropyLoss()

    # Make sure the checkpoint directory used by save_network() exists.
    os.makedirs('./train_model', exist_ok=True)

    train(pModel, person_optimizer, train_criterion, person_lr_scheduler,
          bgModel, bg_optimizer, train_criterion, bg_lr_scheduler,
          dataloaders, total_epochs, p_epochs, bg_epochs)