# -*- coding:utf-8 -*-
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from tqdm import tqdm
import sys
import torch.backends.cudnn as cudnn

sys.path.append('E:/workspace/face_recognition')

from networks import net
from datasets.train_dataset import Ms1mDataset
from datasets.verification_dataset import VerificationDataset
from eval.Metric import AccMetric, LossMetric
from shutil import rmtree
from eval.verification import calc_acc

# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# device = 'cpu'

def calc_acc_loss(data, target):
    """Run a forward pass and return (accuracy, loss) for one batch.

    Uses the module-level ``model``. The model returns three tensors:
    a log-probability output used for the NLL loss, a second logits
    tensor used for the accuracy metric, and an embedding (unused here).

    :param data: input batch tensor, moved to ``device`` by the caller
    :param target: integer class labels (1-D or column vector)
    :return: (acc, loss) — acc is a CPU float scalar tensor, loss keeps grad
    """
    output, output_real, _ = model(data, target)
    # nll_loss expects 1-D targets, hence the flatten.
    loss = F.nll_loss(output, target.flatten())
    # Bug fix: the original kept keepdim=True, giving pred shape (N, 1).
    # If target is 1-D, (N, 1) == (N,) broadcasts to an (N, N) matrix and
    # the mean is garbage. Flattening both sides is correct for either
    # target layout.
    pred = torch.argmax(output_real, dim=1)
    acc = (pred == target.flatten()).cpu().float().mean()
    return acc, loss


def adjust_lr(epoch):
    """Set the learning rate of every optimizer param group for this epoch.

    Fixed schedule: 0.1 at epoch 13, 0.01 at epoch 19, 0.001 at epoch 22;
    any other epoch passed in falls back to 0.001. Mutates the module-level
    ``optimizer`` in place and prints the new rate.

    :param epoch: current epoch index
    """
    lr_schedule = {13: 0.1, 19: 0.01, 22: 0.001}
    # dict.get replaces the original double-lookup
    # (`lr_t[epoch] if epoch in lr_t.keys() else 0.001`).
    _lr = lr_schedule.get(epoch, 0.001)

    for param_group in optimizer.param_groups:
        param_group['lr'] = _lr
    print('lr is changed to ', _lr)


def save_checkpoint(path='torch_face_params'):
    """Persist model and optimizer state dicts to ``<path>/resnet50.pt``.

    Reads the module-level ``model`` and ``optimizer``.

    :param path: checkpoint directory (created if missing)
    """
    import os

    # Bug fix: torch.save raises FileNotFoundError if the directory
    # does not exist yet; create it first.
    os.makedirs(path, exist_ok=True)
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    filepath = os.path.join(path, 'resnet50.pt')
    torch.save(state, filepath)


def verification():
    """Evaluate the model on (a subsample of) the verification set.

    Iterates ``veri_loader`` under ``torch.no_grad`` with the model in eval
    mode, stopping after roughly ``samplerate * len(veri_loader)`` batches,
    and scores each batch's embeddings with ``calc_acc``.

    Note: leaves the model in eval mode; the caller (the training loop)
    switches back to train mode at the start of the next epoch.

    :return: mean per-batch accuracy over the evaluated batches
        (0.0 if no batch was evaluated)
    """
    model.eval()
    acc_total = 0.0
    batches_evaluated = 0
    iters = len(veri_loader)
    with torch.no_grad():
        for i, (data, label) in enumerate(veri_loader):
            # Subsample the loader; samplerate is a module-level fraction.
            if i > samplerate * iters:
                break
            label = label.type(torch.int64)
            data, label = data.to(device), label.to(device)
            _, _, embedding = model(data, label)
            acc_total += calc_acc(embedding.cpu(), label.cpu())
            batches_evaluated += 1

    # Bug fix: the original returned the SUM of per-batch accuracies, which
    # exceeds 1.0 as soon as more than one batch is evaluated. Return the
    # mean instead.
    return acc_total / batches_evaluated if batches_evaluated else 0.0


def train_on_epoch(epoch, top_acc, acc_metrics, loss_metrics):
    """Train for one epoch on (a subsample of) ``train_loader``.

    Runs the forward/backward/step loop, streams loss/accuracy to the
    tqdm bar and to TensorBoard, checkpoints when a new best accuracy
    above 0.75 coincides with a 500-step boundary, then runs verification.

    :param epoch: current epoch index (used for the global step offset)
    :param top_acc: best running-average accuracy seen so far
    :param acc_metrics: AccMetric accumulator for this epoch
    :param loss_metrics: LossMetric accumulator for this epoch
    :return: (top_acc, veri_acc) — updated best accuracy and verification accuracy
    """
    progressbar = tqdm(train_loader)
    iters = len(train_loader)
    # Bug fix: the original referenced the loop variable `loss` after the
    # loop, which raises NameError if the loader yields no batches. Track
    # the last loss explicitly with a safe default.
    last_loss = 0.0
    for iter_idx, (data, target) in enumerate(progressbar):
        # Subsample the epoch; samplerate is a module-level fraction.
        if iter_idx > samplerate * iters:
            break
        global_step = iter_idx + iters * epoch
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        acc, loss = calc_acc_loss(data, target)
        loss.backward()
        optimizer.step()
        last_loss = loss
        loss_metrics.update(loss)
        acc_metrics.update(acc)

        progressbar.set_description(f'Epoch: {epoch} Loss: {loss:.4f} Acc: {acc_metrics.avg * 100:.2f}%')
        writer.add_scalars('training', {'loss': loss_metrics.cur,
                                        'acc': acc_metrics.cur, }, global_step=global_step)

        # Checkpoint only on a new best average accuracy that is both
        # meaningfully high and aligned with a 500-step boundary.
        if acc_metrics.avg > top_acc:
            top_acc = acc_metrics.avg
            if top_acc > 0.75 and global_step % 500 == 0:
                save_checkpoint()
    veri_acc = verification()
    progressbar.set_description(f'Epoch: {epoch} Loss: {last_loss:.4f} Acc: {acc_metrics.avg * 100:.2f}% Valid acc: {veri_acc * 100:.2f}%')
    progressbar.close()
    return top_acc, veri_acc


def train(epochs=30):
    """Outer training loop: run ``epochs`` epochs and log validation accuracy.

    Each epoch gets fresh metric accumulators; the learning rate is
    stepped down at the scheduled epochs before training resumes.

    :param epochs: number of epochs to run (default 30)
    """
    lr_drop_epochs = [13, 19, 22]
    best_acc = 0.0
    for ep in range(epochs):
        # Verification at the end of the previous epoch left the model
        # in eval mode; switch back before training.
        model.train()
        acc_meter = AccMetric('train_acc')
        loss_meter = LossMetric('train_loss')
        if ep in lr_drop_epochs:
            adjust_lr(ep)
        best_acc, val_acc = train_on_epoch(ep, best_acc, acc_meter, loss_meter)
        writer.add_scalar('validation/acc', val_acc, ep)


if __name__ == '__main__':
    # Enable the cuDNN auto-tuner (picks the fastest conv algorithms for
    # fixed input shapes) only when we are actually on a CUDA device.
    # Bug fix: `device` is a torch.device object, so comparing it to the
    # string 'cpu' is fragile/version-dependent; compare its .type instead.
    if device.type != 'cpu':
        cudnn.benchmark = True

    print('Training......')
    # Wipe any previous TensorBoard run so curves start clean.
    tensorboard_path = './tb'
    rmtree(tensorboard_path, ignore_errors=True)
    writer = SummaryWriter(tensorboard_path)

    per_batch_size = 128

    train_dataset = Ms1mDataset('E:/datasets/faces_emore')
    train_loader = DataLoader(dataset=train_dataset, batch_size=per_batch_size, shuffle=True, num_workers=1)

    veri_dataset = VerificationDataset('E:/datasets/faces_emore')
    veri_loader = DataLoader(dataset=veri_dataset, batch_size=per_batch_size, shuffle=False, num_workers=1)

    print('total people: ', train_dataset.num_people)
    model = net.InsightFace(train_dataset.num_people).to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    # Fraction of each loader consumed per epoch (see train_on_epoch /
    # verification) — presumably a smoke-test setting; TODO confirm.
    samplerate = 0.0002
    train(10)

    writer.close()
