import time

import torch
import numpy as np
import torch.nn.functional as F

from tqdm import tqdm
from torch import optim, nn
from torch.utils.data import DataLoader

from eval import evaluate
from models.facenet import FaceNet
from data.face_dataset import FaceDataset
from models.loss import triplet_loss, LossHistory
from data.dataloader import FaceNetDataset, dataset_collate
from utils import create_annotation


def get_num_classes(annotation_file):
	"""Return the number of identity classes listed in the annotation file.

	Each line of the file has the form ``<label>;<image path>``. Labels are
	assumed to be 0-based consecutive integers, so the class count is
	``max(label) + 1``.
	"""
	with open(annotation_file) as fp:
		highest_label = max(int(line.split(";")[0]) for line in fp)
	return highest_label + 1


def get_lr(opt):
	"""Return the learning rate of the optimizer's first param group.

	Returns None when the optimizer has no param groups, matching the
	original implicit behaviour.
	"""
	groups = opt.param_groups
	if not groups:
		return None
	return groups[0]['lr']


def fit_ont_epoch(loss_func, epoch, epoch_size, gen, val_epoch_size, gen_val, test_loader):
	"""Run one full epoch: train on `gen`, validate on `gen_val`, then score
	pair-verification accuracy on `test_loader`.

	Relies on module-level globals: ``model``, ``optimizer``, ``device``,
	``batch_size``, ``Epoch``, ``log_dir`` and ``loss_history``.

	Args:
		loss_func: triplet-loss callable, invoked as ``loss_func(embeddings, batch_size)``.
		epoch: 0-based index of the current epoch (display/checkpoint only).
		epoch_size: number of training batches per epoch.
		gen: training DataLoader yielding (images, labels) numpy batches.
		val_epoch_size: number of validation batches per epoch.
		gen_val: validation DataLoader, same batch format as ``gen``.
		test_loader: DataLoader yielding (anchor, positive, is_same_label) pairs.

	Returns:
		Mean validation loss (triplet + cross-entropy), consumed by the
		caller's ReduceLROnPlateau scheduler.
	"""
	total_triple_loss = 0
	total_cross_entropy_loss = 0
	total_accuracy = 0

	val_total_triple_loss = 0
	val_total_cross_entropy_loss = 0
	val_total_accuracy = 0

	print("Start Train")
	model.train()
	with tqdm(total=epoch_size, desc=f"epoch {epoch + 1}/{Epoch}", postfix=dict, mininterval=0.3) as bar:
		for iteration, batch in enumerate(gen):
			images, labels = batch
			with torch.no_grad():
				# the collate_fn produces numpy arrays; convert and move to device
				images = torch.from_numpy(images).type(torch.FloatTensor).to(device)
				labels = torch.from_numpy(labels).long().to(device)

			optimizer.zero_grad()
			before_normalize, outputs1 = model.forward_feature(images)
			outputs2 = model.forward_classifier(before_normalize)

			# joint objective: triplet loss on embeddings + CE on identity logits
			_triplet_loss = loss_func(outputs1, batch_size)
			_cross_entropy_loss = nn.NLLLoss()(F.log_softmax(outputs2, dim=-1), labels)
			loss = _triplet_loss + _cross_entropy_loss

			loss.backward()
			optimizer.step()

			with torch.no_grad():
				tmp = torch.argmax(F.softmax(outputs2, dim=-1), dim=-1) == labels
				accuracy = torch.mean(tmp.type(torch.FloatTensor))

			total_accuracy += accuracy.item()
			total_triple_loss += _triplet_loss.item()
			total_cross_entropy_loss += _cross_entropy_loss.item()

			bar.set_postfix(**{"total_triple_loss": total_triple_loss / (iteration + 1),
			                   "total_cross_entropy_loss": total_cross_entropy_loss / (iteration + 1),
			                   "accuracy": total_accuracy / (iteration + 1),
			                   "lr": get_lr(optimizer)})
			bar.update(1)

	model.eval()
	print('Start Validation')
	with tqdm(total=val_epoch_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
		for iteration, batch in enumerate(gen_val):
			images, labels = batch
			# whole validation step runs without gradients: no backward pass
			# happens here, so tracking autograd state only wastes memory
			with torch.no_grad():
				images = torch.from_numpy(images).type(torch.FloatTensor).to(device)
				labels = torch.from_numpy(labels).long().to(device)

				before_normalize, outputs1 = model.forward_feature(images)
				outputs2 = model.forward_classifier(before_normalize)

				_triplet_loss = loss_func(outputs1, batch_size)
				_cross_entropy_loss = nn.NLLLoss()(F.log_softmax(outputs2, dim=-1), labels)

				tmp = torch.argmax(F.softmax(outputs2, dim=-1), dim=-1) == labels
				accuracy = torch.mean(tmp.type(torch.FloatTensor))

			val_total_accuracy += accuracy.item()
			val_total_triple_loss += _triplet_loss.item()
			# BUG FIX: the original added the triplet loss twice here and never
			# accumulated the validation cross-entropy loss
			val_total_cross_entropy_loss += _cross_entropy_loss.item()

			pbar.set_postfix(**{'val_total_triple_loss': val_total_triple_loss / (iteration + 1),
			                    # BUG FIX: this key used to display the triplet loss
			                    'val_total_CE_loss': val_total_cross_entropy_loss / (iteration + 1),
			                    'val_accuracy': val_total_accuracy / (iteration + 1),
			                    'lr': get_lr(optimizer)})
			pbar.update(1)

	print("Start All Test")
	labels, distances = [], []
	for data_a, data_p, label in test_loader:
		with torch.no_grad():
			data_a = data_a.type(torch.FloatTensor).to(device)
			data_p = data_p.type(torch.FloatTensor).to(device)
			out_a, out_p = model(data_a), model(data_p)
			# L2 distance between the two embeddings of each pair
			dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))
		distances.append(dists.data.cpu().numpy())
		labels.append(label.data.cpu().numpy())

	# flatten the per-batch arrays into one flat array each before evaluation
	labels = np.array([sub_label for label in labels for sub_label in label])
	distances = np.array([sub_dist for dist in distances for sub_dist in dist])
	tpr, fpr, accuracy, val, val_std, far, best_thresholds, precision, recall = evaluate(distances, labels)

	acc = np.mean(accuracy)
	# BUG FIX: the loops run exactly epoch_size / val_epoch_size iterations, so
	# averaging over (size + 1) under-reported both losses by one batch's worth
	loss_value = (total_triple_loss + total_cross_entropy_loss) / epoch_size
	val_loss_value = (val_total_triple_loss + val_total_cross_entropy_loss) / val_epoch_size
	loss_history.append_loss(acc, loss_value, val_loss_value)

	print(f"Finish Validation Epoch: {epoch + 1} / {Epoch}")
	print(f"Accuracy: {np.mean(accuracy) * 100}%")
	print(f"Precision: {np.mean(precision) * 100}%")
	print(f"Recall: {np.mean(recall) * 100}%")
	print(f"Total Loss: {loss_value:.4f}")

	if not (epoch + 1) % 10:  # checkpoint the model every 10 epochs
		print('Saving state, iter:', str(epoch + 1))
		# BUG FIX: the original filename contained ".pth" twice in the middle
		model_name = f"{log_dir}Epoch-{epoch + 1}-Total_Loss-{loss_value:.4f}-Val_Loss-{val_loss_value:.4f}.pth"
		torch.save(model.state_dict(), model_name)

	return val_loss_value


if __name__ == '__main__':
	start = time.time()
	# training hyper-parameters
	lr = 1e-2
	batch_size = 10
	Epoch = 100

	log_dir = "./logs/"
	image_path = "images/face"
	pairs_path = "images/face/pairs.txt"

	input_shape = [160, 160, 3]
	annotation_path = create_annotation("images/face")  # build the "<label>;<path>" annotation file for the images
	num_classes = get_num_classes(annotation_path)  # number of face identity classes

	# pick the device: use the GPU when available, otherwise fall back to CPU
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# instantiate the model
	model = FaceNet(num_classes=num_classes)
	# move the model onto the chosen device
	model.to(device)
	# initialise the model weights
	model.weights_init(init_type="xavier")

	# NOTE: rebinds the imported `triplet_loss` factory to its instance
	triplet_loss = triplet_loss()
	loss_history = LossHistory(log_dir)

	# pair dataset used for the per-epoch verification test in fit_ont_epoch
	face_set = FaceDataset(path=image_path, pairs_path=pairs_path, image_size=input_shape)
	face_loader = DataLoader(face_set, batch_size=128, shuffle=False, num_workers=3, drop_last=True)

	# random 70/30 train/validation split of the annotation lines
	val_split = 0.3
	with open(annotation_path, "r") as fp:
		lines = fp.readlines()
	np.random.shuffle(lines)
	num_val = int(len(lines) * val_split)
	num_train = len(lines) - num_val

	optimizer = optim.Adam(model.parameters(), lr)
	# decays the LR when the validation loss returned by fit_ont_epoch plateaus
	lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True)

	train_dataset = FaceNetDataset(input_shape, lines[:num_train], num_train, num_classes)
	val_dataset = FaceNetDataset(input_shape, lines[num_train:], num_val, num_classes)

	train_loader = DataLoader(train_dataset, batch_size, num_workers=3, pin_memory=True, collate_fn=dataset_collate, drop_last=True)
	val_loader = DataLoader(val_dataset, batch_size, num_workers=3, pin_memory=True, collate_fn=dataset_collate, drop_last=True)

	# batches per epoch (max(1, ...) guards against tiny datasets)
	epoch_size = max(1, num_train // batch_size)
	val_epoch_size = max(1, num_val // batch_size)

	for epoch in range(Epoch):
		_loss = fit_ont_epoch(triplet_loss, epoch, epoch_size, train_loader, val_epoch_size, val_loader, face_loader)
		lr_scheduler.step(_loss)

	end = time.time()
	print(f"Speed {end - start} second!")
