"""
Module to train the model for the first task. Seperated from the rest of the code for the purpose of clarity
The paper treats a pretrained Alexnet model as the initial expert so this file also helps to recreate that setting
without overtly making the generate_models.py file complicated to read at the expense of some redundancy in the code.
"""

#!/usr/bin/env python
# coding: utf-8

import torch 
import os
from torchvision import models
from autoencoder import GeneralModelClass
from utils.encoder_utils import exp_lr_scheduler
from utils.utils_1 import *
import copy
from autoencoder import *

import numpy as np
from sklearn.cluster import KMeans
from tqdm import tqdm
import sys
sys.path.append(os.path.join(os.getcwd(), 'utils'))
from model_utils import *


def train_model_1(num_classes, feature_extractor, encoder_criterion, dset_loaders, test_dset_loaders, dset_size, test_dset_size, num_epochs, use_gpu, task_number,  batch_size, lr = 0.1, alpha = 0.,):
	"""
	Train the expert model on the first task specifically.

	Inputs:
		1) num_classes = The number of classes in the new task
		2) feature_extractor = A reference to the feature extractor model
		   (unused here; kept for signature compatibility with the other
		   training entry points)
		3) encoder_criterion = The loss criterion for training the Autoencoder
		   (unused here; kept for signature compatibility)
		4) dset_loaders = Dataset loaders for the training set
		5) test_dset_loaders = Dataset loaders for the test set
		6) dset_size = Size of the training dataset (used as a denominator
		   for the per-epoch loss averages)
		7) test_dset_size = Size of the test dataset
		8) num_epochs = Number of epochs for which the model needs to be trained
		9) use_gpu = A flag which would be set if the user has a CUDA enabled device
		10) task_number = A number which represents the task for which the
		    model is being trained; also sets the KMeans cluster count
		    (n_clusters = task_number + 1)
		11) batch_size = Nominal batch size (informational; the per-batch
		    sample count is read from the tensor itself so a smaller final
		    batch does not break the reshape)
		12) lr = initial learning rate handed to exp_lr_scheduler
		13) alpha = Tradeoff factor for the loss (currently unused)

	Side effects: creates models/trained_models/model_<task_number>/ holding
	classes.txt, periodic checkpoints every 10 epochs, and the final
	best_performing_model.pth.
	"""
	# Hoisted out of the batch loop: the original re-imported this every
	# iteration and then shadowed the function name with its own return value.
	from sklearn.metrics import silhouette_score

	device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

	model_init = GeneralModelClass(num_classes)
	model_init.to(device)

	# Fine-tune the classifier head plus the last two conv layers of the
	# (AlexNet-style) feature extractor; freeze everything else.
	for param in model_init.Tmodel.classifier.parameters():
		param.requires_grad = True

	for param in model_init.Tmodel.features.parameters():
		param.requires_grad = False

	for param in model_init.Tmodel.features[8].parameters():
		param.requires_grad = True

	for param in model_init.Tmodel.features[10].parameters():
		param.requires_grad = True

	print ("Initializing an Adam optimizer")
	optimizer = optim.Adam(model_init.Tmodel.parameters(), lr = 0.003, weight_decay= 0.0001)

	# makedirs(exist_ok=True) replaces the mkdir/FileExistsError dance and
	# also creates any missing parent directories.
	print ("Creating the directory for the new model")
	mypath = os.path.join(os.getcwd(), "models", "trained_models", f"model_{task_number}")
	os.makedirs(mypath, exist_ok=True)

	# Store the number of classes in the file for future use
	with open(os.path.join(mypath, 'classes.txt'), 'w') as file1:
		file1.write(str(num_classes))

	for epoch in range(num_epochs):
		print ("Epoch {}/{}".format(epoch+1, num_epochs))
		print ("-"*20)

		running_loss = 0
		running_cluster_loss = 0
		running_condition_loss = 0
		running_corrects = 0

		# Decays the learning rate periodically (see exp_lr_scheduler)
		optimizer = exp_lr_scheduler(optimizer, epoch, lr)
		model_init = model_init.train(True)

		for data in dset_loaders:
			input_data, labels = data
			del data

			if (use_gpu):
				input_data = Variable(input_data.to(device))
				labels = Variable(labels.to(device))
			else:
				input_data  = Variable(input_data)
				labels = Variable(labels)

			output = model_init(input_data)

			optimizer.zero_grad()

			# Cross-entropy loss
			loss = model_criterion(output, labels, flag = "CE")

			# Cluster-quality diagnostic: KMeans on the flattened raw inputs,
			# scored with the silhouette coefficient. Use the tensor's actual
			# first dimension instead of batch_size so a smaller final batch
			# does not crash the reshape.
			# NOTE(review): silhouette_score requires at least two clusters,
			# so this presumably assumes task_number >= 1 — confirm.
			flat_inputs = input_data.cpu().reshape(input_data.size(0), -1)
			kmeans = KMeans(n_clusters=task_number+1)
			kmeans.fit(flat_inputs)
			batch_silhouette = silhouette_score(flat_inputs, kmeans.labels_)

			del input_data
			del labels

			loss.backward()
			optimizer.step()

			running_loss += loss.item()
			running_cluster_loss += batch_silhouette

		# Evaluate on the test set after every epoch
		with torch.no_grad():
			model_init.eval()

			for test_data in test_dset_loaders:
				input_data, labels = test_data
				del test_data

				if (use_gpu):
					input_data = Variable(input_data.to(device))
					labels = Variable(labels.to(device))
				else:
					input_data = Variable(input_data)
					labels = Variable(labels)

				outputs = model_init(input_data)
				criterion = torch.nn.CrossEntropyLoss()
				loss = criterion(outputs, labels)

				# For a more robust analysis check over the entire output
				# layer (similar to multi head setting); for the single-head
				# setting restrict torch.max to the task's own logits.
				_, preds = torch.max(outputs, 1)

				running_corrects += torch.sum(preds == labels.data)
				running_condition_loss += loss.item()

				del preds
				del input_data
				del labels

		epoch_accuracy = running_corrects.double() / test_dset_size
		epoch_ce_loss = running_loss/dset_size
		# NOTE(review): running_cluster_loss accumulates one score per batch,
		# so dividing by dset_size averages a per-batch metric over samples —
		# kept as-is to preserve the reported numbers.
		epoch_cluster_loss = running_cluster_loss/dset_size
		epoch_condition_loss = running_condition_loss / test_dset_size

		print(' Epoch {} *****  Acc:{},  CE Loss:{},  Cluster Loss:{},  Condition Loss:{}'.format(epoch, epoch_accuracy, epoch_ce_loss, epoch_cluster_loss, epoch_condition_loss))

		# Checkpoint every 10 epochs, skipping the very first and last epoch
		if(epoch != 0 and epoch != num_epochs -1 and (epoch+1) % 10 == 0):
			epoch_file_name = os.path.join(mypath, str(epoch+1)+'.pth.tar')
			torch.save({
				'epoch': epoch,
				'epoch_loss': running_loss,
				'model_state_dict': model_init.state_dict(),
				'optimizer_state_dict': optimizer.state_dict(),
			}, epoch_file_name)

	torch.save(model_init.state_dict(), os.path.join(mypath, "best_performing_model.pth"))

	del model_init




