import warnings
import torch
from torch.utils.data import DataLoader
# from SoyDNGPNext.weight_map import weight_decoder
# from remodel import remodel
from sklearn import metrics
from data_process import *
# from epoch_data import *
from utils import outpath
from eval import *
import os
from MyModel import CNN_MODEL
from focal_loss import FocalLoss
import shutil
import random
nohupout_path = ""

path = os.path.dirname(__file__)


def init_weights(net):
	"""Xavier-initialise the weights of Linear and Conv2d layers.

	Intended to be passed to ``torch.nn.Module.apply`` so that every
	submodule is visited; modules of any other type are left untouched.

	:param net: a submodule handed in by ``Module.apply``
	"""
	# isinstance is the idiomatic check and, unlike type() ==, also
	# covers subclasses of Linear/Conv2d.
	if isinstance(net, (torch.nn.Linear, torch.nn.Conv2d)):
		# Xavier keeps activation variance stable across layers, which
		# mitigates vanishing/exploding gradients.
		torch.nn.init.xavier_uniform_(net.weight)


class data_loader(torch.utils.data.Dataset):
	"""Minimal Dataset over a numpy feature array and a numpy label array.

	Both arrays are converted to tensors once at construction; indexing
	yields a ``(genotype, label)`` pair cast to float32.
	"""

	def __init__(self, data, label):
		self.data = torch.from_numpy(data)
		self.label = torch.from_numpy(label)

	def __len__(self):
		# number of samples along the first axis
		return self.data.shape[0]

	def __getitem__(self, index):
		# cast lazily per item so the stored tensors keep their source dtype
		return self.data[index].float(), self.label[index].float()


# In this module, users could train their own datasets on our baseline
class Train:
	"""Training driver for the genotype->trait baseline model.

	Reads a .vcf genotype file plus a .csv trait file via ``DataProcess``,
	then trains one ``CNN_MODEL`` per trait: ``train_n`` handles quantity
	(regression) traits, ``train_p`` handles quality (classification)
	traits.  All outputs (yaml configs, weights, figures) go under the run
	directory returned by ``outpath('train')``.
	"""

	def __init__(self, vcf_path, trait_path, percentage_of_train=0.7, num_workers=8, batch_size=20):
		"""Load the raw data and prepare the per-trait dictionaries.

		:param vcf_path: path to the .vcf genotype file
		:param trait_path: path to the .csv trait file
		:param percentage_of_train: fraction of samples used for training
		:param num_workers: DataLoader worker process count
		:param batch_size: batch size for both train and test loaders
		"""
		global nohupout_path
		# ignore warnings
		warnings.filterwarnings("ignore")
		# select which device to train
		self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
		# percentage of train dataset
		self.percentage_of_train = percentage_of_train
		# the training batch size
		self.batch_size = batch_size
		# workers amount
		self.num_workers = num_workers
		# output path
		self.saved_path = outpath('train')
		# remember the run directory so the nohup log can be copied there at script exit
		nohupout_path = f"{self.saved_path}/"
		# read .vcf file and .csv trait file, and instantiate the data process class
		self.d = DataProcess(vcf_path, trait_path)
		# process and return the Quality Traits dictionary and Quantity Traits dictionary
		self.p_trait_dic, self.n_trait_dic = self.d.convert_trait(f"{self.saved_path}/configs/")
		print(f"yaml configs are saved in {self.saved_path}/configs/")
		# turn keys to lists
		self.p_trait_list, self.n_trait_list = list(self.p_trait_dic.keys()), list(self.n_trait_dic.keys())

	# create train and test dataloaders (splits the dataset into training and test sets)
	def dataloader(self, trait, is_quality):
		"""Return ``(train_dataloader, test_dataloader)`` for ``trait``.

		:param trait: trait name used to select labels in ``DataProcess``
		:param is_quality: True for classification traits, False for regression
		"""
		# split dataset by percentage of dataset
		train_data, train_label, test_data, test_label = self.d.to_dataset(trait, percentage=self.percentage_of_train,
																		   is_quality=is_quality)
		# dataloader of train dataset
		train_dataloader = DataLoader(data_loader(train_data, train_label), batch_size=self.batch_size, shuffle=True,
									  num_workers=self.num_workers)
		# dataloader of test dataset
		# NOTE(review): shuffling the test loader is harmless but unnecessary
		test_dataloader = DataLoader(data_loader(test_data, test_label), batch_size=self.batch_size, shuffle=True,
									 num_workers=self.num_workers)
		return train_dataloader, test_dataloader

	def dataloader_test(self, trait,merged_list,is_quality):
		"""Build train/test dataloaders restricted to the samples in ``merged_list``.

		Used by ``train_p`` to balance each training chunk; the actual split
		is delegated to ``DataProcess.get_test_train_dataloader``.
		"""
		# split dataset by percentage of dataset
		train_data, train_label, test_data, test_label = self.d.get_test_train_dataloader(trait,merged_list,percentage=self.percentage_of_train,
																		   is_quality=is_quality)
		# dataloader of train dataset
		train_dataloader = DataLoader(data_loader(train_data, train_label), batch_size=self.batch_size, shuffle=True,
									  num_workers=self.num_workers)
		# dataloader of test dataset
		test_dataloader = DataLoader(data_loader(test_data, test_label), batch_size=self.batch_size, shuffle=True,
									 num_workers=self.num_workers)
		return train_dataloader, test_dataloader

	def get_epoch_Data(self, trait):
		"""Return ``(positive_sample_list, negative_sample_list)`` for ``trait``.

		Thin wrapper around ``DataProcess.to_dataset_test`` — presumably
		lists of sample rows/indices; verify against data_process.
		"""
		pos_list,nag_list = self.d.to_dataset_test(trait)

		return pos_list,nag_list


	# train an epoch
	def train_for_epoch(self, train_dataloader, updater, loss, net):
		"""Train ``net`` for one epoch and return the average batch loss (a tensor).

		NOTE(review): ``num_data`` is referenced after the loop, so an empty
		dataloader raises NameError here; accumulating ``loss_for_batch``
		without ``.item()``/``detach`` also keeps each batch's autograd graph
		alive until the epoch ends.
		"""
		loss_ = 0.0
		# iterate over the batches
		for num_data, (genomap, target_trait) in enumerate(train_dataloader):
			genomap, target_trait = genomap.to(self.device), target_trait.to(self.device)
			trait_hat = net(genomap)
			loss_for_batch = loss(trait_hat, target_trait.long())
			loss_ += loss_for_batch
			if isinstance(updater, torch.optim.Optimizer):
				updater.zero_grad()
				loss_for_batch.backward()
				updater.step()
		return loss_ / (num_data + 1)

	# train the Quantity Traits (regression network)
	def train_n(self, percentage=0.05, epoch=5, weight_decay=1e-5, draw=True):
		"""Train the regression model over every quantity trait.

		:param percentage: intended top-SNP fraction for the (currently
			disabled) weight_decoder step
		:param epoch: number of training epochs (see notes below)
		:param weight_decay: Adam L2 regularisation strength
		:param draw: whether to save evaluation curves via ``pic_draw``
		:return: the evaluation dictionary of the LAST trait only —
			NOTE(review): ``eval_dict`` is re-created inside the trait loop,
			so earlier traits' results are discarded.
		"""
		epoch_total = epoch
		weight_save_path = f"{self.saved_path}/weight/"
		pic_save_path = f"{self.saved_path}/"
		os.makedirs(weight_save_path, exist_ok=True)
		# previously built via the remodel module:
		# net, decoder = remodel(f"{path}/data/model.yaml", 1)
		net = CNN_MODEL()
		print(f"Your model structure is: \n{net}")
		net.to(self.device)
		# initialize weights
		net.apply(init_weights)
		# set Smooth L1 Loss as the loss function
		loss = torch.nn.SmoothL1Loss()
		# set updater with the Adam algorithm (a widely used adaptive
		# gradient-descent optimiser; the model parameters are handed to it)
		updater = torch.optim.Adam(net.parameters(), weight_decay=weight_decay)
		for trait in self.n_trait_list:
			# create the evaluation dictionary, Quantity Traits and Quality Traits have different evaluation indexes
			eval_dict = {'train_loss': [], 'test_loss': [], 'mse': [], 'r': [], 'avg_mse': []}
			# initialize the loss function
			mse_loss = torch.nn.MSELoss()
			# train and test dataloaders
			train_dataloader, test_dataloader = self.dataloader(trait, is_quality=False)
			# train each epoch
			# the minimum coefficient value
			min_r = 0.0
			# NOTE(review): ``epoch`` is consumed here and later reset to the
			# hard-coded value 5, so for every trait after the first this
			# training loop is skipped entirely.
			while epoch:
				print(f"Now training epoch {epoch_total - epoch + 1}")
				# set the model as train mode
				net.train()
				# train this epoch and compute the average loss
				avg_train_loss = self.train_for_epoch(train_dataloader, updater, loss, net)
				eval_dict['train_loss'].append(float(avg_train_loss.to('cpu').detach().numpy()))
				# (evaluation mode would disable dropout / stop gradient
				# updates; the switch is deferred to the test loop below)
				# net.eval()
				# for the test dataset
				epoch -= 1

			# NOTE(review): hard-coded 5 test passes regardless of the ``epoch`` argument
			epoch = 5
			while epoch:
				net.eval()
				with torch.no_grad():
					print(f"Now training test epoch {epoch_total - epoch + 1}")
					# initialize the evaluation indexes
					mse = 0.0
					loss_test = 0.0
					hat = np.array([])
					truth = np.array([])
					for index, (test_genomap, test_trait) in enumerate(test_dataloader):
						# load the feature maps and traits on device
						test_genomap, test_trait = test_genomap.to(self.device), test_trait.to(self.device)
						# the predict results
						y_hat = net(test_genomap)
						# compute loss on test dataset
						# NOTE(review): the target is cast to long for this regression
						# loss — confirm the trait values are integer-coded
						loss_test += loss(y_hat, test_trait.long())
						# reshape y_hat as an 1-dimension vector, and insert it to hat
						hat = np.insert(hat, 0, y_hat.to('cpu').detach().numpy().reshape(len(y_hat), ), axis=0)
						# reshape acc as an 1-dimension vector, and insert it to acc
						truth = np.insert(truth, 0, test_trait.to('cpu').numpy(), axis=0)
						# compute MSE between predict results and actual values
						mse += mse_loss(y_hat, test_trait)
					# count evaluation indexes
					loss_test = loss_test / (index + 1)
					eval_dict['test_loss'].append(float(loss_test.to('cpu').detach().numpy()))
					# eval_dict['train_loss'].append(float(avg_train_loss.to('cpu').detach().numpy()))
					# the average MSE
					avg_mse = mse / (index + 1)
					# compute the correlation coefficient value
					r = np.corrcoef(hat, truth)[0][1]
					# append the coefficient value into evaluation dictionary
					eval_dict['r'].append(r)
					eval_dict['mse'].append(mse)
					eval_dict['avg_mse'].append(avg_mse)
					# saving the best weight (highest correlation) is currently disabled:
					# if r >= min_r:
					# 	torch.save(net, os.path.join(weight_save_path, f'{trait}_best.pt'))
					# 	# refresh the minimum coefficient value
					# 	min_r = eval_dict['r'][-1]
					epoch -= 1
		# NOTE(review): saving is commented out above, so this message is
		# currently misleading; it also refers only to the last trait.
		print(f"weight are saved in {weight_save_path}/{trait}_best.pt")
		# set the trait of this evaluation dictionary
		eval_dict['trait'] = trait
		if draw:
			# Eval(eval_dict, pic_save_path)
			pic_draw(eval_dict, pic_save_path)
			print(f"pictures are saved in {pic_save_path}")

		# snp_pos = weight_decoder(net,decoder,self.d.columns, percentage=percentage)
		# print(f"Top {percentage*100}% most important SNP positions are: {snp_pos}")
		return eval_dict

	# train the Quality Traits (classification)
	def train_p(self, percentage=0.05, epoch=5, weight_decay=1e-5, draw=True):
		"""Train a classifier for every quality trait.

		Negative samples are consumed in chunks of 2000; each chunk is merged
		with all positive samples and shuffled before training, to balance
		the classes.  NOTE(review): ``epoch`` is decremented once per chunk,
		so the printed epoch number actually tracks chunks, not epochs.

		:param percentage: intended top-SNP fraction for the (disabled)
			weight_decoder step
		:param epoch: nominal epoch budget (see note above)
		:param weight_decay: Adam L2 regularisation strength
		:param draw: whether to render evaluation figures via ``Eval``
		:return: the evaluation dictionary of the LAST trait processed
		"""
		epoch_total = epoch
		weight_save_path = f"{self.saved_path}/weight/"
		pic_save_path = f"{self.saved_path}/"
		os.makedirs(weight_save_path, exist_ok=True)
		for trait in self.p_trait_list:
			# train and test dataloaders
			# train_dataloader, test_dataloader = self.dataloader(trait, is_quality=True)
			pos_list,nag_list=self.get_epoch_Data(trait)
			# get how many levels this trait has
			num_classes = len(self.p_trait_dic[trait])
			# load the model
			# for different class amounts, the net has to be reloaded in each loop
			# net, decoder = remodel(f"{path}/data/model.yaml", num_classes)
			net = CNN_MODEL()
			print(f"Your model structure is: \n{net}")
			net.to(self.device)
			# initialize weights
			net.apply(init_weights)
			# set Focal Loss (rather than Cross Entropy) as the loss function
			# loss = torch.nn.CrossEntropyLoss()
			loss = FocalLoss()
			# set updater with Adam algorithm
			updater = torch.optim.Adam(net.parameters(), weight_decay=weight_decay)
			# evaluation dictionary of Quality Traits
			eval_dict = {'train_loss': [], 'test_loss': [], 'acc': [], 'recall': [], 'precision': [], 'f1_score': []}
			# the minimum evaluation value
			eval_value = 0.0
			chunk_size = 2000
			# train on each balanced chunk
			for i in range(0, len(nag_list), chunk_size):
				chunk = nag_list[i:i + chunk_size]
				merged_list = pos_list + chunk
				random.shuffle(merged_list)

				train_dataloader, test_dataloader = self.dataloader_test(trait,merged_list, is_quality=True)

				print(f"Now training epoch {epoch_total - epoch + 1}")
				# set the model as train mode
				net.train()
				# train this epoch and compute the average loss
				avg_train_loss = self.train_for_epoch(train_dataloader, updater, loss, net)
				# set the model as evaluation mode
				net.eval()
				# print(f"Now training test epoch {epoch_total - epoch + 1}")
				# for the test dataset
				with torch.no_grad():
					# initialize the evaluation indexes
					acc_score = 0.0
					recall_score = 0.0
					f1_score = 0.0
					precision = 0.0
					loss_test = 0.0
					hat = np.array([])
					truth = np.array([])
					for index, (test_genomap, test_trait) in enumerate(test_dataloader):
						# load the feature maps and traits on device
						test_genomap, test_trait = test_genomap.to(self.device), test_trait.to(self.device)
						# the predict results
						y_hat = net(test_genomap)
						print("y_hat is")
						print(y_hat)
						# compute loss on test dataset
						loss_test += loss(y_hat, test_trait.long())
						# argmax over the class axis gives the predicted label; prepend to hat
						hat = np.insert(hat, 0, np.argmax(y_hat.to('cpu').detach().numpy(), axis=1), axis=0)
						# prepend the ground-truth labels
						truth = np.insert(truth, 0, test_trait.to('cpu').numpy(), axis=0)
						# NOTE(review): the four scores below are recomputed each batch
						# over the CUMULATIVE hat/truth arrays and then summed, so the
						# later division by (index + 1) is not a plain per-batch average.
						# compute accuracy scores
						acc_score += metrics.accuracy_score(truth, hat)
						# compute recall scores
						recall_score += metrics.recall_score(truth, hat, average='macro')
						# compute F1 scores
						f1_score += metrics.f1_score(truth, hat, average='macro')
						# compute precision scores
						precision += metrics.precision_score(truth, hat, average='macro')
					print("hat is")
					print(hat)
					print("truth is ")
					print(truth)
					# count evaluation indexes
					loss_test = loss_test / (index + 1)
					eval_dict['train_loss'].append(float(avg_train_loss.to('cpu').detach().numpy()))
					eval_dict['test_loss'].append(float(loss_test.to('cpu').detach().numpy()))
					eval_dict['acc'].append(acc_score / (index + 1))
					eval_dict['recall'].append(recall_score / (index + 1))
					eval_dict['f1_score'].append(f1_score / (index + 1))
					eval_dict['precision'].append(precision / (index + 1))
					# save the best weight with the highest combined score
					# (mean of the latest acc/recall/f1/precision entries;
					# assumes all metric lists have equal length)
					value = np.array(list(eval_dict.values()))[2:, -1].mean()
					if value > eval_value:
						# compute the confusion matrix
						truth = list(map(int, truth))
						hat = list(map(int, hat))
						confusion_matrix = metrics.confusion_matrix(truth, hat, labels=list(self.p_trait_dic[trait].keys()),
																	sample_weight=None)
						# refresh the evaluation value
						eval_value = value
						torch.save(net, os.path.join(weight_save_path, f'{trait}_best.pt'))
				epoch -= 1
			print(f"weight are saved in {weight_save_path}/{trait}_best.pt")
			# set the trait of this evaluation dictionary and save the confusion matrix
			# NOTE(review): if no chunk ever improved on eval_value,
			# ``confusion_matrix`` is unbound here and this raises.
			eval_dict['trait'] = trait
			eval_dict['confusion_matrix'] = confusion_matrix
			if draw:
				Eval(eval_dict, pic_save_path, self.p_trait_dic)
				print(f"pictures are saved in {pic_save_path}")
		# snp_pos = weight_decoder(net, decoder,self.d.columns, percentage=percentage)
		# print(f"Top {percentage * 100}% most important SNP positions are: {snp_pos}")
		return eval_dict
	

def train_test(self, net, epoch, loss, mse_loss, evai_dict, test_dataloader):
	"""Run ``epoch`` evaluation passes of ``net`` over ``test_dataloader``.

	Each pass accumulates the test loss and MSE and collects predictions and
	targets; the results are currently discarded (``evai_dict`` is accepted
	for interface compatibility but never filled in).

	Fixes over the previous version:
	- ``epoch`` is now decremented, so the loop terminates (it used to spin forever);
	- the loss is normalised once per pass, after the batch loop, instead of on every batch;
	- a batch counter replaces the loop variable, so an empty loader no longer raises NameError;
	- evaluation runs under ``torch.no_grad()`` so no autograd graph is built.

	:param self: any object exposing a ``device`` attribute
	:param net: the model to evaluate (switched to eval mode)
	:param epoch: number of evaluation passes to run
	:param loss: test loss function (called with a long-cast target)
	:param mse_loss: MSE criterion for the prediction/target pair
	:param evai_dict: unused placeholder kept for interface compatibility
	:param test_dataloader: iterable of (genomap, trait) batches
	"""
	while epoch:
		net.eval()
		mse = 0.0
		loss_test = 0.0
		hat = np.array([])
		truth = np.array([])
		num_batches = 0
		with torch.no_grad():
			for test_genomap, test_trait in test_dataloader:
				# load the feature maps and traits on device
				test_genomap, test_trait = test_genomap.to(self.device), test_trait.to(self.device)
				# the predict results
				y_hat = net(test_genomap)
				# compute loss on test dataset
				loss_test += loss(y_hat, test_trait.long())
				# flatten y_hat to a 1-dimension vector and prepend it to hat
				hat = np.insert(hat, 0, y_hat.to('cpu').detach().numpy().reshape(len(y_hat), ), axis=0)
				# prepend the ground-truth values
				truth = np.insert(truth, 0, test_trait.to('cpu').numpy(), axis=0)
				# compute MSE between predict results and actual values
				mse += mse_loss(y_hat, test_trait)
				num_batches += 1
		# normalise by the batch count once per pass (guard the empty-loader case)
		if num_batches:
			loss_test = loss_test / num_batches
		epoch -= 1


def pic_draw(eval_dict, save_dir):
	"""Plot the evaluation curves of a quantity trait and save them as SVGs.

	Expects ``eval_dict`` to hold per-epoch lists under the keys
	'train_loss', 'test_loss', 'mse', 'avg_mse' and 'r', plus the trait
	name under 'trait'.  Three figures are written to ``save_dir``:
	``<trait>_loss.svg``, ``<trait>_mse.svg`` and ``<trait>_r.svg``.
	"""
	trait = eval_dict['trait']
	epochs = list(range(1, len(eval_dict['train_loss']) + 1))

	# loss curve: train vs. test
	plt.plot(epochs, eval_dict['train_loss'], label='train', linewidth=0.9)
	plt.plot(epochs, eval_dict['test_loss'], label='test', linewidth=0.9)
	plt.xlabel('Epoch', fontsize=13)
	plt.ylabel('SmoothL1 Loss', fontsize=13)
	plt.legend(loc='best')
	plt.savefig(os.path.join(save_dir, f'{trait}_loss.svg'))
	plt.close()

	# MSE curve: total and per-batch average
	plt.plot(epochs, eval_dict['mse'], linewidth=0.9)
	plt.plot(epochs, eval_dict['avg_mse'], linewidth=0.9)
	plt.xlabel('Epoch', fontsize=13)
	plt.ylabel('Meansquare Error', fontsize=13)
	plt.savefig(os.path.join(save_dir, f'{trait}_mse.svg'))
	plt.close()

	# Pearson correlation curve
	plt.plot(epochs, eval_dict['r'], linewidth=0.9)
	plt.xlabel('Epoch', fontsize=13)
	plt.ylabel('Pearson Correlation Coefficient', fontsize=13)
	plt.savefig(os.path.join(save_dir, f'{trait}_r.svg'))
	plt.close()

def copy_file(source, destination):
    """Copy ``source`` to ``destination``, reporting the outcome to stdout.

    Best-effort: any failure (missing source, bad permissions, ...) is
    caught and printed rather than raised.
    """
    try:
        shutil.copy(source, destination)
    except Exception as e:
        print(f"复制文件时出现错误：{e}")
    else:
        print(f"文件 {source} 已成功复制到 {destination}")

# an example
if __name__ == "__main__":
	# Guard the demo run so importing this module no longer kicks off a
	# full training pass as a side effect.
	t = Train("data/train_example.vcf", "data/train_example.csv")
	print(t.train_p())

	# Best-effort: keep the nohup log next to this run's outputs
	# (nohupout_path is filled in by Train.__init__).
	copy_file('nohup.out', nohupout_path + "nohup.out")


