# -*- coding: utf-8 -*-
"""

"""
import csv
import os
import logging
import pickle
import numpy as np
from model.required_modules import *
import DataProcess as DP
import model.ContrasiveRoadNetworkTripChainPretraining as model
#
# Root of the local data tree; all data/checkpoint paths below derive from it.
# NOTE(review): the --ROOTDIR CLI flag defined below is never read — this
# hard-coded constant is what is actually used. Confirm which is intended.
ROOTDIR = '/media/znr/Z/morphGPT/'
#
#dataspath = ROOTDIR + 'datas/'
#checkpoint_path = ROOTDIR + 'codes/checkpoints/'
#
if __name__ == '__main__':
	# All run configuration comes from the CLI.
	# (argparse is presumably exposed by the `model.required_modules` star
	# import above — verify, since it is not imported here directly.)
	parser = argparse.ArgumentParser(description='morphGPT')
	parser.add_argument('--name', type=str, help='log name (default: "Exp1")', default="Exp2")
	# NOTE(review): this flag is never used; the module-level ROOTDIR constant
	# is used instead. Confirm whether the flag should take precedence.
	parser.add_argument('--ROOTDIR', type=str, default='/GPUFS/zju_qhs_1/morphGPT/', help='root of morphGPT')
	# NOTE(review): --stereo and --cla_res help texts mention RoadBEV and
	# neither flag is referenced later — likely copy-pasted from another
	# project; confirm before relying on them.
	parser.add_argument('--stereo', action='store_true', help='if yes, use RoadBEV-stereo; otherwise, RoadBEV-mono')
	parser.add_argument('--cla_res', type=float, default=0.5, help='class resolution for elevation classification')
	parser.add_argument('--batch_size', type=int, default=64, help='training batch size')
	parser.add_argument('--lr', type=float, default=8e-4, help='maximum learning rate')
	parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')
	parser.add_argument('--logdir', default='./checkpoints/', help='the directory to save logs and checkpoints')
	parser.add_argument('--loadckpt', default=None, help='load the weights from a specific checkpoint')
	parser.add_argument('--summary_freq', type=int, default=20, help='summary_freq')
	parser.add_argument('--seed', type=int, default=307, metavar='S', help='random seed')
	# NOTE(review): --osm_path defaults to the same path as --logdir
	# ('./checkpoints/') — looks like a copy-paste slip; confirm the intended
	# default for the OSM data path.
	parser.add_argument('--osm_path', default='./checkpoints/', help='the path of the osm data')

	args = parser.parse_args()
	#
    
    #
	# --- Paths -------------------------------------------------------------
	# NOTE(review): built from the hard-coded module-level ROOTDIR, not from
	# args.ROOTDIR — the CLI flag is currently ignored; confirm intent.
	dataspath = ROOTDIR + 'datas/'
	checkpoint_path = ROOTDIR + 'codes/checkpoints/'
	processed_data_path = ROOTDIR + 'datas/processed/'

	# Removed the notebook-style `reload(DP); reload(model)` call: in a fresh
	# interpreter the modules were just imported, so the reload was redundant,
	# and `reload` itself may not be in scope outside IPython (NameError risk).

	# Fall back to CPU so the script still starts on machines without CUDA.
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# NOTE(review): the original comment claimed dim_embedding_map should equal
	# mode_embedding_dim + 3 (t, lon, lat), but 8 != 10 + 3; it is
	# decoder_arg_nhead (13) that equals mode_embedding_dim + 3. Confirm which
	# relationship is actually required by the model.
	system_configs = {'dim_embedding_map': 8, 'mode_embedding_dim': 10, 'num_mode_classification': 8}

	# Dataset and loader; DP.my_collate_fn batches the (variable-length?) trip
	# chains — see DataProcess for the exact sample layout.
	training_data = DP.morphGPTDataSet(root_dir=processed_data_path)
	train_dataloader = DataLoader(training_data, batch_size=args.batch_size,
	                              shuffle=True, collate_fn=DP.my_collate_fn)

	# Model, loss and optimizer. The misspelled keyword names
	# (morph_embeding_dim, mode_classsification_N) match the model's API.
	tripchaingenerative = model.GenerativeTripChain_v2(
		mode_embedding_dim=system_configs['mode_embedding_dim'],
		morph_embeding_dim=system_configs['dim_embedding_map'],
		mode_classsification_N=system_configs['num_mode_classification'],
		decoder_arg_nhead=13)  # 13 == mode_embedding_dim + 3 — TODO confirm
	tripchaingenerative = tripchaingenerative.to(device)
	MyLoss = model.MyLoss_v2().to(device)
	optimizer = optim.AdamW(tripchaingenerative.parameters(), lr=args.lr)


	def initLogging(log_file: str, level: str = "INFO"):
		"""Configure root logging to write to *log_file* and mirror to the console.

		Args:
			log_file: path of the log file (opened with filemode='w', truncating
				any previous run's log).
			level: logging level name, e.g. "INFO" or "DEBUG". Unknown names now
				fall back to INFO — the original `getattr(logging, level, None)`
				passed None, which basicConfig treats as "leave level unset".
		"""
		logging.basicConfig(
			filename=log_file,
			filemode='w',
			level=getattr(logging, level.upper(), logging.INFO),
			format='[%(levelname)s %(asctime)s] %(message)s',
			datefmt='%m-%d %H:%M:%S')
		# Echo every record to stderr as well as the file.
		logging.getLogger().addHandler(logging.StreamHandler())

	# Per-run log directory; created up front so both the train.log file and
	# the CSV/checkpoints written later have somewhere to go.
	log_path = f"./training_log/{args.name}/"
	os.makedirs(log_path, exist_ok=True)
	initLogging(log_file=log_path + 'train.log')
	# Record the run configuration at the top of the log.
	logging.info("------------- {} -------------".format(args.name))
	logging.info("Batch size: {}".format(args.batch_size))
	logging.info("Learning rate: {}".format(args.lr))
	logging.info("Use device: {}".format(device))
	logging.info("Model Params: {}".format(sum(p.numel() for p in tripchaingenerative.parameters())))


	LOSS_es = []  # running per-batch loss history across ALL epochs
	# Was a bare print(); route it through the logger like everything else.
	logging.info("Batches per epoch: {}".format(len(train_dataloader)))
	for epoch_idx in tqdm(range(args.epochs)):
		logging.info(f"Epoch {epoch_idx + 1}/{args.epochs}")
		epoch_loss = []
		with tqdm(train_dataloader, desc="Training", unit="batch") as data_epoch:
			for i, sample in enumerate(data_epoch):
				optimizer.zero_grad()
				# sample is (input, ground_truth) as produced by DP.my_collate_fn.
				inputt = sample[0].to(device)
				groundtruth = sample[1].to(device)
				pred = tripchaingenerative(inputt, device=device)
				LOSS = MyLoss(pred, groundtruth)
				LOSS_es.append(LOSS.item())
				epoch_loss.append(LOSS.item())
				LOSS.backward()
				optimizer.step()
				# Show the running mean loss for this epoch on the progress bar.
				data_epoch.set_postfix(loss='{:.4f}'.format(np.mean(epoch_loss)))

		# Guard the empty-dataloader case: np.mean([]) returns NaN with a warning.
		avg_epoch_loss = float(np.mean(epoch_loss)) if epoch_loss else float('nan')

		log = {'epoch': epoch_idx + 1, 'loss': avg_epoch_loss}
		# Write the header once (first epoch), then append one row per epoch.
		# newline='' is required by the csv module to avoid blank rows on Windows.
		csv_mode = 'w' if epoch_idx == 0 else 'a'
		with open(f'./training_log/{args.name}/train_log.csv', csv_mode, newline='') as csv_file:
			writer = csv.writer(csv_file)
			if epoch_idx == 0:
				writer.writerow(log.keys())
			writer.writerow(log.values())

		# Checkpoint the model weights every epoch.
		torch.save(tripchaingenerative.state_dict(), f'training_log/{args.name}/model_epoch_{epoch_idx+1}.ckpt')

		# Persist the full batch-level loss history (rewritten each epoch so the
		# file is always complete up to the latest batch).
		with open(f'training_log/{args.name}/LOSS_es.pkl', 'wb') as f:
			pickle.dump(LOSS_es, f)
