# -*- coding: utf-8 -*-
from .required_modules import *
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

"""
Contrastive Road Network and Travel Chain Pre-training (CRNTCP).

The road network is given as OSM or SHP files, while the trip chain is given as a CSV, such as in the OpenPFLOW format.



"""
# for self-attention masking
def sequence_mask(seq:torch.LongTensor, padding_idx:int=None) -> torch.BoolTensor:
	"""Build a key-padding mask for scaled-dot self-attention.

	@input: seq, shape (bsz, slen), a padded batch of index sequences.
	@input: padding_idx, the index used for padding; if None, no position
		is treated as padding.

	@output: BoolTensor of shape (bsz, slen). True marks padding positions,
		which should receive '-inf' before the attention softmax.
	"""
	#FIX: with the default padding_idx=None the original expression
	#'seq == None' does not mark any padding position on recent torch
	#versions; make the no-padding case explicit instead.
	if padding_idx is None:
		return torch.zeros_like(seq, dtype=torch.bool)
	return seq == padding_idx

# for decoder's look-ahead masking 
#		array with 0 and -inf
def look_ahead_mask(tgt_len:int, src_len:int) -> torch.FloatTensor:  
	"""Additive look-ahead mask: 0 on/below the diagonal, '-inf' strictly above.

	Added to the attention scores in a transformer decoder so that a target
	step cannot attend to future target steps.
	"""
	#boolean pattern of the strictly-upper-triangular (future) positions
	future_positions = torch.ones(tgt_len, src_len).triu(diagonal=1).bool()
	return torch.zeros(tgt_len, src_len).masked_fill(future_positions, -float('inf'))
#
#array with 0 and 1
def look_ahead_mask1(tgt_len:int, src_len:int) -> torch.FloatTensor:  
	"""Multiplicative look-ahead mask: 1 on/below the diagonal, 0 strictly above.

	Same causal pattern as look_ahead_mask, expressed with 0/1 entries
	instead of 0/'-inf'.
	"""
	#lower-triangular ones (diagonal included) == allowed positions
	return torch.tril(torch.ones(tgt_len, src_len))

class MorphEncoder(nn.Module):
	"""
	MLP that maps a raw morphology embedding to a target dimension.

	Input : tensor of shape (*, input_dim)
	Output: tensor of shape (*, output_dim)

	Example:

		morphencoder = MorphEncoder(input_dim = 2, output_dim = 30)
		res = morphencoder(torch.zeros(7, 11, 2))
		#res.shape is torch.Size([7, 11, 30])
	"""

	def __init__(self, \
		input_dim = 2, \
		output_dim = 30):
		"""
		@input: input_dim

			feature dimension of the raw morph embedding (last axis of the input).

		@input: output_dim

			feature dimension of the produced encoding (last axis of the output).
		"""
		super().__init__()

		#two GELU hidden layers (512 -> 256) followed by a linear projection
		stack = [
			nn.Linear(input_dim, 512, bias=True),
			nn.GELU(),
			nn.Linear(512, 256, bias=True),
			nn.GELU(),
			nn.Linear(256, output_dim),
		]
		self.encoder = nn.Sequential(*stack)

	def forward(self, morph_embedding):
		"""
		@input: morph_embedding

			tensor of shape (*, input_dim)

		@output:

			tensor of shape (*, output_dim)
		"""
		return self.encoder(morph_embedding)

class GenerativeTripChain_v2(nn.Module):
	"""
	Transformer-decoder architecture for multi-mode trip-chain generation.

	Difference between the two variants:

		GenerativeTripChain_v1 decodes t-lon-lat jointly with one head;
		GenerativeTripChain_v2 (this class) separates t from lon-lat and
		decodes increments instead of absolute values:

			delta_t, delta_lon, delta_lat.

	forward() returns a tensor of shape (seq_len, batch_N, 3 + mode_classsification_N):
		[:, :, :3]  is (delta_t, delta_lon, delta_lat)
		[:, :, 3:]  is the mode-class distribution (softmax output).
	"""

	def __init__(self, \
		#the following should be set simultaneously with the data process,
		mode_embedding_dim = 17,\
		morph_embeding_dim = 10, \
		mode_classsification_N = 8, \
		#
		decoder_arg_nhead = 20, \
		decoder_arg_dim_feedforward = 100,
		num_transfomer_decoder = 10, \
		):
		"""
		@input: mode_embedding_dim, mode_classsification_N

			mode_embedding_dim is the dimension of the mode embedding.

			mode_classsification_N is the number of classes for mode
			classification. It should be 8: 5 modes (STAY-99, WALK-1,
			VEHICLE-2, TRAIN-3, BICYCLE-4), one trip-begin token, one
			trip-end token and a NaN class.

		@input: morph_embeding_dim

			output dimension of each MorphEncoder; the morph encoding is
			used as the decoder memory.

			NOTE(review): nn.TransformerDecoderLayer expects the memory to
			have d_model features; with the defaults morph_embeding_dim=10
			!= 3+mode_embedding_dim=20, so the morph branch of forward()
			would not match -- confirm the intended configuration.

		@input: decoder_arg_nhead, decoder_arg_dim_feedforward

			passed through to nn.TransformerDecoderLayer. The model
			dimension is derived as d_model = 3 + mode_embedding_dim
			('3' is for t,lon,lat) and must be divisible by decoder_arg_nhead.

		@input: num_transfomer_decoder

			number of decoder layers stacked after the first one.
		"""
		super().__init__()

		if (3 + mode_embedding_dim) % decoder_arg_nhead !=0:
			print('3 + mode_classified_dim is',3 + mode_embedding_dim, '   nhead is ', decoder_arg_nhead)
			raise ValueError('d_model (i.e. 3 + mode_classified_dim) must be divisble regarding to nhead')
		#
		decoder_arg_d_model = 3 + mode_embedding_dim
		#=========================transformer decoders
		#input should be (seq_len, batch_N, 3+mode_embedding_dim), '3' is because of 't,lon,lat'
		self.first_transformer_decoder = nn.TransformerDecoderLayer(d_model = decoder_arg_d_model, \
																	nhead = decoder_arg_nhead, \
																	dim_feedforward = decoder_arg_dim_feedforward)
		#each layer input is (seq_len, batch_N, decoder_arg_d_model)
		#FIX: these layers were kept in a plain Python list, so they were NOT
		#registered as submodules: their parameters were invisible to
		#.parameters()/state_dict() and therefore never trained or saved.
		#nn.ModuleList registers them; the hard-coded .to('cuda') is dropped,
		#move the whole model with .to(device) instead.
		self.transformer_decoder_layers = nn.ModuleList([nn.TransformerDecoderLayer(d_model = decoder_arg_d_model, \
																	nhead = decoder_arg_nhead,\
																	dim_feedforward = decoder_arg_dim_feedforward) \
																	for i in range(num_transfomer_decoder)])
		#========================morph encoding layers
		#	the output of each morph encoder is treated as the memory for the matching decoder layer.
		self.first_morph_encoder = MorphEncoder(input_dim = 2, output_dim = morph_embeding_dim)
		#FIX: same submodule-registration problem as above -> nn.ModuleList.
		self.morph_encoder_layers = nn.ModuleList([MorphEncoder(input_dim = 2, output_dim = morph_embeding_dim) \
																	for i in range(num_transfomer_decoder)])

		#mode_classifier identifies the mode:
		#	5 modes plus begin, end and NaN -> mode_classsification_N classes.
		#	input : (seq_len, batch_N, decoder_arg_d_model), from the last decoder layer
		#	output: (seq_len, batch_N, mode_classsification_N); rows sum to 1 (softmax)
		#NOTE(review): loss() uses nn.CrossEntropyLoss, which expects raw
		#logits; feeding it softmax output applies softmax twice -- confirm.
		self.mode_classifier = nn.Sequential(
				nn.Linear(decoder_arg_d_model, 512, bias=False),
				nn.LeakyReLU(),
				nn.Linear(512, 256, bias=False),
				nn.LeakyReLU(),
				nn.Linear(256, mode_classsification_N),
				nn.Softmax(dim = 2),
			)

		#regression head for the spatial increments.
		#	input : (seq_len, batch_N, decoder_arg_d_model)
		#	output: (seq_len, batch_N, 2), the 2 being (delta_lon, delta_lat)
		self.delta_lonlat_decoding = nn.Sequential(
				nn.Linear(decoder_arg_d_model, 512, bias=True),
				nn.LeakyReLU(),
				nn.Linear(512, 256, bias=True),
				nn.LeakyReLU(),
				nn.Linear(256, 2),
			)

		#regression head for the time increment.
		#	input : (seq_len, batch_N, decoder_arg_d_model)
		#	output: (seq_len, batch_N, 1), the 1 being delta_t (final ReLU keeps it >= 0)
		self.delta_t_decoding = nn.Sequential(
				nn.Linear(decoder_arg_d_model, 512, bias=True),
				nn.LeakyReLU(),
				nn.Linear(512, 256, bias=True),
				nn.LeakyReLU(),
				nn.Linear(256, 1),
				nn.ReLU(),
			)

	def forward(self, embedding_trip, morph_embedding = False, POS_map_embeding = False, device = torch.device('cuda'), mask = False):
		"""
		@input: embedding_trip

			If batch is NOT included (len(embedding_trip.shape)==2), the shape
			is (N+2, 3+mode_embedding_dim):
				N is the number of trajectory points,
				2 is for the begin/end tokens,
				3 is for moment, lon, lat,
				mode_embedding_dim is the mode-embedding width.

			If batch is included, the shape is (seq_len, batch_N, 3+mode_embedding_dim).

		@input: morph_embedding, POS_map_embeding

			road-network morph embedding and its positional embedding. Both
			default to False, meaning "no map memory": a single all-zero
			memory token is used instead.

			In the un-batched case:
				morph_embedding shape is (2, N_points_in_whole_network)
				POS_map_embeding shape is (morph_embeding_dim, N_points_in_whole_network)

		@input: device

			device for the zero memory used when no morph embedding is given.

		@input: mask

			reserved, currently unused. NOTE(review): no attention mask is
			applied, so there is no causal masking during training -- confirm.

		@output:

			tensor of shape (seq_len, batch_N, 3 + mode_classsification_N);
			[:, :, :3] is (delta_t, delta_lon, delta_lat), [:, :, 3:] is the
			mode classification distribution.
		"""
		#reshape the embedding_trip to 3d.
		if len(embedding_trip.shape)==2:
			#(N, M) -> (N, 1, M): add the batch dimension for the trip data
			embedding_trip = embedding_trip.unsqueeze(1)
			#
			#add the batch dimension for the map data as well
			if not isinstance(morph_embedding, bool):
				#(2, n_points) -> (2, 1, n_points) -> (n_points, 1, 2)
				#	the last form is what self.first_morph_encoder consumes,
				#	producing (n_points, 1, morph_encoder_output_dim)
				morph_embedding = morph_embedding.unsqueeze(1)
				morph_embedding = morph_embedding.transpose(0,2)
				#
				#(morph_embeding_dim, n_points) -> (morph_embeding_dim, 1, n_points)
				#	-> (n_points, 1, morph_embeding_dim)
				POS_map_embeding = POS_map_embeding.unsqueeze(1)
				POS_map_embeding = POS_map_embeding.transpose(0,2)

		#
		batch_N = embedding_trip.shape[1]

		#output should be (seq_len, batch_N, decoder_arg_d_model)
		if isinstance(morph_embedding, bool):
			#no map memory supplied: use one all-zero memory token,
			#shape (1, batch_N, d_model)
			memory = torch.zeros(1, batch_N, embedding_trip.shape[-1], dtype=torch.float32).to(device)
			output = self.first_transformer_decoder(tgt = embedding_trip, memory = memory)
			for decoder in self.transformer_decoder_layers:
				output = decoder(tgt = output, memory = memory)
			#
		else:
			#morph_embedding shape is (n_points, 1, 2)
			#morph_encoding shape is (n_points, 1, morph_embeding_dim)
			morph_encoding = self.first_morph_encoder(morph_embedding)
			#
			output = self.first_transformer_decoder(tgt = embedding_trip, memory = morph_encoding + POS_map_embeding)
			#each decoder layer gets a freshly encoded memory from its own morph encoder
			for transformer_decoder,morph_encoder in zip(self.transformer_decoder_layers,self.morph_encoder_layers):
				morph_encoding = morph_encoder(morph_embedding)
				output = transformer_decoder(tgt = output, memory = morph_encoding + POS_map_embeding)

		#classification head: (seq_len, batch_N, mode_classsification_N)
		mode_class_logits = self.mode_classifier(output)

		#spatial increments: (seq_len, batch_N, 2)
		delta_lonlats = self.delta_lonlat_decoding(output)

		#time increment: (seq_len, batch_N, 1)
		delta_t_ = self.delta_t_decoding(output)

		#concat -> (seq_len, batch_N, 3 + mode_classsification_N)
		res = torch.cat((delta_t_, delta_lonlats, mode_class_logits), dim=2)

		#res[:, :, :3] is (delta_t, delta_lon, delta_lat); res[:, :, 3:] is the mode distribution
		return res

	@classmethod
	def ConvertOutput2TripData(self, model_output):
		"""
		Convert forward() output back to trip-chain records.

		@input: model_output

			a tensor with shape (seq_len, batch_N, 3 + mode_classsification_N)

				model_output[:, :, :3] is (delta_t, delta_lon, delta_lat)
				model_output[:, :, 3:] is the mode classification distribution

		@OUTPUT:

			not implemented yet.
		"""
		pass

	@classmethod
	def loss(self, modeloutput, groundtruth):
		"""
		Training loss: cross-entropy on the mode distribution plus MSE on
		(delta_t, delta_lon, delta_lat).

		@input: modeloutput

			output of self.forward, shape (seq_len, batch_N, 3+mode_classsification_N).

			For a single trip chain, seq_len should be n_points+1 ('1' is for
			the trip ending). '3' is for t,lon,lat; mode_classsification_N is
			8 (5 modes plus begin, end and NaN).

		@input: groundtruth

			same shape as modeloutput.
		"""
		#https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
		ModeClassLoss = nn.CrossEntropyLoss()
		trajectory_loss = nn.MSELoss()

		#FIX: the two slices were swapped: cross-entropy was applied to the
		#continuous t/lon/lat channels [:, :, :3] and MSE to the mode
		#distribution [:, :, 3:]. Classification loss belongs on [:, :, 3:].
		#permute(1, 2, 0) -> (batch_N, class_N, seq_len), the layout
		#nn.CrossEntropyLoss expects for its input and (probability) target.
		LOSS = ModeClassLoss(modeloutput[:,:,3:].permute(1, 2, 0), groundtruth[:,:,3:].permute(1, 2, 0))

		#regression loss on (delta_t, delta_lon, delta_lat)
		LOSS = LOSS + trajectory_loss(modeloutput[:,:,:3], groundtruth[:,:,:3])

		return LOSS



class GenerativeTripChain_v1(nn.Module):
	"""
	Transformer-decoder architecture for multi-mode trip-chain generation.

	This variant decodes t, lon and lat jointly from one regression head
	(compare GenerativeTripChain_v2, which separates delta_t from
	delta_lon/delta_lat).

	forward() returns a tensor of shape (seq_len, batch_N, 3 + mode_classsification_N):
		[:, :, :3]  is (t, lon, lat)
		[:, :, 3:]  is the mode-class distribution (softmax output).
	"""

	def __init__(self, \
		#the following should be set simultaneously with the data process,
		mode_embedding_dim = 17,\
		morph_embeding_dim = 10, \
		mode_classsification_N = 8, \
		#
		decoder_arg_nhead = 20, \
		decoder_arg_dim_feedforward = 100,
		num_transfomer_decoder = 10, \
		):
		"""
		@input: mode_embedding_dim, mode_classsification_N

			mode_embedding_dim is the dimension of the mode embedding.

			mode_classsification_N is the number of classes for mode
			classification. It should be 8: 5 modes (STAY-99, WALK-1,
			VEHICLE-2, TRAIN-3, BICYCLE-4), one trip-begin token, one
			trip-end token and a NaN class.

		@input: morph_embeding_dim

			output dimension of each MorphEncoder; the morph encoding is
			used as the decoder memory.

			NOTE(review): nn.TransformerDecoderLayer expects the memory to
			have d_model features; with the defaults morph_embeding_dim=10
			!= 3+mode_embedding_dim=20, so the morph branch of forward()
			would not match -- confirm the intended configuration.

		@input: decoder_arg_nhead, decoder_arg_dim_feedforward

			passed through to nn.TransformerDecoderLayer. The model
			dimension is derived as d_model = 3 + mode_embedding_dim
			('3' is for t,lon,lat) and must be divisible by decoder_arg_nhead.

		@input: num_transfomer_decoder

			number of decoder layers stacked after the first one.
		"""
		super().__init__()

		if (3 + mode_embedding_dim) % decoder_arg_nhead !=0:
			print('3 + mode_classified_dim is',3 + mode_embedding_dim, '   nhead is ', decoder_arg_nhead)
			raise ValueError('d_model (i.e. 3 + mode_classified_dim) must be divisble regarding to nhead')
		#
		decoder_arg_d_model = 3 + mode_embedding_dim
		#=========================transformer decoders
		#input should be (seq_len, batch_N, 3+mode_embedding_dim), '3' is because of 't,lon,lat'
		self.first_transformer_decoder = nn.TransformerDecoderLayer(d_model = decoder_arg_d_model, \
																	nhead = decoder_arg_nhead, \
																	dim_feedforward = decoder_arg_dim_feedforward)
		#each layer input is (seq_len, batch_N, decoder_arg_d_model)
		#FIX: these layers were kept in a plain Python list, so they were NOT
		#registered as submodules: their parameters were invisible to
		#.parameters()/state_dict() and therefore never trained or saved.
		#nn.ModuleList registers them; the hard-coded .to('cuda') is dropped,
		#move the whole model with .to(device) instead.
		self.transformer_decoder_layers = nn.ModuleList([nn.TransformerDecoderLayer(d_model = decoder_arg_d_model, \
																	nhead = decoder_arg_nhead,\
																	dim_feedforward = decoder_arg_dim_feedforward) \
																	for i in range(num_transfomer_decoder)])
		#========================morph encoding layers
		#	the output of each morph encoder is treated as the memory for the matching decoder layer.
		self.first_morph_encoder = MorphEncoder(input_dim = 2, output_dim = morph_embeding_dim)
		#FIX: same submodule-registration problem as above -> nn.ModuleList.
		self.morph_encoder_layers = nn.ModuleList([MorphEncoder(input_dim = 2, output_dim = morph_embeding_dim) \
																	for i in range(num_transfomer_decoder)])

		#mode_classifier identifies the mode:
		#	5 modes plus begin, end and NaN -> mode_classsification_N classes.
		#	input : (seq_len, batch_N, decoder_arg_d_model), from the last decoder layer
		#	output: (seq_len, batch_N, mode_classsification_N); rows sum to 1 (softmax)
		#NOTE(review): loss() uses nn.CrossEntropyLoss, which expects raw
		#logits; feeding it softmax output applies softmax twice -- confirm.
		self.mode_classifier = nn.Sequential(
				nn.Linear(decoder_arg_d_model, 512, bias=False),
				nn.GELU(),
				nn.Linear(512, 256, bias=False),
				nn.GELU(),
				nn.Linear(256, mode_classsification_N),
				nn.Softmax(dim = 2),
			)

		#regression head for the continuous channels.
		#	input : (seq_len, batch_N, decoder_arg_d_model)
		#	output: (seq_len, batch_N, 3), the 3 being (t, lon, lat)
		self.lonlatdecoding = nn.Sequential(
				nn.Linear(decoder_arg_d_model, 512, bias=True),
				nn.GELU(),
				nn.Linear(512, 256, bias=True),
				nn.GELU(),
				nn.Linear(256, 3),
			)

	def forward(self, embedding_trip, morph_embedding = False, POS_map_embeding = False, device = torch.device('cuda'), mask = False):
		"""
		@input: embedding_trip

			If batch is NOT included (len(embedding_trip.shape)==2), the shape
			is (N+2, 3+mode_embedding_dim):
				N is the number of trajectory points,
				2 is for the begin/end tokens,
				3 is for moment, lon, lat,
				mode_embedding_dim is the mode-embedding width.

			If batch is included, the shape is (seq_len, batch_N, 3+mode_embedding_dim).

		@input: morph_embedding, POS_map_embeding

			road-network morph embedding and its positional embedding. Both
			default to False, meaning "no map memory": a single all-zero
			memory token is used instead.

			In the un-batched case:
				morph_embedding shape is (2, N_points_in_whole_network)
				POS_map_embeding shape is (morph_embeding_dim, N_points_in_whole_network)

		@input: device

			device for the zero memory used when no morph embedding is given.

		@input: mask

			reserved, currently unused. NOTE(review): no attention mask is
			applied, so there is no causal masking during training -- confirm.

		@output:

			tensor of shape (seq_len, batch_N, 3 + mode_classsification_N);
			[:, :, :3] is (t, lon, lat), [:, :, 3:] is the mode
			classification distribution.
		"""
		#reshape the embedding_trip to 3d.
		if len(embedding_trip.shape)==2:
			#(N, M) -> (N, 1, M): add the batch dimension for the trip data
			embedding_trip = embedding_trip.unsqueeze(1)
			#
			#add the batch dimension for the map data as well
			if not isinstance(morph_embedding, bool):
				#(2, n_points) -> (2, 1, n_points) -> (n_points, 1, 2)
				#	the last form is what self.first_morph_encoder consumes,
				#	producing (n_points, 1, morph_encoder_output_dim)
				morph_embedding = morph_embedding.unsqueeze(1)
				morph_embedding = morph_embedding.transpose(0,2)
				#
				#(morph_embeding_dim, n_points) -> (morph_embeding_dim, 1, n_points)
				#	-> (n_points, 1, morph_embeding_dim)
				POS_map_embeding = POS_map_embeding.unsqueeze(1)
				POS_map_embeding = POS_map_embeding.transpose(0,2)

		#
		batch_N = embedding_trip.shape[1]

		#output should be (seq_len, batch_N, decoder_arg_d_model)
		if isinstance(morph_embedding, bool):
			#no map memory supplied: use one all-zero memory token,
			#shape (1, batch_N, d_model)
			memory = torch.zeros(1, batch_N, embedding_trip.shape[-1], dtype=torch.float32).to(device)
			output = self.first_transformer_decoder(tgt = embedding_trip, memory = memory)
			for decoder in self.transformer_decoder_layers:
				output = decoder(tgt = output, memory = memory)
			#
		else:
			#morph_embedding shape is (n_points, 1, 2)
			#morph_encoding shape is (n_points, 1, morph_embeding_dim)
			morph_encoding = self.first_morph_encoder(morph_embedding)
			#
			output = self.first_transformer_decoder(tgt = embedding_trip, memory = morph_encoding + POS_map_embeding)
			#each decoder layer gets a freshly encoded memory from its own morph encoder
			for transformer_decoder,morph_encoder in zip(self.transformer_decoder_layers,self.morph_encoder_layers):
				morph_encoding = morph_encoder(morph_embedding)
				output = transformer_decoder(tgt = output, memory = morph_encoding + POS_map_embeding)

		#classification head: (seq_len, batch_N, mode_classsification_N)
		mode_class_logits = self.mode_classifier(output)

		#regression head: (seq_len, batch_N, 3) for (t, lon, lat)
		lonlats = self.lonlatdecoding(output)

		#concat -> (seq_len, batch_N, 3 + mode_classsification_N)
		res = torch.cat((lonlats, mode_class_logits), dim=2)

		#res[:, :, :3] is (t, lon, lat); res[:, :, 3:] is the mode distribution
		return res

	@classmethod
	def ConvertOutput2TripData(self, model_output):
		"""
		Convert forward() output back to trip-chain records.

		@input: model_output

			a tensor with shape (seq_len, batch_N, 3 + mode_classsification_N)

				model_output[:, :, :3] is (t, lon, lat)
				model_output[:, :, 3:] is the mode classification distribution

		@OUTPUT:

			not implemented yet.
		"""
		pass

	@classmethod
	def loss(self, modeloutput, groundtruth):
		"""
		Training loss: cross-entropy on the mode distribution plus MSE on
		(t, lon, lat).

		@input: modeloutput

			output of self.forward, shape (seq_len, batch_N, 3+mode_classsification_N).

			For a single trip chain, seq_len should be n_points+1 ('1' is for
			the trip ending). '3' is for t,lon,lat; mode_classsification_N is
			8 (5 modes plus begin, end and NaN).

		@input: groundtruth

			same shape as modeloutput.
		"""
		#https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
		ModeClassLoss = nn.CrossEntropyLoss()
		trajectory_loss = nn.MSELoss()

		#FIX: the two slices were swapped: cross-entropy was applied to the
		#continuous t/lon/lat channels [:, :, :3] and MSE to the mode
		#distribution [:, :, 3:]. Classification loss belongs on [:, :, 3:].
		#permute(1, 2, 0) -> (batch_N, class_N, seq_len), the layout
		#nn.CrossEntropyLoss expects for its input and (probability) target.
		LOSS = ModeClassLoss(modeloutput[:,:,3:].permute(1, 2, 0), groundtruth[:,:,3:].permute(1, 2, 0))

		#regression loss on (t, lon, lat)
		LOSS = LOSS + trajectory_loss(modeloutput[:,:,:3], groundtruth[:,:,:3])

		return LOSS





class ContrasiveRoadNetworkTripChainPretraining(nn.Module):
	"""
	Placeholder for the contrastive road-network / trip-chain pre-training
	model (CRNTCP). Not implemented yet.
	"""
	
	
	
	pass




class TripGeneration():
	"""
	Autoregressive trip-chain generation with a trained GenerativeTripChain
	model.
	
	All methods are classmethods; the class is used as a namespace and is
	never instantiated.
	"""
	
	@classmethod
	def StartTokenGivenTripEmbedingInstance(self, trip_data_embeding):
		"""
		Build the (1, 1, dim) start token from an embedded trip instance.
		
		Callback:
		
			start_token = TripGeneration.StartTokenGivenTripEmbedingInstance(trip_data_embeding)
		
		---------------------------------------------
		@input: trip_data_embeding
		
			shape is (N+2, 3 + embedding_dim).
				'3' is moment, lon and lat, while embedding_dim is for mode.
				N is the number of original data points.
				'+2' is because a start and an ending row are appended.
			
			Obtained via:
				
				trip_data_embeding, groundtruth = DP.TripDataProcess.Trajec2EmbeddingWithLabel(data_array0)
		
		@OUTPUT: start_token
		
			a tensor of shape (1, 1, 3 + embedding_dim), corresponding to
			(seq_len, batch_N, dim) -- the first row of the input, reshaped.
		
		"""
		dimN = trip_data_embeding.shape[1]
		
		#the first row of the embedded trip is the begin token
		start_token = trip_data_embeding[0].view(1, 1, dimN)
		
		#shape is (1, 1, dim), where dim is 3 + mode_embeding_dim
		return start_token
	
	
	@classmethod
	def ContinuousGeneration_using_GenerativeTripChain_v2(self, model, \
		trip_data_embeding = False, \
		ModeEmbedding = False, \
		mask = False, \
		device = torch.device('cuda'), \
		modes_dict = {0:99.0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7}, \
		N_points_max = False, t_MAX_sec = 36000.0, \
		initial_random_seed4embedding = 0, \
		system_configs = {'dim_embedding_map':8, 'mode_embedding_dim':10, 'num_mode_classification':8, }):
		"""
		Autoregressive generation with a trained GenerativeTripChain_v2.
		
		Unlike GenerativeTripChain_v1, the v2 model outputs deltas
		(delta_t, delta_lon, delta_lat), so every generated point is
		accumulated onto the previously emitted one.
		
		Callback:
		
			GENERATED_TRIP = TripGeneration.ContinuousGeneration_using_GenerativeTripChain_v2(model = model)
		
		----------------------------------------------------------
		@input: model
		
			the trained GenerativeTripChain_v2 module. Its forward maps
			(seq_len, batch_N, 3 + mode_embedding_dim) to
			(seq_len, batch_N, 3 + num_mode_classification).
			
			It can be loaded via:
			
				model = TheModelClass(*args, **kwargs)
				model.load_state_dict(torch.load(PATH))
		
		@input: trip_data_embeding
		
			kept for interface compatibility; currently unused here (the
			start token is always rebuilt from ModeEmbedding).
		
		@input: ModeEmbedding
		
			an nn.Embedding mapping mode labels to vectors, e.g. created as
			nn.Embedding(num_embeddings_mode, mode_embedding_dim) and
			restored via load_state_dict. If False, a fresh embedding is
			created on `device`, seeded with initial_random_seed4embedding
			and sized from system_configs.
		
		@input: mask
		
			if truthy, a look-ahead mask is computed each step.
			NOTE(review): mask_tensor is computed but never passed to the
			model -- confirm the model's forward signature before relying
			on this flag.
		
		@input: device
		
			torch device used for the model, the embedding, and all
			intermediate tensors.
		
		@input: modes_dict
		
			class-index to mode-label mapping (STAY-99, WALK-1, VEHICLE-2,
			TRAIN-3, BICYCLE-4; 5 is begin, 6 is end, 7 is NaN).
			Currently documentation-only.
		
		@input: N_points_max, t_MAX_sec
		
			generation stops when the input sequence reaches N_points_max
			points, or the accumulated moment exceeds t_MAX_sec seconds.
			Pass False to disable either cap.
		
		@input: initial_random_seed4embedding
		
			torch manual seed used when building the fallback ModeEmbedding.
		
		@input: system_configs
		
			sizes ('num_mode_classification', 'mode_embedding_dim') for the
			fallback ModeEmbedding.
		
		@OUTPUT: GENERATED_TRIP
		
			a list of 1d tensors of length 4, each (t, lon, lat, mode_label);
			the first entry is the begin point (0, 0, 0, 5).
		
		"""
		if isinstance(ModeEmbedding, bool):
			#no embedding module supplied: build a reproducible one.
			torch.manual_seed(initial_random_seed4embedding)
			#
			#BUGFIX: use the `device` argument instead of hard-coded 'cuda'
			#so CPU-only generation works; default behavior is unchanged.
			ModeEmbedding = nn.Embedding(num_embeddings = system_configs['num_mode_classification'], embedding_dim = system_configs['mode_embedding_dim']).to(device)
			
			#t-lon-lat part of the begin token is all zeros
			begining_tensor0 = torch.tensor(np.array([.0, .0, .0])).to(device)
			#	begining_tensor is 1d, shape (3 + embedding_dim); label '5' is the begin mode
			begining_tensor = torch.cat((begining_tensor0, ModeEmbedding(torch.LongTensor([5]).to(device))[0]), dim=0)
			start_token = begining_tensor.view(1, 1, len(begining_tensor)).to(torch.float32)
		else:
			begining_tensor0 = torch.tensor(np.array([.0, .0, .0]))
			#	begining_tensor is 1d, shape (3 + embedding_dim); label '5' is the begin mode
			begining_tensor = torch.cat((begining_tensor0, ModeEmbedding(torch.LongTensor([5]))[0]), dim=0)
			start_token = begining_tensor.view(1, 1, len(begining_tensor)).to(torch.float32)
		
		#GENERATED_TRIP collects 1d tensors (t, lon, lat, mode); '5' is the begin mode.
		#(removed an unused `last_t, last_lon, last_lat` unpacking here)
		GENERATED_TRIP = [torch.cat((torch.tensor(np.array([.0, .0, .0])), torch.tensor([5])), dim=0).to(torch.float32)]
		
		model = model.to(device)
		
		#input_seq and out shapes are (seq_len, batch_N, dim) == (1, 1, dim) initially
		input_seq = start_token.to(device)
		out = start_token.to(device)
		#
		#mode_infer_res is a 0-d tensor, comparable as mode_infer_res == 6
		mode_infer_res = torch.argmax(out[0, 0, 3:])
		#
		while True:
			#-----------------------------------------------
			#	out shape is (seq_len, 1, dim), last dim matches input_seq
			if mask:
				seq_len = input_seq.shape[0]
				#NOTE(review): mask_tensor never reaches the model; confirm intent.
				mask_tensor = look_ahead_mask1(seq_len, seq_len)
				out = model(input_seq.to(device))
			else:
				input_seq = input_seq.to(device)
				#out shape is (seq_len, batch_N, 3 + mode_classified_N)
				out = model(input_seq).to(device)
			#
			#---------------------------------------Mode infer res
			#mode inferred from the logits of the newest step.
			#BUGFIX: moved from hard-coded 'cuda' to `device`.
			mode_infer_res = torch.argmax(out[-1, 0, 3:]).to(device)
			#---------------------------------------Determine end or not
			#mode label '6' is the trip-ending token
			if mode_infer_res==6:
				break
			else:
				#embed the inferred mode; 1d, shape (dim_mode_embedding,)
				inferred_mode_embedding = ModeEmbedding(mode_infer_res)
				#next model-input row: delta t-lon-lat plus mode embedding
				singlepoint = torch.cat((out[-1, 0, :3], inferred_mode_embedding), dim=0)
				#
				#the model predicts deltas; accumulate onto the last emitted point.
				#BUGFIX: moved from hard-coded 'cuda' to `device`.
				updated_t_lon_lat = out[-1, 0, :3].to(device) + GENERATED_TRIP[-1][:3].to(device)
				#
				GENERATED_TRIP.append(torch.cat((updated_t_lon_lat, mode_infer_res.unsqueeze(0)), dim=0))
				#
				#-------------------------Feed the inferred point back in:
				#input_seq shape grows from (seq_len, 1, dim) to (seq_len+1, 1, dim)
				reshaped_single_point = singlepoint.unsqueeze(0).unsqueeze(1)
				input_seq = torch.cat((input_seq, reshaped_single_point), dim=0)
			
			#safety stops: cap on point count and on accumulated time
			if not isinstance(N_points_max, bool):
				if input_seq.shape[0]>=N_points_max:
					break
			if not isinstance(t_MAX_sec, bool):
				if updated_t_lon_lat[0]>=t_MAX_sec:
					break
		
		#BUGFIX: removed an unreachable duplicate `return np.array(GENERATED_TRIP)`
		return GENERATED_TRIP
	
	
	@classmethod
	def ContinuousGeneration_using_GenerativeTripChain_v1(self, model, trip_data_embeding, ModeEmbedding, mask = False, device = torch.device('cuda'), modes_dict = {0:99.0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7}, N_points_max = False):
		"""
		Autoregressive generation with a trained GenerativeTripChain_v1.
		
		Unlike v2, the emitted t-lon-lat values are taken directly from the
		model output (no delta accumulation).
		
		----------------------------------------------------------
		@input: model
		
			the trained GenerativeTripChain_v1 module; can be loaded via
			
				model = TheModelClass(*args, **kwargs)
				model.load_state_dict(torch.load(PATH))
		
		@input: trip_data_embeding
		
			used to generate the start token (its first row). Obtained via:
			
				trip_data_embeding, groundtruth = DP.TripDataProcess.Trajec2EmbeddingWithLabel(data_array0)
		
		@input: ModeEmbedding
		
			an nn.Embedding mapping mode labels to vectors; e.g. created as
			nn.Embedding(num_embeddings_mode, mode_embedding_dim) and
			restored via load_state_dict.
		
		@input: mask
		
			if truthy, a look-ahead mask is computed each step.
			NOTE(review): mask_tensor is never passed to the model -- same
			open issue as in the v2 method.
		
		@input: device
		
			torch device used for the model and the input sequence.
		
		@input: modes_dict
		
			class-index to mode-label mapping (STAY-99, WALK-1, VEHICLE-2,
			TRAIN-3, BICYCLE-4; 5 is begin, 6 is end, 7 is NaN).
			Currently documentation-only.
		
		@input: N_points_max
		
			generation stops once the input sequence reaches this many
			points. Pass False to disable the cap.
		
		@OUTPUT: GENERATED_TRIP
		
			a list of 1d tensors of length 4, each (t, lon, lat, mode_label).
			Unlike v2, the begin point is NOT included.
		
		"""
		#GENERATED_TRIP collects 1d tensors (t, lon, lat, mode)
		GENERATED_TRIP = []
		
		model = model.to(device)
		
		#start_token shape is (1, 1, dim), corresponding to (seq_len, batch_N, dim);
		#	label '5' is begin and '6' is end in the mode labeling system
		start_token = self.StartTokenGivenTripEmbedingInstance(trip_data_embeding).to(torch.float32)
		#input_seq and out shapes are (seq_len, batch_N, dim) == (1, 1, dim) initially
		input_seq = start_token.to(device)
		out = start_token.to(device)
		#
		#mode_infer_res is a 0-d tensor, comparable as mode_infer_res == 6
		mode_infer_res = torch.argmax(out[0, 0, 3:])
		#
		while True:
			#-----------------------------------------------
			#	out shape is (seq_len, 1, dim), last dim matches input_seq
			if mask:
				seq_len = input_seq.shape[0]
				#NOTE(review): mask_tensor never reaches the model; confirm intent.
				mask_tensor = look_ahead_mask1(seq_len, seq_len)
				out = model(input_seq.to(device), )
			else:
				input_seq = input_seq.to(device)
				out = model(input_seq )
			#
			#---------------------------------------Mode infer res
			#mode inferred from the logits of the newest step
			mode_infer_res = torch.argmax(out[-1, 0, 3:])
			#---------------------------------------Determine end or not
			#mode label '6' is the trip-ending token
			if mode_infer_res==6:
				break
			else:
				#embed the inferred mode; 1d, shape (dim_mode_embedding,)
				inferred_mode_embedding = ModeEmbedding(mode_infer_res)
				#next model-input row: t-lon-lat plus mode embedding
				singlepoint = torch.cat((out[-1, 0, :3], inferred_mode_embedding), dim=0)
				#
				#emit the absolute t-lon-lat (no delta accumulation in v1)
				GENERATED_TRIP.append(torch.cat((out[-1, 0, :3], mode_infer_res.unsqueeze(0)), dim=0))
				#
				#-------------------------Feed the inferred point back in:
				#input_seq shape grows from (seq_len, 1, dim) to (seq_len+1, 1, dim)
				reshaped_single_point = singlepoint.unsqueeze(0).unsqueeze(1)
				input_seq = torch.cat((input_seq, reshaped_single_point), dim=0)
			#safety stop: cap on point count
			if not isinstance(N_points_max, bool):
				if input_seq.shape[0]>=N_points_max:
					break
		#
		#BUGFIX: removed an unreachable duplicate `return np.array(GENERATED_TRIP)`
		return GENERATED_TRIP

class MyLoss_v2(nn.Module):
    """
    Training loss for GenerativeTripChain_v2 outputs.

    Three terms: MSE on the moment (t) channel, cross-entropy on the mode
    scores, and MSE on the lon-lat channels (excluding the final row).
    """

    def __init__(self, ):
        super(MyLoss_v2, self).__init__()
        #
        #===========Loss parts: moment regression, mode classification, lon-lat regression
        #https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
        self.moment_generation_loss = nn.MSELoss()
        #
        self.mode_classification_loss = nn.CrossEntropyLoss()
        #
        self.t_lon_lat_loss = nn.MSELoss()

    def forward(self, pred, groundtruth, ):
        """
        @input: pred, groundtruth

			both of size (batch_N, N+1, 3 + num_embeddings_mode).

			channel 0 is the moment t, channels 1:3 are lon-lat, and
			channels 3: are the per-mode classification scores.

			NOTE(review): the [:-1] slices below index dim 0, which per this
			docstring is the batch dimension, while the intent ("drop the
			trip-ending row") suggests the sequence dimension -- confirm the
			actual tensor layout with the caller.

        @OUTPUT: a scalar loss tensor.
        """
        moment_generation_loss = self.moment_generation_loss(pred[:, :, 0], groundtruth[:, :, 0])
        #
        mode_classification_loss = self.mode_classification_loss(pred[:, :, 3:], groundtruth[:, :, 3:])
        #
        #'-1' is because the last row is the ending of the trip and lon-lat makes no sense there
        t_lon_lat_loss = self.t_lon_lat_loss(pred[:-1, :, 1:3], groundtruth[:-1, :, 1:3])

        #BUGFIX: moment_generation_loss was computed but omitted from the
        #returned sum; the v2 design separates the moment term on purpose.
        return moment_generation_loss + mode_classification_loss + t_lon_lat_loss




class MyLoss_v1(nn.Module):
    """
    Training loss for GenerativeTripChain_v1 outputs.

    Two terms: cross-entropy on the mode scores (channels 3:) and MSE on
    the t-lon-lat channels (channels :3, excluding the final row).
    """

    def __init__(self, ):
        super().__init__()
        #cross-entropy handles the mode-classification part
        #https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
        self.mode_classification_loss = nn.CrossEntropyLoss()
        #plain MSE handles the t-lon-lat regression part
        self.t_lon_lat_loss = nn.MSELoss()

    def forward(self, pred, groundtruth, ):
        """
        @input: pred, groundtruth

			both of size (batch_N, N+1, 3 + num_embeddings_mode).

			channels :3 are t-lon-lat; channels 3: are the per-mode
			classification scores.

        @OUTPUT: a scalar loss tensor (classification + regression).
        """
        #split both tensors into their mode and kinematic parts
        pred_modes, gt_modes = pred[:, :, 3:], groundtruth[:, :, 3:]
        #'-1' drops the last row: it is the ending of the trip, so its
        #t-lon-lat values carry no meaning
        pred_tll, gt_tll = pred[:-1, :, :3], groundtruth[:-1, :, :3]

        total = self.mode_classification_loss(pred_modes, gt_modes)
        total = total + self.t_lon_lat_loss(pred_tll, gt_tll)
        return total



