# -*- coding: utf-8 -*-

"""

apy qhs_test.py
"""
from model.required_modules import *
#from required_modules import *
#
from datetime import datetime
import torch
import torch.nn as nn
#
import torch.utils.data
import numpy as np
import builtins
#
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
#

def my_collate_fn(batch):
    """
    Collate function for variable-length trip samples.

    Usage:

        DataLoader(toy_dataset, collate_fn=my_collate_fn, batch_size=5)

    @input: batch

        Each element comes from dataset.__getitem__(), i.e. a pair
        (model_input_tensor, groundtruth_tensor) where

            model_input_tensor shape is (N+2, 3 + embedding_dim)
            groundtruth_tensor shape is (N+2, 3 + num_embeddings_mode)

        and N differs per sample.

    @OUTPUT: INPUTS, GROUNDTRUTHS

        Two float32 tensors of shape (*, batch_N, *): samples are cropped
        to the shortest trip in the batch and stacked along dim 1.
    """
    # Shortest sample length in the batch; every sample is cropped to it.
    shortest = min(sample[0].shape[0] for sample in batch)

    # Each trip with N geo points is augmented with a beginning and an ending,
    # giving tensors of shape (N+2, *). The model input is therefore
    # tensor[:N+1, :] and the ground truth is tensor[1:N+2, :] — hence the
    # '-1' / '1:' below (teacher-forcing style one-step shift).
    cropped_inputs = [sample[0][:shortest - 1, :] for sample in batch]
    cropped_targets = [sample[1][1:shortest, :] for sample in batch]

    stacked_inputs = torch.stack(cropped_inputs, dim=1).to(torch.float32)
    stacked_targets = torch.stack(cropped_targets, dim=1).to(torch.float32)
    return stacked_inputs, stacked_targets
#
class morphGPTDataSet(Dataset):
    """
    Dataset of pre-processed trip tensors, stored one file per sample.

    Each input tensor shape is (N+2, 3 + embedding_dim).
        '3+' are moment, lon and lat while embedding_dim is for mode.
        N is the number of original data points; '+2' accounts for the
        appended start and ending tokens.
    Each groundtruth tensor shape is (N+2, 3 + num_embeddings_mode).
        'num_embeddings_mode' covers the per-mode probabilities
        (5 modes plus one beginning, one ending and one NaN class).
    """

    def __init__(self, root_dir, typee='training', transform=None, target_transform=None):
        """
        @input: root_dir
            Directory containing the 'train/' and 'validate/' sub-trees.
            Must end with a path separator — paths are built by string
            concatenation (kept from the original code).
        @input: typee
            Either 'training' or 'validate'.
        @input: transform, target_transform
            Accepted for torchvision-style API compatibility; currently
            unused by __getitem__ (stored for future use).
        """
        # Local import keeps this class self-contained even without the
        # project's wildcard import providing `os`.
        import os

        if typee == 'training':
            subset = 'train/'
        elif typee == 'validate':
            subset = 'validate/'
        else:
            # BUG FIX: the original silently left input_dir/label_dir unset
            # for an unknown `typee` and crashed later; fail fast instead.
            raise ValueError("typee must be 'training' or 'validate', got %r" % (typee,))

        self.root_dir = root_dir
        self.transform = transform
        self.target_transform = target_transform
        self.input_dir = root_dir + subset + 'input/'
        self.label_dir = root_dir + subset + 'groundtruth/'
        # BUG FIX: os.listdir returns entries in arbitrary order, so the two
        # unsorted lists could pair an input with the wrong groundtruth file.
        # Sorting both makes the idx -> (input, label) pairing deterministic
        # (assumes matching file names in both directories).
        self.input_file_names = sorted(os.listdir(self.input_dir))
        self.label_file_names = sorted(os.listdir(self.label_dir))

    def __len__(self):
        """Number of samples, i.e. number of input files."""
        return len(self.input_file_names)

    def __getitem__(self, idx):
        """
        Return the (input, groundtruth) tensor pair of sample `idx`.

        input shape is (N+2, 3 + embedding_dim);
        groundtruth shape is (N+2, 3 + num_embeddings_mode).
        """
        inputt = torch.load(self.input_dir + self.input_file_names[idx])
        label = torch.load(self.label_dir + self.label_file_names[idx])
        return inputt, label


# for self-attention masking
def sequence_mask(seq:torch.LongTensor, padding_idx:int=None) -> torch.BoolTensor:
    """Boolean padding mask for self-attention.

    seq: [bsz, slen], possibly containing padding_idx entries.
    Positions that are True (i.e. equal to padding_idx) should receive
    '-inf' before the scaled-dot attention softmax.
    """
    pad_positions = seq == padding_idx
    return pad_positions

# for decoder's look-ahead masking 
def look_ahead_mask(tgt_len:int, src_len:int) -> torch.FloatTensor:
    """Additive look-ahead mask for the transformer decoder.

    Returns a (tgt_len, src_len) float tensor that is 0 on and below the
    diagonal and '-inf' strictly above it, so that (when added before the
    softmax) future target steps cannot attend to positions ahead of them.
    """
    future_positions = torch.ones(tgt_len, src_len).triu(diagonal=1).bool()
    out = torch.zeros(tgt_len, src_len)
    out[future_positions] = -float('inf')
    return out
#array with 0 and 1
def look_ahead_mask1(tgt_len:int, src_len:int) -> torch.FloatTensor:
    """Binary (0/1) variant of the decoder look-ahead mask.

    Returns a (tgt_len, src_len) float tensor with 1.0 on and below the
    diagonal and 0.0 strictly above it — i.e. a lower-triangular matrix of
    ones, marking the positions each target step is allowed to attend to.
    """
    allowed = torch.tril(torch.ones(tgt_len, src_len))
    return allowed

def AddBatchDimension(tensor_raw, added_dim = 0):
    """
    Add a batch dimension of size 1 to a tensor.

    BUG FIX: this was a stub that returned None; implemented with
    torch.unsqueeze, keeping the original signature.

    @input: tensor_raw
        a torch.Tensor of any shape.
    @input: added_dim
        position at which the new size-1 axis is inserted (default 0,
        the conventional batch axis).
    @OUTPUT:
        a view of tensor_raw with one extra dimension of size 1.
    """
    return torch.unsqueeze(tensor_raw, added_dim)


class TripDataProcess():
    """
    Helpers converting raw trip records (time, lon, lat, mode) into
    model-ready embedding tensors and one-hot ground-truth tensors.

    Mode labelling system:

        transport: STAY-99, WALK-1, VEHICLE-2, TRAIN-3, BICYCLE-4 (int)
        converted to class ids {99.0: 0, 1.0: 1, 2.0: 2, 3.0: 3, 4.0: 4};
        class id 5 marks the trip beginning, 6 the ending, 7 NaN/unknown
        (8 classes in total).

    Mode embedding example:

        # 8 embeddings: 5 modes + beginning + ending + NaN
        mode_embedding = nn.Embedding(num_embeddings=8, embedding_dim=10)
        mode_embedding(torch.LongTensor([1]))
    """

    @classmethod
    def mode_convert(cls, modes, modes_dict = {99.0:.0, 1.0:1.0, 2.0:2.0, 3.0:3.0, 4.0:4.0, np.nan:7}):
        """
        Map raw transport-mode codes to integer class ids.

        BUG FIX: a NaN read from data is generally a different float object
        than the ``np.nan`` key above, and NaN never compares equal to
        itself, so the plain dict lookup raised KeyError for real NaN
        values. NaN is now detected explicitly and routed to class id 7.

        @input: modes      iterable of raw mode codes (floats, possibly NaN)
        @input: modes_dict raw-code -> class-id mapping (read-only default)
        @OUTPUT: 1d np.ndarray of int class ids, same length as `modes`.
        """
        labels = []
        for m in modes:
            if m != m:
                # NaN is the only value unequal to itself; ``np.nan`` is a
                # module-level singleton, so this .get() finds the default
                # dict's entry by identity (fallback 7 for custom dicts).
                labels.append(int(modes_dict.get(np.nan, 7)))
            else:
                labels.append(int(modes_dict[m]))
        return np.array(labels)

    @classmethod
    def _boundary_tokens(cls, mode_embedding):
        """
        Build the beginning/ending token rows.

        Both are 1d tensors of shape (3 + embedding_dim,): three zeros for
        (t, lon, lat) followed by the mode embedding of the boundary class.
        NOTE(review): the input-side ending token uses embedding index 7
        while the ground truth labels the ending as class 6 — kept exactly
        as in the original code; confirm whether index 6 was intended.
        """
        zeros3 = torch.tensor(np.array([.0, .0, .0]))
        # '5' is the trip-chain beginning label.
        begin = torch.cat((zeros3, mode_embedding(torch.LongTensor([5]))[0]), dim=0)
        end = torch.cat((torch.tensor(np.array([.0, .0, .0])),
                         mode_embedding(torch.LongTensor([7]))[0]), dim=0)
        return begin, end

    @classmethod
    def _build_groundtruth(cls, trip_data_embeding, mode_labels_of_data, num_embeddings_mode):
        """
        Build the (N+2, 3 + num_embeddings_mode) ground-truth tensor.

        Columns 0..2 copy (t, lon, lat) from the embedding; the remaining
        columns are a one-hot mode target per row (0/1 "probabilities").
        Row 0 is the beginning token (class 5), the last row the ending
        token (class 6); data rows are shifted down by one.
        """
        one_hot = torch.zeros(trip_data_embeding.shape[0], num_embeddings_mode)
        # '+1' skips the beginning-token row.
        for i, label in enumerate(mode_labels_of_data):
            one_hot[i + 1, label] = 1.0
        one_hot[0, 5] = 1.0   # beginning is labeled as 5
        one_hot[-1, 6] = 1.0  # ending is labeled as 6
        groundtruth = torch.zeros(trip_data_embeding.shape[0], 3 + num_embeddings_mode)
        groundtruth[:, :3].copy_(trip_data_embeding[:, :3])
        groundtruth[:, 3:].copy_(one_hot)
        return groundtruth

    @classmethod
    def Trajec2EmbeddingWithLabel_delta_t_delta_lon_lat(cls, data_array0, num_embeddings_mode = 8, mode_embedding_dim = 10, shift_lonlat = False, initial_random_seed4embedding = 0):
        """
        Convert one trajectory into (embedding, groundtruth), encoding
        per-step deltas (delta_t, delta_lon, delta_lat) instead of the
        absolute t/lon/lat used by Trajec2EmbeddingWithLabel.

        Callback:

            eb, gt = TripDataProcess.Trajec2EmbeddingWithLabel_delta_t_delta_lon_lat(data_array0)

        @input: data_array0
            (N, 4) object array, columns moment/lon/lat/mode with formats
            str ('%Y-%m-%d %H:%M:%S'), float, float, float.
        @input: num_embeddings_mode
            8 = 5 modes + beginning + ending + NaN.
        @input: mode_embedding_dim
            dimension of the mode embedding; each row has 3 + this many columns.
        @input: shift_lonlat
            if True, lon/lat are shifted relative to the first point before
            differencing (the deltas themselves are unaffected except dtype).
        @input: initial_random_seed4embedding
            torch seed so the randomly initialised embedding is reproducible.
        @OUTPUT: trip_data_embeding, groundtruth
            shapes (N+2, 3 + mode_embedding_dim) and
            (N+2, 3 + num_embeddings_mode); see _build_groundtruth for the
            label layout.
        """
        # Fix the RNG so nn.Embedding's random init is reproducible.
        torch.manual_seed(initial_random_seed4embedding)
        ModeEmbedding = nn.Embedding(num_embeddings_mode, mode_embedding_dim)

        start_lon, start_lat = float(data_array0[0, 1]), float(data_array0[0, 2])
        begining_tensor, ending_tensor = cls._boundary_tokens(ModeEmbedding)

        # Per-step time deltas. Duplicating t0 as a virtual start point makes
        # np.diff return N values (first delta is 0), aligned with the rows.
        moments = [datetime.strptime(t, '%Y-%m-%d %H:%M:%S') for t in data_array0[:, 0]]
        moments_sec = [moments[0].timestamp()] + [m.timestamp() for m in moments]
        diff_ts = np.diff(moments_sec)

        if shift_lonlat:
            lons = data_array0[:, 1] - start_lon
            lats = data_array0[:, 2] - start_lat
        else:
            lons = data_array0[:, 1]
            lats = data_array0[:, 2]
        # Leading 0.0 keeps the delta arrays length-N, like diff_ts.
        diff_lons = np.concatenate((np.array([.0], dtype=data_array0.dtype), np.diff(lons)))
        diff_lats = np.concatenate((np.array([.0], dtype=data_array0.dtype), np.diff(lats)))

        # mode_embeddings shape is (N, mode_embedding_dim).
        mode_labels_of_data = cls.mode_convert(data_array0[:, -1])
        mode_embeddings = ModeEmbedding(torch.LongTensor(mode_labels_of_data))

        # Assemble rows: [delta_t, delta_lon, delta_lat, mode_embedding...].
        col_t = torch.tensor(diff_ts).view(-1, 1)
        col_lon = torch.tensor(diff_lons.astype(np.float32)).view(-1, 1)
        col_lat = torch.tensor(diff_lats.astype(np.float32)).view(-1, 1)
        body = torch.cat((col_t, col_lon, col_lat, mode_embeddings), dim=1)
        # (N+2, 3 + mode_embedding_dim) with boundary tokens prepended/appended.
        trip_data_embeding = torch.cat((begining_tensor.view(1, -1), body, ending_tensor.view(1, -1)), dim=0)

        groundtruth = cls._build_groundtruth(trip_data_embeding, mode_labels_of_data, num_embeddings_mode)
        return trip_data_embeding, groundtruth

    @classmethod
    def Trajec2EmbeddingWithLabel(cls, data_array0, num_embeddings_mode = 8, mode_embedding_dim = 10, shift_lonlat = False):
        """
        Convert one trajectory into (embedding, groundtruth) using absolute
        values: seconds since the earliest point, and raw (or start-shifted)
        lon/lat. See Trajec2EmbeddingWithLabel_delta_t_delta_lon_lat for the
        delta-encoded variant.

        Callback:

            eb, gt = TripDataProcess.Trajec2EmbeddingWithLabel(data_array0)

        @input: data_array0
            (N, 4) object array, columns moment/lon/lat/mode with formats
            str ('%Y-%m-%d %H:%M:%S'), float, float, float.
        @input: num_embeddings_mode
            8 = 5 modes + beginning + ending + NaN.
        @input: mode_embedding_dim
            dimension of the mode embedding; each row has 3 + this many columns.
        @input: shift_lonlat
            if True, lon/lat are shifted relative to the first point.
        @OUTPUT: trip_data_embeding, groundtruth
            shapes (N+2, 3 + mode_embedding_dim) and
            (N+2, 3 + num_embeddings_mode); see _build_groundtruth for the
            label layout. NOTE: unlike the delta variant, no seed is set, so
            the embedding weights differ between calls.
        """
        ModeEmbedding = nn.Embedding(num_embeddings_mode, mode_embedding_dim)

        start_lon, start_lat = float(data_array0[0, 1]), float(data_array0[0, 2])
        begining_tensor, ending_tensor = cls._boundary_tokens(ModeEmbedding)

        # Convert moments to seconds and shift so the earliest is 0.
        moments = [datetime.strptime(t, '%Y-%m-%d %H:%M:%S') for t in data_array0[:, 0]]
        moments_sec = [m.timestamp() for m in moments]
        moments_sec = np.array(moments_sec) - min(moments_sec)

        if shift_lonlat:
            lons = data_array0[:, 1] - start_lon
            lats = data_array0[:, 2] - start_lat
        else:
            lons = data_array0[:, 1]
            lats = data_array0[:, 2]

        # mode_embeddings shape is (N, mode_embedding_dim).
        mode_labels_of_data = cls.mode_convert(data_array0[:, -1])
        mode_embeddings = ModeEmbedding(torch.LongTensor(mode_labels_of_data))

        # Assemble rows: [t_sec, lon, lat, mode_embedding...].
        col_t = torch.tensor(moments_sec).view(-1, 1)
        col_lon = torch.tensor(lons.astype(np.float32)).view(-1, 1)
        col_lat = torch.tensor(lats.astype(np.float32)).view(-1, 1)
        body = torch.cat((col_t, col_lon, col_lat, mode_embeddings), dim=1)
        # (N+2, 3 + mode_embedding_dim) with boundary tokens prepended/appended.
        trip_data_embeding = torch.cat((begining_tensor.view(1, -1), body, ending_tensor.view(1, -1)), dim=0)

        groundtruth = cls._build_groundtruth(trip_data_embeding, mode_labels_of_data, num_embeddings_mode)
        return trip_data_embeding, groundtruth

    @classmethod
    def Trajec2Embedding(cls, data_array0, num_mode_classification = 8, mode_embedding_dim = 10, ):
        """
        Convert one trajectory into its embedding only (no ground truth).
        lon/lat are always shifted relative to the first point here.

        Callback:

            eb = TripDataProcess.Trajec2Embedding(data_array)

        @input: data_array0
            (N, 4) object array, columns moment/lon/lat/mode with formats
            str ('%Y-%m-%d %H:%M:%S'), float, float, float.
        @input: num_mode_classification
            8 = 5 modes + beginning + ending + NaN.
        @input: mode_embedding_dim
            dimension of the mode embedding.
        @OUTPUT:
            tensor of shape (N+2, 3 + mode_embedding_dim): boundary tokens
            around rows of [t_sec, shifted_lon, shifted_lat, mode_embedding].
        """
        ModeEmbedding = nn.Embedding(num_mode_classification, mode_embedding_dim)

        start_lon, start_lat = float(data_array0[0, 1]), float(data_array0[0, 2])
        begining_tensor, ending_tensor = cls._boundary_tokens(ModeEmbedding)

        # Convert moments to seconds and shift so the earliest is 0.
        moments = [datetime.strptime(t, '%Y-%m-%d %H:%M:%S') for t in data_array0[:, 0]]
        moments_sec = [m.timestamp() for m in moments]
        moments_sec = np.array(moments_sec) - min(moments_sec)

        lons = data_array0[:, 1] - start_lon
        lats = data_array0[:, 2] - start_lat

        # mode_convert already returns int labels, so feed them to
        # LongTensor directly (the original detoured through float32).
        mode_embeddings = ModeEmbedding(torch.LongTensor(cls.mode_convert(data_array0[:, -1])))

        col_t = torch.tensor(moments_sec).view(-1, 1)
        col_lon = torch.tensor(lons.astype(np.float32)).view(-1, 1)
        col_lat = torch.tensor(lats.astype(np.float32)).view(-1, 1)
        body = torch.cat((col_t, col_lon, col_lat, mode_embeddings), dim=1)
        return torch.cat((begining_tensor.view(1, -1), body, ending_tensor.view(1, -1)), dim=0)

    @classmethod
    def tripdata2embedding(cls):
        """
        Not implemented yet; kept for interface compatibility.

        BUG FIX: the original @classmethod had no ``cls`` parameter, so any
        call raised TypeError before even reaching the body.
        """
        pass







class MapDataProcess():
    """
    Utilities that turn an OSM road network (a GeoDataFrame of edges) into
    flat coordinate matrices plus sinusoidal positional embeddings.

    All methods are classmethods; no instance state is used.
    """

    @classmethod
    def POS_embedding(cls, POS_vector, dim_embedding4pos = 4):
        """
        Build a sinusoidal positional embedding for each entry of POS_vector.

        Callback:

            PE = DP.MapDataProcess.POS_embedding(POS_vector, dim_embedding4pos = 3)

        ---------------------------------

        @input: POS_vector

            a 1d array with shape (N_points, ); entry j is the position id of
            point j (in this project: the index of the edge the point lies on).

        @input: dim_embedding4pos

            number of embedding dimensions produced per position.

        @OUTPUT: PE

            a tensor with shape (dim_embedding4pos, N_points).
            Row i holds sin(pos / 10000**(2*i/dim)) when i is even and
            cos(pos / 10000**(2*i/dim)) when i is odd.

            NOTE(review): the exponent uses 2*i/dim for EVERY row, not the
            transformer convention 2*(i//2)/dim — kept as in the original on
            purpose; confirm before aligning with "Attention Is All You Need".
        """
        N_points = len(POS_vector)

        # POS_mat shape (dim_embedding4pos, N_points): column j repeats POS_vector[j].
        POS_mat = np.repeat(POS_vector, dim_embedding4pos).reshape(N_points, -1).T

        # INDEX_mat shape (dim_embedding4pos, N_points): row i holds the constant i.
        INDEX_mat = np.repeat(range(dim_embedding4pos), N_points).reshape(-1, N_points)

        # odd_select is 1.0 on rows with an odd dimension index, 0.0 elsewhere.
        odd_select = np.zeros_like(INDEX_mat, dtype=float)
        odd_select[INDEX_mat % 2 != 0] = 1

        # Shared denominator 10000**(2*i/dim); hoisted so it is computed once.
        denom = torch.pow(torch.tensor(10000.0), 2.0 * torch.tensor(INDEX_mat) / dim_embedding4pos)
        PE_even = torch.sin(torch.tensor(POS_mat) / denom)
        PE_odd = torch.cos(torch.tensor(POS_mat) / denom)

        # Blend: even rows take the sin branch, odd rows take the cos branch.
        return PE_even * (1 - odd_select) + PE_odd * odd_select

    @classmethod
    def edges_gdf2embedding(cls, edges_gdf, offsets = None):
        """
        Flatten all edge geometries into one coordinate matrix plus a
        per-point position-id vector.

        Callback:

            COORS_mat,POS_vector = DP.MapDataProcess.edges_gdf2embedding(edges_gdf, offsets = np.array([.0, .0]))

        -------------------------------------------
        @input: edges_gdf

            geopandas.geodataframe.GeoDataFrame of edges, e.g. from

                nodes, edges_gdf = ox.graph_to_gdfs(roadnet, nodes=True, edges=True,
                                                    node_geometry=True, fill_edge_geometry=True)

            Only ``edges_gdf.geometry`` is consumed (via ``edges_gdf2coors``).

        @input: offsets

            per-dimension offset added to every raw coordinate, so
            returned coordinate = raw coordinate + offsets.
            Defaults to a 2-d zero offset (the default is created inside the
            body to avoid a shared mutable default argument).

        @OUTPUT: COORS_mat, POS_vector

            COORS_mat: 2d float array of shape (dim_point, N_points_all) —
            the concatenation of every edge's (offset) points, where
            N_points_all counts the points of all edges in the network.

            POS_vector: 1d int array of shape (N_points_all,); each point
            carries the 0-based index of the edge it belongs to, so all
            points of one edge share the same position id.
        """
        if offsets is None:
            offsets = np.array([.0, .0])
        else:
            offsets = np.asarray(offsets, dtype=float)

        # dim_point is the dimension of a point (2 for lon/lat).
        dim_point = len(offsets)

        # edges_coors[u][v] is a (dim_point, N_points_of_this_edge) array.
        edges_coors = cls.edges_gdf2coors(edges_gdf = edges_gdf, offsets = offsets)

        per_edge_arrays = []   # one (dim_point, n_points) array per edge
        pos_ids = []           # the edge index, repeated once per point
        edge_index = 0
        for u in edges_coors:
            for v in edges_coors[u]:
                arr = edges_coors[u][v]
                per_edge_arrays.append(arr)
                pos_ids.extend([edge_index] * arr.shape[1])
                edge_index += 1

        N_points = sum(a.shape[1] for a in per_edge_arrays)
        # BUGFIX(review): the row count was hard-coded to 2 even though
        # dim_point was already computed; use dim_point so non-2d offsets work.
        COORS_mat = np.zeros((dim_point, N_points))
        col_offset = 0
        for arr in per_edge_arrays:
            COORS_mat[:, col_offset:col_offset + arr.shape[1]] = arr
            col_offset += arr.shape[1]

        POS_vector = np.array(pos_ids)
        return COORS_mat, POS_vector

    @classmethod
    def edges_gdf2coors(cls, edges_gdf, offsets = None):
        """
        Collect every edge's point coordinates into a nested dict.

        Callback:

            edges_coors = DP.MapDataProcess.edges_gdf2coors(edges_gdf, offsets = np.array([.0, .0]))

        --------------------------------------------------
        @input: edges_gdf

            GeoDataFrame of edges; only ``edges_gdf.geometry`` is used.
            The geometry index entries are (u, v, key) tuples and each value
            is a LineString whose .coords yields (lon, lat)-like pairs.

        @input: offsets

            per-dimension offset added to every raw coordinate
            (returned coordinate = raw coordinate + offsets).
            Defaults to a 2-d zero offset.

        @OUTPUT: edges_coors

            edges_coors[u][v] = 2d array of shape (dim_point, N), where N is
            the number of points of edge (u, v) and dim_point == len(offsets)
            ('2' means xy or lon,lat).

            NOTE(review): if the same (u, v) pair appears with several keys
            (parallel edges), later geometries overwrite earlier ones — this
            matches the original behavior; confirm it is intended.
        """
        if offsets is None:
            offsets = np.array([.0, .0])
        else:
            offsets = np.asarray(offsets, dtype=float)

        # edges_coors[start][end]
        edges_coors = {}
        geometries_series = edges_gdf.geometry
        for indexx in geometries_series.index:
            u, v, _ = indexx
            # raw_coords shape is (N_points, dim_point); e.g. rows like
            # (139.702998, 35.658878).
            raw_coords = np.array(geometries_series[indexx].coords)
            # BUGFIX(review): the original built the offset matrix with
            # np.repeat(offsets, N_points).reshape(N_points, -1), which lays
            # the repeated values out ROW-wise: the first points got
            # offsets[0] added to BOTH coordinates and the last points got
            # offsets[1] added to both. NumPy broadcasting of the (dim_point,)
            # vector against the (N_points, dim_point) array adds
            # (offsets[0], offsets[1]) to every row, as intended.
            coords = raw_coords + offsets
            edges_coors.setdefault(u, {})[v] = coords.T

        return edges_coors
        
class DataProcess():
    """
    
    """
    
    
    
    
    
    @classmethod
    def InputOutputProcess(self, osm_path, trip_data_path, dim_embedding_map  = 6, mode_embedding_dim = 3, num_mode_classification = 8, ):
        """
        Build model inputs from one trip csv and one OSM map file.

        Dimension note:

            the embedded map has shape (N_points_in_map, dim_embedding_map);
            the embedded trip has shape (N_moments_in_trip_data+2, mode_embedding_dim+3),
            where '+2' is the artificial begin/end rows and '3' is t, lon, lat.

            If the map and trip embeddings must share one dimension, then
            dim_embedding_map == mode_embedding_dim + 3 must hold.

        Callback:

            trip_eb,COORS_mat,PositionEmbeding = DataProcess.InputOutputProcess(osm_path, trip_data_path)

        ----------------------------------------------------------------------
        @input: dim_embedding_map

            embedding dimension for every road-network point.

        @input: num_mode_classification

            '8' classes: 5 travel modes (STAY-99, WALK-1, VEHICLE-2, TRAIN-3,
            BICYCLE-4 (int)), plus one begin, one end and a NaN.

        @input: mode_embedding_dim

            embedding dimension of the travel mode; a trip row then has
            mode_embedding_dim + 3 dims ('3' for t, lon, lat).

        @OUTPUT: trip_eb,COORS_mat,PositionEmbeding

            trip_eb: (N_moments_in_trip_data+2, mode_embedding_dim+3) float32 tensor.
            COORS_mat: (2, N_points_in_whole_network) float32 tensor.
            PositionEmbeding: (dim_embedding_map, N_points_in_whole_network) float32 tensor.
        """
        # ---- trip data: columns 2.. are (t, lon, lat, mode) ----
        trip_frame = pd.read_csv(trip_data_path)
        trip_values = trip_frame.iloc[:, 2:].values
        # The first record supplies the origin used to re-center the map.
        start_t = trip_values[0, 0]
        start_lon = float(trip_values[0, 1])
        start_lat = float(trip_values[0, 2])
        # trip_eb shape: (N_moments_in_trip_data+2, mode_embedding_dim+3)
        trip_eb = TripDataProcess.Trajec2Embedding(
            trip_values,
            num_mode_classification = num_mode_classification,
            mode_embedding_dim = mode_embedding_dim,
        )

        # ---- map data ----
        # roadnet type is networkx.classes.multidigraph.MultiDiGraph
        roadnet = ox.graph_from_xml(osm_path)
        # edges is a geopandas frame.
        nodes, edges = ox.graph_to_gdfs(roadnet, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True)
        # COORS_mat: (2, N_points); coordinates shifted so the trip origin maps to (0, 0).
        COORS_mat, POS_vector = MapDataProcess.edges_gdf2embedding(
            edges_gdf = edges, offsets = np.array([-start_lon, -start_lat])
        )
        # PositionEmbeding: (dim_embedding_map, N_points)
        PositionEmbeding = MapDataProcess.POS_embedding(POS_vector, dim_embedding4pos = dim_embedding_map)

        # Cast everything to the default torch.float32 before returning.
        return trip_eb.float(), torch.tensor(COORS_mat).float(), PositionEmbeding.float()
        
    @classmethod
    def TranslateNetwork(self, osm_file_path, deltax_meter = 1000.0, deltay_meter = 1000.0):
        """
        Translate the road network by deltax_meter and deltay_meter.

        @input: osm_file_path

            a str path to the OSM file to load.

        NOTE(review): unimplemented stub — the network is loaded but no
        translation is applied yet; the method returns None.
        """
        # `og` (presumably osm2gmns — TODO confirm) must be importable at call time.
        roadnet = og.getNetFromFile(osm_file_path, network_types="auto", )
        return None
    
    
    
    
    @classmethod
    def ReadTrajetories(self, data_pd):
        """
        
        """
        
        
        
        pass
    

    @classmethod
    def Convert_osm2graph():
        """
        
        """
        
        
        
        pass

