import tensorflow as tf
import numpy as np
import csv
import time
import os
from tqdm import tqdm



class ShipLoader():
    """Loader for preprocessed ship-trajectory data stored as per-file .npy arrays.

    Expected directory layout under root_path:
        {flag}_x/{i}.npy   input sequences for split `flag`
        {flag}_y/{i}.npy   target sequences for split `flag`
    """

    def __init__(self, root_path, flag='train'):
        """
        Args:
            root_path (str): directory containing the "{flag}_x"/"{flag}_y" folders.
            flag (str): dataset split; one of 'train', 'test', 'valid'.
        """
        super().__init__()
        assert flag in ['train', 'test', 'valid']
        self.flag = flag
        self.root_path = root_path
        self.x = []  # file paths of the input .npy arrays
        self.y = []  # file paths of the target .npy arrays


    def row2array(self, row):
        """Convert a row of csv to a trajectory point.

        Args:
            row (list): a row of the csv file.

        Returns:
            np.ndarray: point [delta_time(ms), delta_lng, delta_lat, sog, cog],
            dtype float32.
        """
        return np.array([row[6], row[7], row[8], row[2], row[5]], np.float32)


    def loadShipData(self):
        """Index every .npy file of the current split.

        Walks "{root_path}/{flag}_x" and "{root_path}/{flag}_y", appending the
        discovered file paths to self.x / self.y, then resets the epoch cursor
        used by getBatchSeq2Seq().
        """
        # walk every data file under root_path for this split
        for sub_dir, bucket in (("{}_x".format(self.flag), self.x),
                                ("{}_y".format(self.flag), self.y)):
            split_dir = os.path.join(self.root_path, sub_dir)
            for root, _dirs, files in os.walk(split_dir):
                for name in files:
                    bucket.append(os.path.join(root, name))

        self.length = len(self.x)
        self.index = 0


    def _sample_continuous_batch(self, batch_size, seq_length):
        """Sample `batch_size` continuous windows of `seq_length` points.

        A window is continuous iff none of its points is a break point
        (a point whose delta_time, point[0], equals 0.0).

        NOTE(review): self.trajectory is not populated anywhere in this file
        (the CSV-loading code in loadShipData was commented out), so this
        raises AttributeError unless it is set externally -- TODO confirm.

        Returns:
            seq: np.ndarray of shape [batch_size, seq_length, 5].
            next_point: np.ndarray of shape [batch_size, 5] -- the point
                immediately after each sampled window.
        """
        seq = []
        next_point = []
        for _ in range(batch_size):
            # resample until the window contains no break point
            while True:
                start = np.random.randint(0, len(self.trajectory) - seq_length)
                window = self.trajectory[start: start + seq_length]
                if all(point[0] != 0.0 for point in window):
                    break
            seq.append(window)
            next_point.append(self.trajectory[start + seq_length])
        return np.array(seq), np.array(next_point)


    def getBatchBP(self, batch_size, bp_step):
        """Get a batch of trajectory in bp_step and the point to predict for BP model training.

        Args:
            batch_size (int): the size of mini-batch.
            bp_step (int): the length of trajectory sequence.

        Returns:
            seq: shape of [batch_size, bp_step, 5].
            next_point: shape of [batch_size, 5].
        """
        return self._sample_continuous_batch(batch_size, bp_step)


    def getBatchLSTM(self, batch_size, seq_length):
        """Get a batch of trajectory in seq_length and the point to predict for LSTM model training.

        Args:
            batch_size (int): the size of mini-batch.
            seq_length (int): the length of trajectory sequence.

        Returns:
            seq: shape of [batch_size, seq_length, 5].
            next_point: shape of [batch_size, 5].
        """
        return self._sample_continuous_batch(batch_size, seq_length)


    def getBatchSeq2Seq(self, batch_size=64):
        """Get the next (x, y) batch for seq2seq model training.

        Loads the .npy file pair at the current epoch cursor, samples
        `batch_size` examples from it without replacement, and advances the
        cursor. When every file pair has been consumed, returns (None, None)
        once and resets the cursor for the next epoch.

        Args:
            batch_size (int): examples sampled per file. Defaults to 64 (the
                previously hard-coded value); each file must contain at least
                `batch_size` examples.

        Returns:
            (np.ndarray, np.ndarray): the sampled (x, y) pair, or
            (None, None) at the end of an epoch.
        """
        if self.index >= self.length:
            self.index = 0  # rewind so the next call starts a new epoch
            return None, None

        x = np.load(os.path.join(self.root_path,
                                 "{}_x/{}.npy".format(self.flag, self.index)))
        y = np.load(os.path.join(self.root_path,
                                 "{}_y/{}.npy".format(self.flag, self.index)))

        # sample without replacement so a batch never repeats an example
        chosen = np.random.choice(a=x.shape[0], size=batch_size, replace=False)
        self.index += 1
        return x[chosen], y[chosen]


    def _concat_split(self, sub_dir, count):
        """Concatenate arrays "{root_path}/{sub_dir}/{i}.npy" for i in [0, count).

        Collects all parts first and concatenates once (the previous inline
        loop re-concatenated on every iteration, which is O(n^2) in copies).
        Returns None when count == 0, mirroring the old behavior.
        """
        parts = [np.load(os.path.join(self.root_path,
                                      "{}/{}.npy".format(sub_dir, i)))
                 for i in tqdm(range(count))]
        return np.concatenate(parts, axis=0) if parts else None


    def get_all_data(self, flag, dir_name="ospline_overfit"):
        """Load the full dataset for `flag`, using a cached single-file copy.

        If "DataSet/datasets/{dir_name}_npy/{flag}_x.npy" (resp. _y) exists it
        is loaded directly; otherwise every per-file array is concatenated and
        the result is saved there for subsequent runs.

        NOTE(review): file indexing uses self.flag (via loadShipData) while the
        per-file paths below use the `flag` argument -- presumably these are
        expected to match; verify at the call site.

        Args:
            flag (str): dataset split name used in the file paths.
            dir_name (str): subfolder name of the cached single-file copy.

        Returns:
            (np.ndarray, np.ndarray): the full (data_x, data_y).
        """
        self.x = []
        self.y = []
        self.loadShipData()

        x_dir = "DataSet/datasets/{}_npy/{}_x.npy".format(dir_name, flag)
        y_dir = "DataSet/datasets/{}_npy/{}_y.npy".format(dir_name, flag)

        if os.path.exists(x_dir):
            print(x_dir)
            data_x = np.load(x_dir)
        else:
            data_x = self._concat_split("{}_x".format(flag), len(self.x))
            np.save(x_dir, data_x)  # cache for next time (no redundant re-save)

        if os.path.exists(y_dir):
            data_y = np.load(y_dir)
        else:
            data_y = self._concat_split("{}_y".format(flag), len(self.y))
            np.save(y_dir, data_y)

        print("data_x.shape: {}, data_y.shape: {}".format(data_x.shape, data_y.shape))
        return data_x, data_y



if __name__ == '__main__':
    # Smoke test: index the dataset, time the indexing, and draw one batch.
    # Bug fixes vs. the previous version:
    #  - ShipLoader requires root_path; it was constructed with no arguments.
    #  - time.clock() was removed in Python 3.8; perf_counter() replaces it.
    #  - loadTrajectoryData() does not exist on ShipLoader; loadShipData()
    #    is the indexing entry point.
    #  - getBatchSeq2Seq() takes no positional size/length arguments.
    loader = ShipLoader("./DataSet", flag='train')
    start = time.perf_counter()
    loader.loadShipData()
    end = time.perf_counter()
    print("running time: %s s" % (end - start))
    seq = loader.getBatchSeq2Seq()

