
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
from utils.norm_feature import FeatureNorm
from utils.prepare_feature import transform2_lagged_feature_md, multi_step_y_md, postprocess
from utils.timefeatures import time_features

# Dataset variant to process; switch variants by editing this name.
# dir_name = "ospline_big"
dir_name = "liner_overfit"
data_dir = f"DataSet/datasets/{dir_name}"

# Raw CSV column layout (files have no header row).
columns = ['date', 'mmsi', 'sog', 'lng', 'lat', 'cog', 'deltatime', 'deltalng', 'deltalat']

# Features fed to the model and features it must predict.
input_columns = ['deltatime', 'deltalng', 'deltalat', 'sog', 'cog']
output_columns = ['deltatime', 'deltalng', 'deltalat', 'sog', 'cog']


# Normalizer; in 'val' mode it loads per-feature statistics stored under weights_dir.
feature_norm = FeatureNorm()
weights_dir = f"weights/{dir_name}"

# Sliding-window sizes: 40 history steps in, 20 predicted steps out.
input_seq_length = 40
output_seq_length = 20


# Create the output folders for the processed .npy windows.
folder_lists = ['train_x', 'train_y', 'valid_x', 'valid_y']
for fl in folder_lists:
    # exist_ok=True is idempotent and avoids the check-then-create race
    # of testing os.path.exists() before os.makedirs().
    os.makedirs("DataSet/datasets/{}_npy/{}".format(dir_name, fl), exist_ok=True)

# Running indices used to name the saved .npy files for each split.
train_couter, valid_couter = 0, 0

for root, dirs, files in os.walk(data_dir):
    for name in tqdm(files):
        csv_path = os.path.join(root, name)

        # CSVs carry no header row; apply the expected column names.
        temp_data = pd.read_csv(csv_path, names=columns)

        temp_data = temp_data[input_columns]
        # Normalize with the statistics persisted in weights_dir
        # ('val' mode: load existing stats, do not refit).
        x = feature_norm(temp_data, model_dir=weights_dir, mode='val')
        y = x[output_columns]

        # Sliding windows: inputs of input_seq_length steps, multi-step
        # targets of output_seq_length steps.
        x = transform2_lagged_feature_md(x, window_sizes=input_seq_length, out_windows_size=output_seq_length)
        y = multi_step_y_md(y, predict_window=output_seq_length, input_window=input_seq_length)

        # Explicit raise instead of assert: asserts vanish under `python -O`.
        if x.shape[0] != y.shape[0]:
            raise ValueError(
                "window count mismatch for {}: x={} y={}".format(csv_path, x.shape[0], y.shape[0]))
        x, y = postprocess(x, y)

        # The immediate parent folder name decides the split. Using
        # os.path.basename (instead of splitting on a hard-coded "\\")
        # works on every OS; the old split("\\") silently matched nothing
        # on POSIX, so no files were ever saved there.
        train_or_valid = os.path.basename(root)
        base_dir = "DataSet/datasets/{}_npy".format(dir_name)
        if train_or_valid == 'train':
            np.save("{}/train_x/{}.npy".format(base_dir, train_couter), x)
            np.save("{}/train_y/{}.npy".format(base_dir, train_couter), y)
            train_couter += 1
        elif train_or_valid == 'valid':
            np.save("{}/valid_x/{}.npy".format(base_dir, valid_couter), x)
            np.save("{}/valid_y/{}.npy".format(base_dir, valid_couter), y)
            valid_couter += 1

    # NOTE(review): this prints once per walked directory, not once at the
    # end — kept as-is to preserve the original console output.
    print("Done...")