import os
import joblib
import pandas as pd
import numpy as np
import sys
from utils.norm_feature import FeatureNorm


"""
数据归一并保存
"""

dir_name = "ospline_big"


def normize():
    """Fit and apply feature normalization over the dataset's train split.

    Walks ``DataSet/datasets/<dir_name>/train``, reads every file as a CSV
    with the fixed AIS column layout, keeps only the model input columns,
    concatenates everything into one DataFrame, and runs FeatureNorm in
    'train' mode, which persists the scaler under ``weights/<dir_name>``.

    Returns:
        The normalized training DataFrame, or None when no input files
        were found under the train directory.
    """
    data_dir = os.path.join("DataSet/datasets", dir_name, "train")
    columns = ['date', 'mmsi', 'sog', 'lng', 'lat', 'cog', 'deltatime', 'deltalng', 'deltalat']
    input_columns = ['deltatime', 'deltalng', 'deltalat', 'sog', 'cog']

    # Collect per-file frames and concatenate ONCE at the end: calling
    # pd.concat inside the loop copies the accumulated data every
    # iteration, which is quadratic in the total row count.
    frames = []
    for root, dirs, files in os.walk(data_dir):
        # Sort for a deterministic row order across runs/platforms.
        for name in sorted(files):
            f_path = os.path.join(root, name)
            data = pd.read_csv(f_path, names=columns)
            frames.append(data[input_columns])

    if not frames:
        # The original code would pass None into FeatureNorm here and
        # fail with an unrelated-looking error; fail loudly instead.
        print("No training files found under %s" % data_dir)
        return None

    train_data = pd.concat(frames, ignore_index=True)
    print(train_data.shape)

    # Fit the scaler ('train' mode) and persist it to weights/<dir_name>.
    feature_norm = FeatureNorm()
    train_data = feature_norm(train_data, model_dir=os.path.join("weights", dir_name), mode='train')
    return train_data

'''
Recorded scaler parameters for the "ospline" dataset, kept for reference
(first line: data_max_, second: data_min_):
[1.00000000e+03 8.00000000e-05 6.50000000e-05 1.21567045e+02
 3.13473130e+01 1.70821860e+01 3.59999335e+02]
[ 0.0000000e+00 -8.1000000e-05 -6.2000000e-05  1.2145183e+02
  3.1014617e+01  2.3876000e-02  4.0000000e-04]
'''


if __name__ == "__main__":

    # 归一化
    normize()

    # 查看归一化参数
    scaler = joblib.load(os.path.join("weights/"+dir_name, 'scaler.pkl'))

    print(scaler.data_max_)
    print(scaler.data_min_)