import os
import glob
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import groupby
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from xgboost import XGBClassifier


def load_single(args):
    """Load every ``.npy`` file directly under ``args.root_path`` and stack them.

    Each file is expected to hold records with 22 fields; columns
    0, 2, 3 and 13 are selected from every file, and all files are
    concatenated row-wise into one 2-D array.

    Parameters
    ----------
    args : argparse.Namespace
        Must expose ``root_path``, the directory to scan (non-recursive).

    Returns
    -------
    numpy.ndarray
        Array of shape (total_rows, 4) with the selected columns.
    """
    candidates = glob.glob(os.path.join(args.root_path, '*'))
    npy_files = [path for path in candidates
                 if os.path.isfile(path) and path.endswith('.npy')]
    collected = []
    for path in tqdm(npy_files, desc='seqs_load', ncols=70):
        raw = np.load(path, allow_pickle=True)
        # Flatten possibly structured/object records into a plain (N, 22) array,
        # then keep only the four columns of interest.
        flat = np.array([tuple(record) for record in raw])
        flat = flat.reshape(raw.shape[0], 22)
        collected.append(flat[:, [0, 2, 3, 13]])
    return np.vstack(collected)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='TimesNet')
    parser.add_argument('--root_path', type=str,
                        default=r'D:\pythonProject\Time-Series-Library-main\结果',
                        help='root path of the data file')
    cli_args = parser.parse_args()  # parse command-line arguments

    # Load and column-select all trajectory files under root_path.
    trajectories = load_single(cli_args)

    # Fit the scaler on the loaded features; this both computes the
    # per-column mean/variance and returns the standardized matrix.
    scaler = StandardScaler()
    train_features_normalized = scaler.fit_transform(trajectories)

    # Persist the fitted parameters so the identical normalization can be
    # re-applied later (e.g. at inference time).
    params_df = pd.DataFrame({
        'mean': scaler.mean_,  # per-feature mean of the training data
        'var': scaler.var_,    # per-feature variance of the training data
    })
    params_df.to_csv('scaler_params.csv', index=False)
