# Dataset-construction code, including inverse-normalization helpers.
import torch
import numpy as np
import os
import pandas as pd
from torch.utils.data import Dataset
from sklearn.preprocessing import MinMaxScaler
#from Function.create_sequences import create_sequences
def create_sequences(data, sequence_length,
                     feature_cols=('latitude', 'longitude', 'baroAltitude', 'geoAltitude', 'timeAtServer'),
                     target_cols=('latitude', 'longitude', 'baroAltitude')):
    """Build sliding-window (input sequence, next-step target) pairs.

    Args:
        data: pandas DataFrame containing at least the columns named in
            ``feature_cols`` and ``target_cols``.
        sequence_length: number of consecutive rows per input window.
        feature_cols: columns used as the model input (defaults match the
            original hard-coded list, so existing callers are unaffected).
        target_cols: columns predicted for the row immediately after each
            window.

    Returns:
        (X, y): X is a list of (sequence_length, len(feature_cols)) arrays,
        y is a list of len(target_cols) arrays; both empty when
        ``len(data) <= sequence_length``.
    """
    # Hoist the column selection out of the loop: the original re-selected the
    # DataFrame columns for every window, repeating the same pandas work O(n) times.
    features = data[list(feature_cols)].values
    targets = data[list(target_cols)].values
    X, y = [], []
    for start in range(len(data) - sequence_length):
        X.append(features[start:start + sequence_length])
        y.append(targets[start + sequence_length])
    return X, y

class AircraftDataset_new(Dataset):
    """Sliding-window aircraft-trajectory dataset.

    Loads either a single CSV file or every ``*.csv`` file in a folder,
    turns each file into (sequence, next-point) pairs via
    ``create_sequences``, and min-max normalizes inputs and targets with
    two independent scalers so each can be inverted separately.
    """

    def __init__(self, data_folder, sequence_length, train):
        """Build the dataset.

        Args:
            data_folder: path to a CSV file or to a directory of CSV files.
            sequence_length: window length passed to ``create_sequences``.
            train: only affects the "loaded" message printed at the end.

        Raises:
            FileNotFoundError: if ``data_folder`` is neither a file nor a
                directory (previously this fell through and crashed with a
                NameError on the undefined ``file_paths``).
            ValueError: if no sequences could be built (previously an
                obscure IndexError during reshaping).
        """
        data_list_X = []
        data_list_y = []

        if os.path.isdir(data_folder):
            print("this is dir")
            file_paths = [os.path.join(data_folder, f)
                          for f in os.listdir(data_folder) if f.endswith('.csv')]
        elif os.path.isfile(data_folder):
            print("this is file")
            file_paths = [data_folder]
            print("file_paths=", file_paths)
        else:
            raise FileNotFoundError(f"data_folder does not exist: {data_folder}")

        # Fix: progress was previously divided by len(os.listdir(...)), which
        # counts non-CSV entries too, so the bar could never reach 100%.
        total_files = len(file_paths)
        sample_count = 0
        for file_path in file_paths:
            data = pd.read_csv(file_path)
            if not data.empty:
                X, y = create_sequences(data, sequence_length)
                data_list_X.extend(X)
                data_list_y.extend(y)

            # Count every processed file (empty ones included) so the
            # progress bar reflects actual progress through file_paths.
            sample_count += 1
            completion = sample_count / total_files * 100
            progress_bar = ' {:.2f}% [{}{}]'.format(
                completion, '*' * int(completion), '.' * (100 - int(completion)))
            print('\r' + progress_bar, end='')

        if not data_list_X:
            raise ValueError(
                "no sequences could be built from {!r}; check that the CSVs "
                "have more than sequence_length rows".format(data_folder))

        X_np = np.array(data_list_X)
        y_np = np.array(data_list_y)
        # Separate scalers for inputs and targets so each stream can be
        # un-normalized on its own (see unnormalize_x / unnormalize_y).
        self.mm_x = MinMaxScaler()
        self.mm_y = MinMaxScaler()
        # MinMaxScaler expects 2-D input: flatten (N, L, F) -> (N*L, F),
        # scale per feature column, then restore the original shape.
        X_scaled = self.mm_x.fit_transform(X_np.reshape(-1, X_np.shape[-1])).reshape(X_np.shape)
        y_scaled = self.mm_y.fit_transform(y_np.reshape(-1, y_np.shape[-1])).reshape(y_np.shape)
        X_tensors = [torch.tensor(seq, dtype=torch.float32) for seq in X_scaled]
        y_tensors = [torch.tensor(seq, dtype=torch.float32) for seq in y_scaled]
        self.data = list(zip(X_tensors, y_tensors))

        if train:
            print('\n'+'----------------------Train Dataset Loaded Sucessfully!----------------------')
        else:
            print('\n'+'----------------------Test Dataset Loaded Sucessfully!----------------------')

    def __len__(self):
        """Return the number of (sequence, target) samples."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the (input_sequence, target) tensor pair at ``idx``."""
        return self.data[idx]

    def unnormalize_x(self, x):
        """Map a normalized input tensor back to original feature units.

        Fix: ``.detach().cpu()`` first, so tensors that require grad or
        live on a GPU no longer raise on ``.numpy()``.
        """
        x_np = x.detach().cpu().numpy()
        x_original = self.mm_x.inverse_transform(
            x_np.reshape(-1, x_np.shape[-1])).reshape(x_np.shape)
        return torch.tensor(x_original, dtype=torch.float32)

    def unnormalize_y(self, y):
        """Map a normalized target tensor back to original units."""
        y_np = y.detach().cpu().numpy()
        y_original = self.mm_y.inverse_transform(
            y_np.reshape(-1, y_np.shape[-1])).reshape(y_np.shape)
        return torch.tensor(y_original, dtype=torch.float32)
    