import numpy as np
from sklearn.model_selection import train_test_split

def add_noise_to_data(data, snr_db):
    """Add white Gaussian noise to every sample at a fixed SNR.

    Vectorized replacement for the original per-sample Python loop; given
    the same RNG state it produces bit-identical output, because a single
    ``randn(*data.shape)`` draw consumes the stream in the same C-order as
    the sequential per-sample draws did.

    Parameters
    ----------
    data : np.ndarray
        Sample array, e.g. shape [n_classes, n_samples, 2, 128] I/Q frames.
        The first two axes index individual samples; everything after axis 1
        is treated as one sample.
    snr_db : float
        Target signal-to-noise ratio in decibels.

    Returns
    -------
    np.ndarray
        Noisy copy of ``data`` with the same shape.

    Notes
    -----
    The noise power is derived from each sample's *total* energy
    (``sum(sample**2)``), not the per-element power — this reproduces the
    original code's convention; NOTE(review): confirm this is the intended
    SNR definition, as many pipelines normalize by the sample length.
    The ``/2`` splits noise power across the I and Q components.
    """
    data = np.asarray(data)
    # Per-sample energy: reduce over every axis past the first two, matching
    # the original two-level (class, sample) loop. keepdims=True keeps the
    # result broadcastable against `data`.
    sample_axes = tuple(range(2, data.ndim))
    energy = np.sum(data ** 2, axis=sample_axes, keepdims=True)
    # Linear noise power for the requested SNR: E / 10^(snr_db/10).
    noise_power = energy / (10.0 ** (snr_db / 10.0))
    # One RNG draw for the whole array; per-sample scale broadcasts in.
    noise = np.sqrt(noise_power / 2.0) * np.random.randn(*data.shape)
    return data + noise

def split_collected_dataset(data_path, str_num, test_size=0.4, val_size=0.1, random_state=42):
    """Split a per-class collected dataset into train/val/test and save to disk.

    Loads ``collected_data_<str_num>_all.npy`` (shape [n_classes, n_samples,
    *sample_shape], e.g. [7, 5000, 2, 128]) and the matching label file,
    splits each class independently (preserving class balance), then
    concatenates, reshapes (adding a channel axis of size 1) and saves six
    ``.npy`` files next to the inputs.

    Parameters
    ----------
    data_path : str
        Directory containing the input ``.npy`` files (expected to end with '/').
    str_num : str
        Dataset identifier embedded in the file names.
    test_size : float
        Fraction of each class held out for the test set.
    val_size : float
        Fraction of the *remaining* (non-test) data used for validation.
        NOTE: this is a fraction of the remainder, not of the whole set —
        e.g. test_size=0.4, val_size=0.1 yields a 54/6/40 split overall.
    random_state : int
        Seed passed to both train_test_split calls for reproducibility.
    """
    # Load data and labels.
    data = np.load(f"{data_path}collected_data_{str_num}_all.npy")
    labels = np.load(f"{data_path}collected_label_{str_num}_all.npy")

    # Trailing per-sample shape, e.g. (2, 128) — avoids hard-coding it below.
    sample_shape = data.shape[2:]

    # Accumulators for the per-class splits.
    train_data, val_data, test_data = [], [], []
    train_labels, val_labels, test_labels = [], [], []

    # Split each class independently so every split stays class-balanced.
    for i in range(data.shape[0]):
        class_data = data[i]      # [n_samples, *sample_shape]
        class_labels = labels[i]  # [n_samples]

        # First carve off the test set.
        class_train_val_data, class_test_data, class_train_val_labels, class_test_labels = train_test_split(
            class_data, class_labels, test_size=test_size, random_state=random_state)

        # Then split the remainder into train and validation.
        class_train_data, class_val_data, class_train_labels, class_val_labels = train_test_split(
            class_train_val_data, class_train_val_labels, test_size=val_size, random_state=random_state)

        train_data.append(class_train_data)
        val_data.append(class_val_data)
        test_data.append(class_test_data)
        train_labels.append(class_train_labels)
        val_labels.append(class_val_labels)
        test_labels.append(class_test_labels)

    # Merge the per-class pieces; insert a singleton channel axis for the data
    # (shape [-1, 1, *sample_shape]) and a trailing axis for the labels.
    train_data = np.concatenate(train_data, axis=0).reshape(-1, 1, *sample_shape)
    val_data = np.concatenate(val_data, axis=0).reshape(-1, 1, *sample_shape)
    test_data = np.concatenate(test_data, axis=0).reshape(-1, 1, *sample_shape)
    train_labels = np.concatenate(train_labels, axis=0).reshape(-1, 1)
    val_labels = np.concatenate(val_labels, axis=0).reshape(-1, 1)
    test_labels = np.concatenate(test_labels, axis=0).reshape(-1, 1)

    # Print shapes to verify
    print("Train data shape:", train_data.shape)
    print("Validation data shape:", val_data.shape)
    print("Test data shape:", test_data.shape)
    print("Train labels shape:", train_labels.shape)
    print("Validation labels shape:", val_labels.shape)
    print("Test labels shape:", test_labels.shape)

    # Persist the six splits next to the inputs.
    np.save(f"{data_path}train_collected_data_{str_num}.npy", train_data)
    np.save(f"{data_path}val_collected_data_{str_num}.npy", val_data)
    np.save(f"{data_path}test_collected_data_{str_num}.npy", test_data)
    np.save(f"{data_path}train_collected_labels_{str_num}.npy", train_labels)
    np.save(f"{data_path}val_collected_labels_{str_num}.npy", val_labels)
    np.save(f"{data_path}test_collected_labels_{str_num}.npy", test_labels)

# Example usage: split the collected dataset with an 70/10/20-style split
# (val_size is a fraction of the non-test remainder).
if __name__ == "__main__":
    # Directory holding the collected .npy dataset files.
    dataset_dir = '/data4t/wwy_data/AML/dataset/'
    # Class index -> modulation name, for reference:
    #   0:16psk 1:16qam 2:64qam 3:8psk 4:8qam 5:bpsk 6:qpsk
    dataset_id = 5000
    split_collected_dataset(dataset_dir, str(dataset_id),
                            test_size=0.2, val_size=0.1, random_state=42)
