import os
import numpy as np
from torch.utils.data import DataLoader, TensorDataset, Dataset
import torch
from PIL import Image
from torchvision import transforms

class MyDataset(Dataset):
    """Dataset pairing two EEG feature views with a per-sample target.

    data1 -- Hjorth features, shape (num_samples, 4, 5)
    data2 -- Hilbert features, shape (num_samples, 400, 57)
    data3 -- per-sample targets/labels
    """

    def __init__(self, data1, data2, data3):
        self.hjorth = data1
        self.hibert = data2
        self.output = data3

    def __len__(self):
        # All three containers are assumed to be index-aligned and equal length.
        return len(self.hjorth)

    def __getitem__(self, idx):
        sample = (self.hjorth[idx], self.hibert[idx], self.output[idx])
        return sample

def _load_eeg_split(class_num, feature_num, path2, path1=None):
    """Load one data split (train or val) from disk.

    Parameters:
        class_num   -- number of classes (2 or 30); controls label parsing.
        feature_num -- 1: z-scored features only; 2: also raw features from
                       ``path1``; 3: also frame-image filenames for the
                       image/EEG contrastive dataset.
        path2       -- directory of z-scored .npy files.
        path1       -- directory of raw .npy files (used when feature_num == 2).

    Returns (feat1, feat2, specs, frames):
        feat1  -- list of raw-feature arrays (empty unless feature_num == 2)
        feat2  -- list of z-scored arrays, each transposed with (1, 0)
        specs  -- list of integer class labels
        frames -- list of frame image filenames (empty unless feature_num == 3)
    """
    # sorted() keeps the two directories aligned index-by-index; os.listdir
    # order is arbitrary, which could silently mispair path1/path2 files.
    names2 = sorted(os.listdir(path2))
    names1 = sorted(os.listdir(path1)) if feature_num == 2 else []
    feat1, feat2, specs, frames = [], [], [], []
    for idx, name2 in enumerate(names2):
        if feature_num == 2:
            feat1.append(np.load(os.path.join(path1, names1[idx])))

        arr = np.load(os.path.join(path2, name2))
        feat2.append(arr.transpose((1, 0)))

        if class_num == 2:
            # Binary label is encoded as the 17th character of the filename.
            if name2[16] == '1':
                spec = 0
            elif name2[16] == '2':
                spec = 1
            if feature_num == 3:
                # Random frame from the matching video: video 1 owns frames
                # 00-14, video 2 owns frames 15-29.
                rnd = np.random.randint(spec * 15, (spec + 1) * 15)
                frames.append(f"{spec + 1}_frame_{rnd:02d}.jpg")
        if class_num == 30:
            number = os.path.splitext(os.path.basename(name2))[0].split("_")[-1]
            # Bug fix: spec was only assigned when feature_num != 3, leaving it
            # unbound (NameError) in the feature_num == 3 path below.
            spec = int(number)
            if feature_num == 3:
                # NOTE(review): the original built this name with the unbound
                # `spec`; derive the video prefix from the frame number instead
                # (frames 0-14 -> video 1, 15-29 -> video 2) -- confirm mapping.
                frames.append(f"{1 if spec < 15 else 2}_frame_{number.zfill(2)}.jpg")
        specs.append(spec)
    return feat1, feat2, specs, frames


def getEEGDataloader(class_num, batch_size, feature_num=1):
    """Build shuffled train/val DataLoaders for the EEG decoding task.

    feature_num selects the dataset type:
        1 -- TensorDataset of (z-scored features, labels)
        2 -- MyDataset of (raw features, z-scored features, labels)
        3 -- EEGImageDataset of (frame image, z-scored features, labels)

    Returns (train_loader, val_loader).
    """
    root_data_path = '/root/data/video_decoding'
    train_path2 = os.path.join(root_data_path, f'npy_data_class_{class_num}_zscore', 'train')
    val_path2 = os.path.join(root_data_path, f'npy_data_class_{class_num}_zscore', 'val')
    train_path1 = val_path1 = None
    if feature_num == 2:
        train_path1 = os.path.join(root_data_path, f'npy_data_class_{class_num}', 'train')
        val_path1 = os.path.join(root_data_path, f'npy_data_class_{class_num}', 'val')

    tr1, tr2, tr_spec, project1 = _load_eeg_split(class_num, feature_num, train_path2, train_path1)
    feat_train2 = np.array(tr2)
    spec_train = np.array(tr_spec)
    print(feat_train2.shape)
    print(spec_train.shape)
    if feature_num == 2:
        feat_train1 = np.array(tr1)
        print(feat_train1.shape)

    va1, va2, va_spec, project2 = _load_eeg_split(class_num, feature_num, val_path2, val_path1)
    feat_val2 = np.array(va2)
    spec_val = np.array(va_spec)
    print(feat_val2.shape)
    print(spec_val.shape)

    if feature_num == 2:
        feat_val1 = np.array(va1)
        print(feat_val1.shape)
        train_dataset = MyDataset(torch.tensor(feat_train1).to(torch.float),
                                  torch.tensor(feat_train2).to(torch.float),
                                  torch.tensor(spec_train).to(torch.int64))
        val_dataset = MyDataset(torch.tensor(feat_val1).to(torch.float),
                                torch.tensor(feat_val2).to(torch.float),
                                torch.tensor(spec_val).to(torch.int64))
    elif feature_num == 1:
        train_dataset = TensorDataset(torch.tensor(feat_train2).to(torch.float),
                                      torch.tensor(spec_train).to(torch.int64))
        val_dataset = TensorDataset(torch.tensor(feat_val2).to(torch.float),
                                    torch.tensor(spec_val).to(torch.int64))
    else:
        # Standard ImageNet preprocessing for the frame images.
        image_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        train_dataset = EEGImageDataset(project1, torch.tensor(feat_train2).to(torch.float),
                                        torch.tensor(spec_train).to(torch.int64), image_transform)
        val_dataset = EEGImageDataset(project2, torch.tensor(feat_val2).to(torch.float),
                                      torch.tensor(spec_val).to(torch.int64), image_transform)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
    return train_loader, val_loader


# Stimulus image + EEG dataset
class EEGImageDataset(Dataset):
    """Pairs stimulus frame images with EEG features and labels for contrastive learning.

    image_files   -- frame filenames relative to the frames directory.
    eeg_features  -- tensor of EEG features, indexed in step with the labels.
    eeg_labels    -- tensor of integer class labels.
    image_transform -- torchvision transform applied to each loaded image.
    """

    def __init__(self, image_files, eeg_features, eeg_labels, image_transform):
        self.image_path = "/root/data/video_decoding/frames"
        self.image_files = image_files
        self.eeg_features = eeg_features
        self.eeg_labels = eeg_labels
        self.image_transform = image_transform
        # When the modalities differ in length, iterate over the longer one
        # and wrap the shorter one (simple augmentation strategy).
        self.data_length = max(len(self.eeg_labels), len(self.image_files))

    def __len__(self):
        return self.data_length

    def __getitem__(self, idx):
        # Bug fix: wrap every modality. The original only wrapped
        # eeg_features, so any idx past the shorter of image_files /
        # eeg_labels raised IndexError whenever the lengths differed.
        image_file = self.image_files[idx % len(self.image_files)]
        image = Image.open(os.path.join(self.image_path, image_file)).convert('RGB')
        image = self.image_transform(image)

        eeg_feature = self.eeg_features[idx % len(self.eeg_features)]
        eeg_label = self.eeg_labels[idx % len(self.eeg_labels)]

        # Contrastive-learning data pair.
        return image, eeg_feature, eeg_label
    
    
def load_picture(num):
    """Load and preprocess stimulus frames, returning a (N, 3, 224, 224) float tensor.

    num == 0 loads only two representative frames; any other value loads
    every frame in the frames directory (sorted for a stable order).
    """
    root_path = "/root/data/video_decoding/frames"
    if num == 0:    # load just two representative images
        image_files = ["1_frame_05.jpg", "2_frame_25.jpg"]
    else:
        image_files = sorted(os.listdir(root_path))

    # Build the transform once instead of once per image (was recreated
    # inside the loop).
    image_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    images = []
    for image_file in image_files:
        image = Image.open(os.path.join(root_path, image_file)).convert('RGB')
        images.append(image_transform(image))

    # torch.stack avoids the tensor -> numpy -> tensor round trip of
    # np.array(...) followed by torch.tensor(...).
    images = torch.stack(images)
    print(images.shape)
    return images

def similarity_function(eeg_feature, image_features):
    """Return, for each EEG embedding, the index of the most similar image feature.

    Parameters:
        eeg_feature    -- tensor of shape (N, D).
        image_features -- tensor of shape (M, D).

    Returns a CPU int64 tensor of shape (N,) holding argmax indices into
    ``image_features`` by cosine similarity.
    """
    # Vectorized: one (N, M) cosine-similarity matrix via broadcasting
    # replaces the per-row Python loop of the original.
    sims = torch.nn.functional.cosine_similarity(
        eeg_feature.unsqueeze(1), image_features.unsqueeze(0), dim=2
    )
    # .cpu() keeps the original contract (the loop built a CPU tensor).
    return sims.argmax(dim=1).cpu()


'''
Load audio + EEG
'''
import torchaudio

def getEEGWaveDataloader(batch_size, processor):
    """Build a shuffled DataLoader pairing 30-class z-scored EEG features with wav clips.

    Parameters:
        batch_size -- DataLoader batch size.
        processor  -- audio processor passed through to EEGWaveDataset
                      (called lazily per sample in __getitem__).
    """
    root_data_path = '/root/data/video_decoding'
    # Plain string: the original f-string had no placeholders.
    train_path = os.path.join(root_data_path, 'npy_data_class_30_zscore', 'train')
    wave_names = []
    feat_train_data = []

    # sorted() gives a deterministic sample order across runs/machines.
    for train_name in sorted(os.listdir(train_path)):
        file = np.load(os.path.join(train_path, train_name))
        file = file.transpose((1, 0))

        # Clip index is encoded in the filename stem after character 20.
        # NOTE(review): offset 20 is filename-layout dependent -- confirm.
        number = int(train_name[:-4][20:])
        # Clips 0-14 map to the "mj" wavs (1-based), 15-29 to "porter".
        if number >= 15:
            wave_names.append(f"porter_{number - 14}.wav")
        else:
            wave_names.append(f"mj_{number + 1}.wav")

        feat_train_data.append(file)

    feat_train = np.array(feat_train_data)
    print(feat_train.shape)

    train_dataset = EEGWaveDataset(wave_names, torch.tensor(feat_train).to(torch.float), processor)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    return train_loader


class EEGWaveDataset(Dataset):
    """Dataset yielding (processed audio inputs, EEG feature) pairs.

    wave_files   -- wav filenames relative to the split directory.
    eeg_features -- tensor of EEG features aligned with wave_files by index.
    processor    -- audio processor invoked per sample with 16 kHz audio.
    """

    def __init__(self, wave_files, eeg_features, processor):
        self.wave_path = '/root/data/video_decoding/video/split'
        self.wave_files = wave_files
        self.eeg_features = eeg_features
        self.data_length = len(self.wave_files)
        self.processor = processor

    def __len__(self):
        return self.data_length

    def __getitem__(self, idx):
        # Load the clip and resample: the processor expects a 16 kHz waveform.
        wave_file = os.path.join(self.wave_path, self.wave_files[idx])
        audio, orig_freq = torchaudio.load(wave_file)
        audio = torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16000)
        audio_inputs = self.processor(audios=audio, return_tensors="pt", sampling_rate=16000)
        # Flatten away the leading batch dimension the processor adds.
        feats = audio_inputs['input_features']
        audio_inputs['input_features'] = feats.reshape(-1, feats.shape[2])
        audio_inputs['attention_mask'] = audio_inputs['attention_mask'].reshape(-1)

        # Contrastive-learning data pair.
        return audio_inputs, self.eeg_features[idx]

def load_waves(processor):
    """Process all 30 split wav clips and return them stacked in one batch.

    Returns the processor output of the final clip with its
    'input_features' and 'attention_mask' entries replaced by stacked
    tensors covering every clip (mj_1..15 then porter_1..15).
    """
    root_data_path = '/root/data/video_decoding/video/split'
    wave_names = [f'mj_{i}.wav' for i in range(1, 16)] + [f'porter_{i}.wav' for i in range(1, 16)]

    input_features = []
    attention_mask = []
    for wave_name in wave_names:
        audio, orig_freq = torchaudio.load(os.path.join(root_data_path, wave_name))
        audio = torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16000)
        audio_inputs = processor(audios=audio, return_tensors="pt", sampling_rate=16000)
        feats = audio_inputs['input_features']
        input_features.append(feats.reshape(-1, feats.shape[2]))
        attention_mask.append(audio_inputs['attention_mask'].reshape(-1))

    # NOTE: the last clip's processor output is reused as the return
    # container so the result keeps the processor's mapping type.
    audio_inputs['input_features'] = torch.stack(input_features)
    audio_inputs['attention_mask'] = torch.stack(attention_mask)
    return audio_inputs


def getEEGValidDataloader(batch_size, class_num, type_='val', spec_path=0, band='high_gamma'):
    """Build a DataLoader over one split for evaluation (or training).

    Parameters:
        batch_size -- DataLoader batch size.
        class_num  -- number of classes (2 or 30); controls label parsing.
        type_      -- split name ('val', 'train', ...); 'train' shuffles.
        spec_path  -- 0: load z-scored features (and transpose them);
                      nonzero: load band-specific features from ``band``.
        band       -- frequency-band subdirectory used when spec_path != 0.
    """
    root_data_path = '/root/data/video_decoding'
    if spec_path != 0:
        val_path = os.path.join(root_data_path, f'npy_data_class_{class_num}', band, type_)
    else:
        val_path = os.path.join(root_data_path, f'npy_data_class_{class_num}_zscore', type_)

    feat_val_data = []
    spec_val_data = []
    # sorted(): os.listdir order is arbitrary; a fixed order makes the
    # unshuffled (non-train) loader reproducible across runs.
    for val_name in sorted(os.listdir(val_path)):
        file = np.load(os.path.join(val_path, val_name))
        if spec_path == 0:
            # z-scored files are stored transposed relative to model input.
            file = file.transpose((1, 0))

        name = val_name[:-4]  # strip the '.npy' extension
        if class_num != 2:
            # NOTE(review): offset 20 is filename-layout dependent -- confirm.
            number = int(name[20:])
        else:
            number = int(name[16]) - 1  # '1'/'2' in the filename -> label 0/1

        spec_val_data.append(number)
        feat_val_data.append(file)

    feat_val = np.array(feat_val_data)
    spec_val = np.array(spec_val_data)
    print(feat_val.shape)
    print(spec_val.shape)

    val_dataset = TensorDataset(torch.tensor(feat_val).to(torch.float),
                                torch.tensor(spec_val).to(torch.int64))
    # Only the training split is shuffled; evaluation keeps file order.
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=(type_ == 'train'))
    return val_loader
