import torch
import os
from torch.utils.data import Dataset
import soundfile as sf
import numpy as np
import pandas as pd
from scipy.io import wavfile
from scipy import signal
from scipy.signal import resample_poly, correlate, correlation_lags
from numpy.linalg import norm
from diffuse_noise import (gen_desired_spatial_coherence, gen_diffuse_noise)
from pathlib import Path
def circular_array_geometry(radius: float, mic_num: int) -> np.ndarray:
    """Return the 3-D positions of ``mic_num`` microphones evenly spaced on a
    circle of ``radius`` centred at the origin, in the z=0 plane.

    The first microphone lies on the +x axis. The array can later be rotated
    or translated to place it anywhere in a room.

    Args:
        radius: circle radius in metres.
        mic_num: number of microphones on the circle.

    Returns:
        np.ndarray of shape (mic_num, 3).
    """
    # Each position is the unit vector (cos a, sin a, 0) scaled by radius.
    # The original code rotated (1, 0, 0) per mic and re-normalized vectors
    # that were already unit length; the direct formula is equivalent.
    angles = np.arange(0, 2 * np.pi, 2 * np.pi / mic_num)
    pos_rcv = np.zeros((mic_num, 3))
    pos_rcv[:, 0] = np.cos(angles)
    pos_rcv[:, 1] = np.sin(angles)
    return pos_rcv * radius

def normalize(vec: np.ndarray) -> np.ndarray:
    """Return ``vec`` scaled to unit Euclidean length.

    Args:
        vec: arbitrary non-zero vector.

    Returns:
        A new array with the same direction as ``vec`` and norm 1.

    Raises:
        ValueError: if ``vec`` has zero or non-finite norm (the original code
            would have produced NaNs and then failed an ``assert``, which is
            stripped under ``python -O``).
    """
    n = norm(vec)
    if not np.isfinite(n) or n == 0:
        raise ValueError('cannot normalize a vector with zero or non-finite norm')
    # The original divided by the norm twice; once is sufficient.
    return vec / n

def audiowu_high_array_geometry() -> np.array:
    """Geometry of the 9-mic high-resolution array of the audio lab at
    Westlake University: 8 mics on a 3 cm-radius circle plus one mic at the
    array centre.
    """
    radius_m = 0.03
    geometry = np.zeros((9, 3))
    # Mics 0-7 sit on the circle; mic 8 stays at the origin (all zeros).
    geometry[:8, :] = circular_array_geometry(radius=radius_m, mic_num=8)
    return geometry


def search_files(dir_path, flag):
    """Recursively collect every file under ``dir_path`` whose path ends
    with ``flag``, preserving ``os.listdir`` traversal order.
    """
    matches = []
    for entry in os.listdir(dir_path):
        full_path = os.path.join(dir_path, entry)
        if os.path.isdir(full_path):
            # Descend into sub-directories depth-first.
            matches.extend(search_files(full_path, flag))
        if os.path.isfile(full_path) and full_path.endswith(flag):
            matches.append(full_path)
    return matches

def pad_or_cut(wavs, lens, rng):
    """repeat signals if they are shorter than the length needed, then cut them to needed
    """
    for i, wav in enumerate(wavs):
        # repeat
        while len(wav) < lens[i]:
            wav = np.concatenate([wav, wav])
        # cut to needed length
        if len(wav) > lens[i]:
            start = rng.integers(low=0, high=len(wav) - lens[i] + 1)
            wav = wav[start:start + lens[i]]
        wavs[i] = wav
    return wavs

class RealData(Dataset):
    """Multi-channel real-recording dataset that mixes recorded speech with
    real or simulated diffuse noise at a random SNR.

    Each item is ``(mixed signal [T, 9], DoA labels [40, 1], source VAD [40, 1])``.

    NOTE: the original body mixed tabs and spaces (TabError under Python 3);
    this version uses consistent indentation.
    """

    def __init__(self, data_dir, target_dir, noise_dir, add_noise_type, keywords,
                 new_fs=16000, snr=[-5, 10], wav_use_len=4, high_level=True):
        """
        Args:
            data_dir: root directory of the recordings.
            target_dir: iterable of csv paths with 'filename' and 'angle(°)' columns.
            noise_dir: root directory searched for real noise recordings.
            add_noise_type: 'real' or 'simu'.
            keywords: only noise paths containing one of these substrings are kept.
            new_fs: working sample rate (Hz).
            snr: [low, high] range the per-item SNR is drawn from.
            wav_use_len: segment length in seconds.
            high_level: must be True; selects the high-position array files.
        """
        if high_level:
            self.level = 'CH1.flac'
        else:
            # The original silently left self.level unset and crashed later
            # with AttributeError; fail fast instead.
            raise ValueError('only high_level=True is supported')

        # Collect recording paths and their DoA targets from the csv files.
        self.data_paths = []
        self.all_targets = pd.DataFrame()
        for csv_path in target_dir:
            target = pd.read_csv(csv_path)
            self.data_paths += [data_dir + i for i in target['filename'].to_list()]
            self.all_targets = pd.concat([self.all_targets, target], ignore_index=True)
        self.all_targets.set_index('filename', inplace=True)

        # Real noise: keep only reference-channel files matching a keyword.
        noise_paths = search_files(noise_dir, flag=self.level)
        self.noise_paths = [path for path in noise_paths if any(keyword in path for keyword in keywords)]

        self.new_fs = new_fs
        self.SNR = snr
        self.wav_use_len = wav_use_len  # was hard-coded to 4, ignoring the argument
        self.noise_type = add_noise_type

        # Spatial coherence of the 9-mic array, used to shape simulated diffuse noise.
        pos_mics = audiowu_high_array_geometry()
        _, self.Cs = gen_desired_spatial_coherence(pos_mics=pos_mics, fs=self.new_fs, noise_field='spherical', c=343, nfft=512)

        # Source material for simulated babble / factory noise.
        self.noises_babble = list(Path('/home/wangyabo/real_record_exp/clean').rglob('*.wav'))
        self.factory_noises = [sf.read(noise_path, dtype='float32', always_2d=False)[0] for noise_path in ['/home/wangyabo/real_record_exp/Noise92X/factory1.wav', '/home/wangyabo/real_record_exp/Noise92X/factory2.wav']]  # [T]
        self.factory_noise_sr = sf.info('/home/wangyabo/real_record_exp/Noise92X/factory1.wav').samplerate
        self.factory_noises = [resample_poly(noise, up=self.new_fs, down=self.factory_noise_sr, axis=0) for noise in self.factory_noises]

        ################################# VAD CHECK ########################
        # Each line of vad_results.txt: "<wav_id> <start>_<end>,<start>_<end>,..."
        noise_vad_path = data_dir + "/vad_results.txt"
        with open(noise_vad_path, "r") as f:  # the original left this handle unclosed
            vad_lines = [x.strip() for x in f.readlines()]
        self.noise_vad = {x.split(" ")[0]: self.vad_annotation_to_tuple(x.split(" ")[1]) for x in vad_lines}

    def __len__(self):
        return len(self.data_paths)

    def vad_annotation_to_tuple(self, vad_annotation):
        """Parse "s_e,s_e,..." into a list of (start, end) float pairs."""
        return [(float(x.split("_")[0]), float(x.split("_")[1])) for x in vad_annotation.split(",")]

    def seg_signal(self, signal, fs, rng, len_signal_s=4):
        """Cut a random ``len_signal_s``-second segment from ``signal`` ([T, C]).

        Returns (segment, start_sample).
        """
        signal_start = rng.integers(low=0, high=signal.shape[0] - (len_signal_s * fs))
        seg_signal = signal[signal_start:signal_start + (len_signal_s * fs), :]
        return seg_signal, signal_start

    def load_signals(self, sig_path):
        """Load the 9 channels of one recording (CH1..CH8 + centre mic CH25).

        Returns (signals [T, 9], sample rate reported by the last channel).
        """
        channels = []
        for i in range(9):
            if i == 8:
                temp_path = sig_path.replace('.wav', '_CH25.wav')
            else:
                temp_path = sig_path.replace('.wav', '_CH' + str(i + 1) + '.wav')
            single_ch_signal, fs = sf.read(temp_path)
            channels.append(single_ch_signal)
        mul_ch_signals = np.stack(channels, axis=-1)
        return mul_ch_signals, fs

    def load_noise(self, noise_path, begin_index, end_index):
        """Load frames [begin_index, end_index) of the 9 noise channels.

        ``noise_path`` is the CH1 reference file; the other channels are
        derived by substituting the channel suffix.
        """
        channels = []
        for i in range(9):
            if i == 8:
                temp_path = noise_path.replace('_CH1.flac', f'_CH25.flac')
            else:
                temp_path = noise_path.replace('_CH1.flac', f'_CH{i+1}.flac')
            single_ch_signal, fs = sf.read(temp_path, start=begin_index, stop=end_index)
            channels.append(single_ch_signal)
        mul_ch_signals = np.stack(channels, axis=-1)
        return mul_ch_signals, fs

    def resample(self, mic_signal, fs, new_fs):
        """FFT-resample ``mic_signal`` ([T, C]) from ``fs`` to ``new_fs`` along time."""
        signal_resampled = signal.resample(mic_signal, int(mic_signal.shape[0] * new_fs / fs))
        return signal_resampled

    def get_snr_coff(self, wav1, wav2, target_dB):
        """Return the scale for ``wav2`` so that SNR(wav1, coeff*wav2) == target_dB.

        Returns None when either signal has zero or non-finite average energy.
        """
        ae1 = np.sum(wav1**2) / np.prod(wav1.shape)
        ae2 = np.sum(wav2**2) / np.prod(wav2.shape)
        if ae1 == 0 or ae2 == 0 or not np.isfinite(ae1) or not np.isfinite(ae2):
            return None
        coeff = np.sqrt(ae1 / ae2 * np.power(10, -target_dB / 10))
        return coeff

    def __getitem__(self, idx_seed):
        """Return (mixed signal, labels, vad) for ``idx_seed = (index, rng seed)``.

        Seeding per item keeps the noise mixing reproducible.
        """
        idx, seed = idx_seed
        rng = np.random.default_rng(np.random.PCG64(seed))

        sig_path = self.data_paths[idx]
        snr_item = rng.uniform(self.SNR[0], self.SNR[1])
        mic_signal, fs = self.load_signals(sig_path)
        # NOTE(review): assumes recordings are already at self.new_fs; the fs
        # returned by load_signals is not checked against it — confirm upstream.
        len_signal = mic_signal.shape[0] / self.new_fs
        if len_signal < 5:
            # Short clip: zero-pad (or truncate) to wav_use_len seconds.
            new_length = int(self.wav_use_len * self.new_fs)
            new_mic_signal = np.zeros((new_length, mic_signal.shape[1]))
            min_length = min(new_length, mic_signal.shape[0])
            new_mic_signal[:min_length, :] = mic_signal[:min_length, :]
            target = self.all_targets.at[sig_path.split('final_train/')[-1], 'angle(°)']
            if isinstance(target, float):
                # Static source: constant DoA; VAD active only where audio exists.
                labels = torch.ones((40, 1)) * int(target)
                vad_source = torch.zeros((40, 1))
                end_index = min(int(len_signal * 10), 40)  # 10 label frames per second, capped at 40
                vad_source[:end_index] = 1
            elif isinstance(target, str):
                # Moving source: comma-separated per-frame DoAs.
                temp_labels = np.array([int(float(i)) for i in target.split(',')])
                labels = torch.zeros((40, 1))
                length_to_copy = min(len(temp_labels), 40)
                labels[:length_to_copy, :] = torch.from_numpy(temp_labels[:length_to_copy, np.newaxis])
                vad_source = torch.zeros((40, 1))
                vad_source[:length_to_copy] = 1
        else:
            # Long clip: cut a random wav_use_len-second segment.
            new_mic_signal, signal_start = self.seg_signal(signal=mic_signal, fs=self.new_fs, rng=rng)
            target = self.all_targets.at[sig_path.split('final_train/')[-1], 'angle(°)']
            if isinstance(target, float):
                labels = torch.ones((40, 1)) * int(target)
                vad_source = torch.ones((40, 1))
            elif isinstance(target, str):
                labels = np.array([int(float(i)) for i in target.split(',')])
                label_begin = int(signal_start / 1600)  # 1600 samples per label frame (0.1 s at 16 kHz)
                labels = torch.from_numpy(labels[label_begin:label_begin + 40, np.newaxis])
                vad_source = torch.ones((40, 1))

        if self.noise_type == 'real':
            # Random segment of a random recorded noise file.
            noise_path = self.noise_paths[rng.integers(low=0, high=len(self.noise_paths))]
            wav_info = sf.info(noise_path)
            wav_frames = wav_info.frames
            noise_begin_index = rng.integers(low=0, high=wav_frames - (self.wav_use_len * fs))
            noise_end_index = noise_begin_index + (self.wav_use_len * fs)
            noise_signal, noise_fs = self.load_noise(noise_path, begin_index=noise_begin_index, end_index=noise_end_index)
        elif self.noise_type == 'simu':
            noise_type_list = ['white', 'babble', 'factory']
            noise_type = noise_type_list[rng.integers(low=0, high=3)]
            mix_frames = new_mic_signal.shape[0]
            # Generate diffuse noise shaped by the array coherence Cs.
            if noise_type == 'babble':
                # Babble: sum 10 random speech clips per channel.
                noises = []
                for i in range(9):
                    noise_i = np.zeros(shape=(mix_frames,), dtype=new_mic_signal.dtype)
                    for j in range(10):
                        noise_path = self.noises_babble[rng.integers(low=0, high=len(self.noises_babble))]
                        noise_ij, sr_noise = sf.read(noise_path, dtype='float32', always_2d=False)  # [T]
                        noise_ij = resample_poly(noise_ij, up=self.new_fs, down=sr_noise, axis=0)
                        assert noise_ij.ndim == 1
                        noise_i += pad_or_cut([noise_ij], lens=[mix_frames], rng=rng)[0]
                    noises.append(noise_i)
                noise = np.stack(noises, axis=0).reshape(-1)
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            elif noise_type == 'white':
                noise = rng.normal(size=new_mic_signal.shape[0] * new_mic_signal.shape[1])
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            elif noise_type == 'factory':
                noise = self.factory_noises[rng.integers(low=0, high=len(self.factory_noises))]
                noise = pad_or_cut([noise], lens=[mix_frames * 9], rng=rng)[0]  # [T*9]
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            else:
                # Unreachable with the current noise_type_list; kept as a guard.
                assert noise_type == 'point', ('unknown noise type', noise_type)
                noise_signal = None
            noise_signal = noise_signal.T  # -> [mix_frames, num_mic]
        # NOTE(review): any other self.noise_type leaves noise_signal unbound
        # and raises below — confirm callers only pass 'real' or 'simu'.

        # Scale the noise to hit the sampled SNR; fall back to 1.0 when the
        # energies are degenerate (was a bare try/except around an assert).
        coeff = self.get_snr_coff(new_mic_signal, noise_signal, snr_item)
        if coeff is None:
            coeff = 1.0
        noise_signal = coeff * noise_signal
        new_mic_signal += noise_signal
        return new_mic_signal, labels.to(torch.float32), vad_source.to(torch.float32)
  
class RealData2(Dataset):
    """Variant of ``RealData`` for flac recordings (``data_flac_16`` layout);
    mixes each recording with real or simulated diffuse noise at a random SNR.

    Each item is ``(mixed signal [T, 9], DoA labels [40, 1], source VAD [40, 1])``.
    """

    def __init__(self, data_dir, target_dir, noise_dir, add_noise_type, keywords,
                 new_fs=16000, snr=[-5, 10], wav_use_len=4, high_level=True):
        """
        Args:
            data_dir: root directory of the recordings.
            target_dir: iterable of csv paths with 'filename' and 'angle(°)' columns.
            noise_dir: root directory searched for real noise recordings.
            add_noise_type: 'real' or 'simu'.
            keywords: only noise paths containing one of these substrings are kept.
            new_fs: working sample rate (Hz).
            snr: [low, high] range the per-item SNR is drawn from.
            wav_use_len: segment length in seconds.
            high_level: must be True; selects the high-position array files.
        """
        if high_level:
            self.level = 'CH1.flac'
        else:
            # The original silently left self.level unset and crashed later
            # with AttributeError; fail fast instead.
            raise ValueError('only high_level=True is supported')

        # Collect recording paths and their DoA targets from the csv files.
        self.data_paths = []
        self.all_targets = pd.DataFrame()
        for csv_path in target_dir:
            target = pd.read_csv(csv_path)
            self.data_paths += [data_dir + i for i in target['filename'].to_list()]
            self.all_targets = pd.concat([self.all_targets, target], ignore_index=True)
        self.all_targets.set_index('filename', inplace=True)

        # Real noise: keep only reference-channel files matching a keyword.
        noise_paths = search_files(noise_dir, flag=self.level)
        self.noise_paths = [path for path in noise_paths if any(keyword in path for keyword in keywords)]

        self.new_fs = new_fs
        self.SNR = snr
        self.wav_use_len = wav_use_len  # was hard-coded to 4, ignoring the argument
        self.noise_type = add_noise_type

        # Spatial coherence of the 9-mic array, used to shape simulated diffuse noise.
        pos_mics = audiowu_high_array_geometry()
        _, self.Cs = gen_desired_spatial_coherence(pos_mics=pos_mics, fs=self.new_fs, noise_field='spherical', c=343, nfft=512)

        # Source material for simulated babble / factory noise.
        self.noises_babble = list(Path('/home/wangyabo/real_record_exp/clean').rglob('*.wav'))
        self.factory_noises = [sf.read(noise_path, dtype='float32', always_2d=False)[0] for noise_path in ['/home/wangyabo/real_record_exp/Noise92X/factory1.wav', '/home/wangyabo/real_record_exp/Noise92X/factory2.wav']]  # [T]
        self.factory_noise_sr = sf.info('/home/wangyabo/real_record_exp/Noise92X/factory1.wav').samplerate
        self.factory_noises = [resample_poly(noise, up=self.new_fs, down=self.factory_noise_sr, axis=0) for noise in self.factory_noises]

    def __len__(self):
        return len(self.data_paths)

    def seg_signal(self, signal, fs, rng, len_signal_s=4):
        """Cut a random ``len_signal_s``-second segment from ``signal`` ([T, C]).

        Returns (segment, start_sample).
        """
        signal_start = rng.integers(low=0, high=signal.shape[0] - (len_signal_s * fs))
        seg_signal = signal[signal_start:signal_start + (len_signal_s * fs), :]
        return seg_signal, signal_start

    def load_signals(self, sig_path):
        """Load the 9 channels of one flac recording (CH1..CH8 + centre mic CH25).

        Returns (signals [T, 9], sample rate reported by the last channel).
        """
        channels = []
        for i in range(9):
            if i == 8:
                temp_path = sig_path.replace('.flac', '_CH25.flac')
            else:
                temp_path = sig_path.replace('.flac', '_CH' + str(i + 1) + '.flac')
            single_ch_signal, fs = sf.read(temp_path)
            channels.append(single_ch_signal)
        mul_ch_signals = np.stack(channels, axis=-1)
        return mul_ch_signals, fs

    def load_noise(self, noise_path, begin_index, end_index):
        """Load frames [begin_index, end_index) of the 9 noise channels.

        ``noise_path`` is the CH1 reference file; the other channels are
        derived by substituting the channel suffix.
        """
        channels = []
        for i in range(9):
            if i == 8:
                temp_path = noise_path.replace('_CH1.flac', f'_CH25.flac')
            else:
                temp_path = noise_path.replace('_CH1.flac', f'_CH{i+1}.flac')
            single_ch_signal, fs = sf.read(temp_path, start=begin_index, stop=end_index)
            channels.append(single_ch_signal)
        mul_ch_signals = np.stack(channels, axis=-1)
        return mul_ch_signals, fs

    def resample(self, mic_signal, fs, new_fs):
        """FFT-resample ``mic_signal`` ([T, C]) from ``fs`` to ``new_fs`` along time."""
        signal_resampled = signal.resample(mic_signal, int(mic_signal.shape[0] * new_fs / fs))
        return signal_resampled

    def get_snr_coff(self, wav1, wav2, target_dB):
        """Return the scale for ``wav2`` so that SNR(wav1, coeff*wav2) == target_dB.

        Returns None when either signal has zero or non-finite average energy.
        """
        ae1 = np.sum(wav1**2) / np.prod(wav1.shape)
        ae2 = np.sum(wav2**2) / np.prod(wav2.shape)
        if ae1 == 0 or ae2 == 0 or not np.isfinite(ae1) or not np.isfinite(ae2):
            return None
        coeff = np.sqrt(ae1 / ae2 * np.power(10, -target_dB / 10))
        return coeff

    def __getitem__(self, idx_seed):
        """Return (mixed signal, labels, vad) for ``idx_seed = (index, rng seed)``.

        Seeding per item keeps the noise mixing reproducible.
        """
        idx, seed = idx_seed
        rng = np.random.default_rng(np.random.PCG64(seed))

        sig_path = self.data_paths[idx]
        snr_item = rng.uniform(self.SNR[0], self.SNR[1])
        mic_signal, fs = self.load_signals(sig_path)
        # NOTE(review): assumes recordings are already at self.new_fs; the fs
        # returned by load_signals is not checked against it — confirm upstream.
        len_signal = mic_signal.shape[0] / self.new_fs
        if len_signal < 5:
            # Short clip: zero-pad (or truncate) to wav_use_len seconds.
            new_length = int(self.wav_use_len * self.new_fs)
            new_mic_signal = np.zeros((new_length, mic_signal.shape[1]))
            min_length = min(new_length, mic_signal.shape[0])
            new_mic_signal[:min_length, :] = mic_signal[:min_length, :]
            target = self.all_targets.at[sig_path.split('data_flac_16/')[-1], 'angle(°)']
            if isinstance(target, float):
                # Static source: constant DoA; VAD active only where audio exists.
                labels = torch.ones((40, 1)) * int(target)
                vad_source = torch.zeros((40, 1))
                end_index = min(int(len_signal * 10), 40)  # 10 label frames per second, capped at 40
                vad_source[:end_index] = 1
            elif isinstance(target, str):
                # Moving source: comma-separated per-frame DoAs.
                temp_labels = np.array([int(float(i)) for i in target.split(',')])
                labels = torch.zeros((40, 1))
                length_to_copy = min(len(temp_labels), 40)
                labels[:length_to_copy, :] = torch.from_numpy(temp_labels[:length_to_copy, np.newaxis])
                vad_source = torch.zeros((40, 1))
                vad_source[:length_to_copy] = 1
        else:
            # Long clip: cut a random wav_use_len-second segment.
            new_mic_signal, signal_start = self.seg_signal(signal=mic_signal, fs=self.new_fs, rng=rng)
            target = self.all_targets.at[sig_path.split('data_flac_16/')[-1], 'angle(°)']
            if isinstance(target, float):
                labels = torch.ones((40, 1)) * int(target)
                vad_source = torch.ones((40, 1))
            elif isinstance(target, str):
                labels = np.array([int(float(i)) for i in target.split(',')])
                label_begin = int(signal_start / 1600)  # 1600 samples per label frame (0.1 s at 16 kHz)
                labels = torch.from_numpy(labels[label_begin:label_begin + 40, np.newaxis])
                vad_source = torch.ones((40, 1))

        if self.noise_type == 'real':
            # Random segment of a random recorded noise file.
            noise_path = self.noise_paths[rng.integers(low=0, high=len(self.noise_paths))]
            wav_info = sf.info(noise_path)
            wav_frames = wav_info.frames
            noise_begin_index = rng.integers(low=0, high=wav_frames - (self.wav_use_len * fs))
            noise_end_index = noise_begin_index + (self.wav_use_len * fs)
            noise_signal, noise_fs = self.load_noise(noise_path, begin_index=noise_begin_index, end_index=noise_end_index)
        elif self.noise_type == 'simu':
            noise_type_list = ['white', 'babble', 'factory']
            noise_type = noise_type_list[rng.integers(low=0, high=3)]
            mix_frames = new_mic_signal.shape[0]
            # Generate diffuse noise shaped by the array coherence Cs.
            if noise_type == 'babble':
                # Babble: sum 10 random speech clips per channel.
                noises = []
                for i in range(9):
                    noise_i = np.zeros(shape=(mix_frames,), dtype=new_mic_signal.dtype)
                    for j in range(10):
                        noise_path = self.noises_babble[rng.integers(low=0, high=len(self.noises_babble))]
                        noise_ij, sr_noise = sf.read(noise_path, dtype='float32', always_2d=False)  # [T]
                        noise_ij = resample_poly(noise_ij, up=self.new_fs, down=sr_noise, axis=0)
                        assert noise_ij.ndim == 1
                        noise_i += pad_or_cut([noise_ij], lens=[mix_frames], rng=rng)[0]
                    noises.append(noise_i)
                noise = np.stack(noises, axis=0).reshape(-1)
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            elif noise_type == 'white':
                noise = rng.normal(size=new_mic_signal.shape[0] * new_mic_signal.shape[1])
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            elif noise_type == 'factory':
                noise = self.factory_noises[rng.integers(low=0, high=len(self.factory_noises))]
                noise = pad_or_cut([noise], lens=[mix_frames * 9], rng=rng)[0]  # [T*9]
                noise_signal = gen_diffuse_noise(noise=noise, L=mix_frames, Cs=self.Cs, nfft=512, rng=rng)  # shape [num_mic, mix_frames]
            else:
                # Unreachable with the current noise_type_list; kept as a guard.
                assert noise_type == 'point', ('unknown noise type', noise_type)
                noise_signal = None
            noise_signal = noise_signal.T  # -> [mix_frames, num_mic]
        # NOTE(review): any other self.noise_type leaves noise_signal unbound
        # and raises below — confirm callers only pass 'real' or 'simu'.

        # Scale the noise to hit the sampled SNR; fall back to 1.0 when the
        # energies are degenerate (was a bare try/except around an assert).
        coeff = self.get_snr_coff(new_mic_signal, noise_signal, snr_item)
        if coeff is None:
            coeff = 1.0
        noise_signal = coeff * noise_signal
        new_mic_signal += noise_signal
        return new_mic_signal, labels.to(torch.float32), vad_source.to(torch.float32)

class RealData_test_noise(Dataset):
    """Evaluation dataset (``dataset_val_test_0524`` layout): loads whole
    multi-channel recordings as-is (no noise mixing) and returns per-frame
    DoA labels with an all-active VAD.
    """

    def __init__(self, data_dir, target_dir, noise_dir, new_fs=16000, snr=[-5, 15], high_level=True):
        if high_level:
            self.level = 'high.wav'

        # Gather recording paths and the angle targets from every csv.
        self.data_paths = []
        self.all_targets = pd.DataFrame()
        for csv_path in target_dir:
            sheet = pd.read_csv(csv_path)
            self.data_paths.extend(data_dir + name for name in sheet['filename'].to_list())
            self.all_targets = pd.concat([self.all_targets, sheet], ignore_index=True)
        self.all_targets.set_index('filename', inplace=True)

        self.new_fs = new_fs
        self.SNR = snr  # not used at test time; kept for interface compatibility

    def __len__(self):
        return len(self.data_paths)

    def load_signals(self, sig_path):
        """Load the 9 channels of a flac recording (CH1..CH8 + centre CH25).

        Returns (signals [T, 9], sample rate of the last channel read).
        """
        channels = []
        for ch in range(1, 10):
            suffix = '_CH25.flac' if ch == 9 else '_CH' + str(ch) + '.flac'
            wav, fs = sf.read(sig_path.replace('.flac', suffix))
            channels.append(wav)
        return np.stack(channels, axis=-1), fs

    def resample(self, mic_signal, fs, new_fs):
        """FFT-resample ``mic_signal`` ([T, C]) from ``fs`` to ``new_fs`` along time."""
        target_len = int(mic_signal.shape[0] * new_fs / fs)
        return signal.resample(mic_signal, target_len)

    def __getitem__(self, idx):
        wav_path = self.data_paths[idx]
        mic_signal, fs = self.load_signals(wav_path)
        # 10 label frames per second of audio (assumed already at new_fs).
        duration_s = mic_signal.shape[0] / self.new_fs
        num_points = int(duration_s * 10)
        target = self.all_targets.at[wav_path.split('dataset_val_test_0524/')[-1], 'angle(°)']
        if isinstance(target, float):
            # Static source: one constant angle for all frames.
            labels = torch.ones((num_points, 1)) * int(target)
        elif isinstance(target, str):
            # Moving source: comma-separated per-frame angles.
            doa = np.array([int(float(v)) for v in target.split(',')])
            labels = torch.from_numpy(doa[:, np.newaxis])
        vad_source = torch.ones((labels.shape[0], 1))
        return mic_signal, labels.to(torch.float32), vad_source.to(torch.float32)


class RealData_test(Dataset):
    """Evaluation dataset (``data_flac_16`` layout): loads whole multi-channel
    recordings as-is (no noise mixing) and returns per-frame DoA labels with
    an all-active VAD.
    """

    def __init__(self, data_dir, target_dir, noise_dir, new_fs=16000, snr=[-5, 15], high_level=True):
        """
        Args:
            data_dir: root directory of the recordings.
            target_dir: iterable of csv paths with 'filename' and 'angle(°)' columns.
            noise_dir: unused here; kept for interface compatibility.
            new_fs: working sample rate (Hz).
            snr: unused at test time; kept for interface compatibility.
            high_level: selects the high-position array files.
        """
        if high_level:
            self.level = 'high.wav'

        # Gather recording paths and the angle targets from every csv.
        self.data_paths = []
        self.all_targets = pd.DataFrame()
        for csv_path in target_dir:
            target = pd.read_csv(csv_path)
            self.data_paths += [data_dir + i for i in target['filename'].to_list()]
            self.all_targets = pd.concat([self.all_targets, target], ignore_index=True)
        self.all_targets.set_index('filename', inplace=True)
        self.new_fs = new_fs
        self.SNR = snr

    def __len__(self):
        return len(self.data_paths)

    def load_signals(self, sig_path):
        """Load the 9 channels of a flac recording (CH1..CH8 + centre CH25).

        Returns (signals [T, 9], sample rate of the last channel read).
        """
        channels = []
        for i in range(9):
            if i == 8:
                temp_path = sig_path.replace('.flac', '_CH25.flac')
            else:
                temp_path = sig_path.replace('.flac', '_CH' + str(i + 1) + '.flac')
            single_ch_signal, fs = sf.read(temp_path)
            channels.append(single_ch_signal)
        mul_ch_signals = np.stack(channels, axis=-1)
        return mul_ch_signals, fs

    def resample(self, mic_signal, fs, new_fs):
        """FFT-resample ``mic_signal`` ([T, C]) from ``fs`` to ``new_fs`` along time."""
        signal_resampled = signal.resample(mic_signal, int(mic_signal.shape[0] * new_fs / fs))
        return signal_resampled

    def __getitem__(self, idx):
        sig_path = self.data_paths[idx]
        mic_signal, fs = self.load_signals(sig_path)
        # 10 label frames per second of audio (assumed already at new_fs).
        len_signal = mic_signal.shape[0] / self.new_fs
        num_points = int(len_signal * 10)
        target = self.all_targets.at[sig_path.split('data_flac_16/')[-1], 'angle(°)']
        if isinstance(target, str):
            # Moving source: comma-separated per-frame angles.
            labels = np.array([int(float(i)) for i in target.split(',')])
            labels = torch.from_numpy(labels[:, np.newaxis])
        else:
            # Static source: a single numeric angle. The original only accepted
            # `float`, leaving `labels` unbound (UnboundLocalError) when pandas
            # returned an integer-typed cell; any numeric now works, with
            # identical results for floats.
            labels = torch.ones((num_points, 1)) * int(target)
        vad_source = torch.ones((labels.shape[0], 1))
        return mic_signal, labels.to(torch.float32), vad_source.to(torch.float32)
