import librosa
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import os
from torch.nn.utils.rnn import pad_sequence
import copy
from torch.utils.data import Subset



def load_audio(file_path):
    """Load an audio file as a mono waveform resampled to 22050 Hz.

    Returns:
        (waveform, sample_rate) as produced by librosa.load.
    """
    waveform, sample_rate = librosa.load(file_path, sr=22050)
    return waveform, sample_rate

    
def get_all_waves_name(Wave_path):
    """Return the full paths of all .wav files directly inside Wave_path.

    Subdirectories are not descended into; non-files and non-.wav entries
    are skipped. Order follows os.listdir.
    """
    return [
        os.path.join(Wave_path, entry)
        for entry in os.listdir(Wave_path)
        if os.path.isfile(os.path.join(Wave_path, entry)) and entry.endswith('.wav')
    ]

def parse_textgrid(file_path): # handles the files under PhoneLabeling/
    """Parse a short-format TextGrid-like file into (start, end, label) tuples.

    Inside each '"IntervalTier"' section, every unquoted, non-blank line is
    tried as the start of a (float start, float end, quoted label) triple
    spanning that line and the next two; attempts that fail to parse are
    silently skipped.

    NOTE(review): because *every* numeric line is tried as a triple start,
    tier-header lines (xmin / xmax / interval count) also yield spurious
    tuples whose "label" is a numeric string. Downstream code appears to
    cope by locating the first 'sil' label and by skipping string-typed
    phones during alignment — confirm before relying on the raw output.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    intervals = []
    is_interval_tier = False  # True while scanning an "IntervalTier" section
    for i, line in enumerate(lines):
        line = line.strip()
        if line == '"IntervalTier"':
            is_interval_tier = True
            continue
        if is_interval_tier:
            # quoted lines are tier names / interval labels, not triple starts
            if line.startswith('"'):
                continue
            # a blank line ends the current tier section
            if len(line) == 0:
                is_interval_tier = False
                continue
            try:
                start = float(lines[i].strip())
                end = float(lines[i+1].strip())
                label = lines[i+2].strip().strip('"')
                intervals.append((start, end, label))
            except (ValueError, IndexError):
                # not a (number, number, label) triple at this position — skip
                pass
    return intervals

def get_ls_parse_textgrid(PhoneLabel_path):
    """Parse every regular file directly inside PhoneLabel_path as a TextGrid.

    Returns:
        A list of per-file interval lists, in os.listdir order.
    """
    return [
        parse_textgrid(os.path.join(PhoneLabel_path, entry))
        for entry in os.listdir(PhoneLabel_path)
        if os.path.isfile(os.path.join(PhoneLabel_path, entry))
    ]

def parse_prosody_labels(file_path): # handles the ProsodyLabeling file
    """Parse a prosody-label file into (sentence, pinyin_words, prosody) tuples.

    The file holds one record per pair of lines:
        line 0: "<sentence_id>\\t<sentence>"
        line 1: "<space-separated pinyin>\\t..."
    ``prosody`` collects the trailing digit (tone/prosody level) of each
    pinyin token that ends in a digit.

    Fixes vs. previous version:
      * empty tokens from double spaces no longer raise IndexError;
      * a trailing unpaired line is ignored instead of raising IndexError;
      * removed dead ``len(tuple) != 3`` check (a 3-tuple always has length 3)
        and the unused ``sentence_id`` variable.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    prosody_labels = []
    # step by 2: each record is a sentence line followed by a pinyin line
    for i in range(0, len(lines) - 1, 2):
        sentence = lines[i].strip().split('\t')[1]
        pinyin = lines[i + 1].strip().split('\t')[0]
        words = pinyin.split(' ')
        prosody = [int(word[-1]) for word in words if word and word[-1].isdigit()]
        prosody_labels.append((sentence, words, prosody))
    return prosody_labels

def extract_mel_spectrogram(y, sr):
    """Compute an 80-band mel spectrogram of waveform ``y`` at sample rate ``sr``.

    NOTE(review): the result is left in power scale — the dB conversion
    below was deliberately disabled.
    """
    spectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=80)
    #log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
    return spectrogram

def align_labels(mel_spec, phone_labels, prosody_labels, sr, sil_index, hop_length=512):
    """Align phone and prosody labels to the frames of a mel spectrogram.

    Args:
        mel_spec: array of shape (n_mels, n_frames).
        phone_labels: iterable of (start_sec, end_sec, phone_index) tuples;
            entries whose label is still a string (unconverted/spurious) are
            skipped.
        prosody_labels: iterable of (sentence, pinyin_words, prosody_levels)
            tuples; each sentence's prosody levels are spread uniformly over
            all frames (per-character timing is assumed uniform).
        sr: sample rate the spectrogram was computed at.
        sil_index: integer phone index used for frames with no phone.
        hop_length: STFT hop size in samples (frame duration = hop_length / sr).

    Returns:
        (aligned_phones, aligned_pitches, aligned_energies), each a list of
        length n_frames.

    NOTE(review): "pitches" and "energies" are both filled with the same
    prosody level — they are placeholders, not real F0/energy values.
    """
    mel_length = mel_spec.shape[1]
    duration_per_frame = hop_length / sr

    aligned_phones = [sil_index] * mel_length
    aligned_pitches = [0.0] * mel_length
    aligned_energies = [0.0] * mel_length

    for start, end, phone in phone_labels:
        # string labels are unconverted/spurious parser entries — skip them
        if isinstance(phone, str):
            continue
        start_frame = int(start / duration_per_frame)
        end_frame = int(end / duration_per_frame)
        for i in range(start_frame, min(end_frame, mel_length)):
            aligned_phones[i] = phone

    for sentence, pinyin, prosody in prosody_labels:
        # FIX: previously an empty prosody list caused ZeroDivisionError
        if not prosody:
            continue
        # assume each character occupies an equal share of the frames
        word_duration = mel_length / len(prosody)
        for i, prosody_level in enumerate(prosody):
            start_frame = int(i * word_duration)
            end_frame = int((i + 1) * word_duration)
            for j in range(start_frame, min(end_frame, mel_length)):
                aligned_pitches[j] = prosody_level
                aligned_energies[j] = prosody_level

    return aligned_phones, aligned_pitches, aligned_energies

def get_pinyin_dict(phones):
    """Build a phoneme -> integer index mapping from parsed phone labels.

    For each utterance, labels from the first 'sil' entry up to (but not
    including) the last entry are collected; if no 'sil' is found the whole
    list (minus the last entry) is used.

    Fixes vs. previous version:
      * renamed the local ``all`` (shadowed the builtin);
      * indices are now assigned over a *sorted* label set, so the mapping
        is deterministic across runs (``list(set(...))`` order depends on
        string-hash randomization).

    Returns:
        dict mapping each phoneme string to a unique index.
    """
    labels = []
    for intervals in phones:
        sil_pos = 0
        for i, interval in enumerate(intervals):
            if interval[-1] == 'sil':
                sil_pos = i
                break
        labels.extend(t[-1] for t in intervals[sil_pos:-1])
    return {phoneme: idx for idx, phoneme in enumerate(sorted(set(labels)))}

def phone_convert_2_tensor(phone_labels, dict):
    """Replace string phone labels with integer indices, in place.

    For each utterance, entries from the first 'sil' onward (index 0 if no
    'sil' is present) have their label looked up in ``dict`` and rewritten
    as (start, end, index). The input lists are mutated and also returned.
    """
    for labels in phone_labels:
        sil_pos = 0
        for pos, triple in enumerate(labels):
            if triple[-1] == 'sil':
                sil_pos = pos
                break
        for pos in range(sil_pos, len(labels)):
            start, end, label = labels[pos]
            labels[pos] = (start, end, dict[label])
    return phone_labels
    


class TTSDataset(Dataset):
    """Dataset of mel spectrograms with frame-aligned phone/prosody labels.

    Audio is loaded lazily in ``__getitem__`` because the corpus is too
    large to keep in memory.
    """

    def __init__(self, audio_files, phone_labels, prosody_labels, transform=None):
        """
        Args:
            audio_files: list of .wav file paths.
            phone_labels: per-file lists of (start, end, phone-string)
                tuples; converted in place to integer phone indices.
            prosody_labels: per-file (sentence, pinyin_words, prosody) tuples.
            transform: optional callable applied to each sample dict.
        """
        self.audio_files = audio_files
        self.prosody_labels = prosody_labels
        self.transform = transform
        # Build the phoneme->index mapping from a deep copy because
        # phone_convert_2_tensor mutates phone_labels in place.
        self.dict = get_pinyin_dict(copy.deepcopy(phone_labels))
        self.phone_labels = phone_convert_2_tensor(phone_labels, self.dict)

    def __len__(self):
        return len(self.audio_files)

    def __getitem__(self, idx):
        """Load one utterance and return its spectrogram plus aligned labels."""
        audio_file = self.audio_files[idx]
        phone_label = self.phone_labels[idx]
        prosody_label = self.prosody_labels[idx]

        y, sr = load_audio(audio_file)
        mel_spec = extract_mel_spectrogram(y, sr)
        aligned_phones, aligned_pitches, aligned_energies = align_labels(
            mel_spec, phone_label, [prosody_label], sr, sil_index=self.dict['sil'])
        sample = {
            'mel_spec': mel_spec,
            'phones': aligned_phones,
            'pitches': aligned_pitches,
            'energies': aligned_energies
        }
        if self.transform:
            sample = self.transform(sample)
        return sample

# def collate_fn(batch):
#     max_len = max([sample['mel_spec'].shape[1] for sample in batch])
    
#     batch_size = len(batch)
#     mel_spec_batch = torch.zeros(batch_size, 80, max_len)
#     phones_batch = torch.zeros(batch_size, max_len, dtype=torch.long)
#     pitches_batch = torch.zeros(batch_size, max_len)
#     energies_batch = torch.zeros(batch_size, max_len)

#     for i, sample in enumerate(batch):
#         mel_len = sample['mel_spec'].shape[1]
#         mel_spec_batch[i, :, :mel_len] = torch.tensor(sample['mel_spec'])
#         phones_batch[i, :mel_len] = torch.tensor(sample['phones'])
#         pitches_batch[i, :mel_len] = torch.tensor(sample['pitches'])
#         energies_batch[i, :mel_len] = torch.tensor(sample['energies'])
    
#     return {
#         'mel_spec': mel_spec_batch,
#         'phones': phones_batch,
#         'pitches': pitches_batch,
#         'energies': energies_batch
#     }

def get_DataLoader(Wave_path = 'Data/Wave', PhoneLabel_path = 'Data/PhoneLabeling/', ProsodyLabel_path = 'Data/ProsodyLabeling/000001-010000.txt', batch_size=32, num_workers = 0):
    """Build a DataLoader over the full TTS corpus.

    Note: ProsodyLabel_path is a single file, unlike the other two paths,
    which are directories.

    Returns:
        (dataloader, phoneme_to_index_dict)

    Fixes vs. previous version: ``batch_size`` and ``num_workers`` were
    silently ignored (batch size was hard-coded to 32 and workers never
    passed through); both are now honored.
    """
    # Audio is loaded lazily per sample — the corpus is too big for memory.
    audio_files = get_all_waves_name(Wave_path)
    phone_labels = get_ls_parse_textgrid(PhoneLabel_path)
    prosody_labels = parse_prosody_labels(ProsodyLabel_path)

    dataset = TTSDataset(audio_files, phone_labels, prosody_labels)
    # Identity collate: samples hold variable-length spectrograms, so each
    # batch is returned as a plain list of sample dicts.
    # NOTE(review): a lambda collate_fn cannot be pickled — with
    # num_workers > 0 this may fail on spawn-based platforms; confirm.
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                            num_workers=num_workers, collate_fn=lambda x: x)
    return dataloader, dataset.dict



def get_train_val_DataLoader(Wave_path='Data/Wave', PhoneLabel_path='Data/PhoneLabeling/', ProsodyLabel_path='Data/ProsodyLabeling/000001-010000.txt', batch_size=32, num_workers=0, train_ratio=0.8, validation_ratio=0.1):
    """Build shuffled train/validation DataLoaders over the TTS corpus.

    Indices are shuffled once, then split into ``train_ratio`` and
    ``validation_ratio`` fractions; any remainder is left unused.

    Returns:
        (train_dataloader, validation_dataloader, phoneme_to_index_dict)
    """
    audio_files = get_all_waves_name(Wave_path)
    phone_labels = get_ls_parse_textgrid(PhoneLabel_path)
    prosody_labels = parse_prosody_labels(ProsodyLabel_path)
    dataset = TTSDataset(audio_files, phone_labels, prosody_labels)

    total = len(dataset)
    n_train = int(train_ratio * total)
    n_val = int(validation_ratio * total)

    # Shuffle once so both splits are random but disjoint.
    shuffled = list(range(total))
    np.random.shuffle(shuffled)
    train_subset = Subset(dataset, shuffled[:n_train])
    validation_subset = Subset(dataset, shuffled[n_train:n_train + n_val])

    # Identity collate: variable-length samples are returned as plain lists.
    train_dataloader = DataLoader(train_subset, batch_size=batch_size,
                                  shuffle=True, num_workers=num_workers,
                                  collate_fn=lambda x: x)
    validation_dataloader = DataLoader(validation_subset, batch_size=batch_size,
                                       shuffle=False, num_workers=num_workers,
                                       collate_fn=lambda x: x)

    return train_dataloader, validation_dataloader, dataset.dict


