import librosa
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import os
from torch.nn.utils.rnn import pad_sequence
import copy
from torch.utils.data import Subset
import re
import torch.nn as nn



def get_sentences(path):
    """Extract the Chinese-character portion of each line of a prosody label file.

    Args:
        path: Path to the label text file (one entry per line).

    Returns:
        List of strings, each holding only the CJK characters of a line;
        lines containing no Chinese characters are skipped.
    """
    sentences = []
    # BUG FIX: explicit UTF-8 — the file contains Chinese text and the
    # platform default encoding (e.g. cp1252 on Windows) may not decode it.
    with open(path, "r", encoding="utf-8") as file:
        for line in file:
            # Keep only characters in the basic CJK Unified Ideographs range.
            chinese_chars = ''.join(re.findall(r'[\u4e00-\u9fa5]', line))
            if chinese_chars:
                sentences.append(chinese_chars)
    return sentences

def load_audio(file_path):
    """Load an audio file with librosa, resampled to a fixed 22.05 kHz rate.

    Returns:
        Tuple of (waveform as a 1-D float array, sample rate in Hz).
    """
    waveform, sample_rate = librosa.load(file_path, sr=22050)
    return waveform, sample_rate

    
def get_all_waves_name(Wave_path):
    """Return the full paths of every .wav regular file directly inside Wave_path."""
    return [
        os.path.join(Wave_path, entry)
        for entry in os.listdir(Wave_path)
        if entry.endswith('.wav') and os.path.isfile(os.path.join(Wave_path, entry))
    ]

def get_words_dict(sentences):
    """Build a character-to-index vocabulary from a list of sentences.

    Args:
        sentences: Iterable of strings.

    Returns:
        Dict mapping each distinct character to a unique index in
        [0, n_chars), plus an 'UNK' entry at index n_chars — a valid row
        in an embedding table of size len(returned dict).
    """
    # Sort the characters so the mapping is deterministic across runs
    # (str hashing is randomized per process, so raw set order is not
    # reproducible — a saved model's vocabulary would not reload).
    chars = sorted({ch for sentence in sentences for ch in sentence})
    words_dict = {word: idx for idx, word in enumerate(chars)}
    # BUG FIX: 'UNK' previously mapped to -1, which is not a valid
    # nn.Embedding index; use the next free index so the embedding of
    # size len(words_dict) covers it.
    words_dict['UNK'] = len(chars)
    return words_dict


def get_embedding(vocab_size, embedding_dim):
    """Create a trainable lookup table mapping token ids to dense vectors.

    Args:
        vocab_size: Number of distinct token ids (rows of the table).
        embedding_dim: Size of each embedding vector (columns).

    Returns:
        A freshly-initialized nn.Embedding module.
    """
    return nn.Embedding(vocab_size, embedding_dim)


class TTSDataset(Dataset):
    """Dataset pairing audio files with embedded text for TTS training.

    Audio is read from disk lazily in __getitem__ (files are large);
    sentence embeddings are precomputed once in __init__.
    """

    def __init__(self, audio_files, sentences, embedding_dim=100, transform=None):
        self.audio_files = audio_files
        self.transform = transform
        # Build the vocabulary from a defensive copy of the sentences.
        self.dict = get_words_dict(copy.deepcopy(sentences))
        self.embedding = get_embedding(len(self.dict), embedding_dim)
        # Embed every sentence up front as a (len(sentence), embedding_dim) tensor.
        embedded = []
        for sentence in sentences:
            token_ids = torch.LongTensor([self.dict[word] for word in sentence])
            embedded.append(self.embedding(token_ids))
        self.sentences = embedded

    def __len__(self):
        return len(self.audio_files)

    def __getitem__(self, idx):
        # Load audio on demand to keep memory usage bounded.
        y, sr = load_audio(self.audio_files[idx])
        sample = {
            'audio_wave': y,
            'sr': sr,
            'sentence': self.sentences[idx]
        }
        return self.transform(sample) if self.transform else sample



def get_DataLoader(Wave_path = 'Data/Wave', ProsodyLabel_path = 'Data/ProsodyLabeling/000001-010000.txt', batch_size=32, num_workers = 0, embedding_dim = 100):
    """Build a shuffled DataLoader over the whole TTS corpus.

    Args:
        Wave_path: Directory containing the .wav files.
        ProsodyLabel_path: Prosody label text file with the sentences.
        batch_size: Samples per batch.
        num_workers: DataLoader worker processes.
        embedding_dim: Dimension of the character embeddings.

    Returns:
        Tuple of (DataLoader, vocabulary dict).
    """
    # Audio is too large to preload; TTSDataset reads files lazily per item.
    audio_files = get_all_waves_name(Wave_path)
    sentences = get_sentences(ProsodyLabel_path)

    dataset = TTSDataset(audio_files, sentences, embedding_dim)
    # BUG FIX: batch_size was hard-coded to 32 and num_workers was ignored;
    # both parameters are now honored. The identity collate_fn returns each
    # batch as a plain list of sample dicts (variable-length audio/text).
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                            num_workers=num_workers, collate_fn=lambda x: x)
    return dataloader, dataset.dict





def get_train_val_DataLoader(Wave_path = 'Data/Wave', ProsodyLabel_path = 'Data/ProsodyLabeling/000001-010000.txt', batch_size=32, num_workers=0, train_ratio=0.8, validation_ratio=0.1, embedding_dim = 100):
    """Build shuffled-train / sequential-validation DataLoaders over the corpus.

    The dataset indices are shuffled and split by ratio; indices beyond
    train + validation are left unused (an implicit held-out remainder).

    Returns:
        Tuple of (train DataLoader, validation DataLoader, vocabulary dict).
    """
    # Audio is too large to preload; TTSDataset reads files lazily per item.
    audio_files = get_all_waves_name(Wave_path)
    sentences = get_sentences(ProsodyLabel_path)

    dataset = TTSDataset(audio_files, sentences, embedding_dim)

    # Sizes of each split, derived from the requested ratios.
    total = len(dataset)
    n_train = int(train_ratio * total)
    n_val = int(validation_ratio * total)

    # Shuffle the index list so the split is random.
    order = list(range(total))
    np.random.shuffle(order)

    # Carve the shuffled indices into the two subsets.
    train_subset = Subset(dataset, order[:n_train])
    val_subset = Subset(dataset, order[n_train:n_train + n_val])

    # Identity collate_fn: samples are dicts holding variable-length audio
    # and text, so each batch is returned as a plain list of sample dicts.
    train_dataloader = DataLoader(train_subset, batch_size=batch_size, shuffle=True,
                                  num_workers=num_workers, collate_fn=lambda x: x)
    validation_dataloader = DataLoader(val_subset, batch_size=batch_size, shuffle=False,
                                       num_workers=num_workers, collate_fn=lambda x: x)

    return train_dataloader, validation_dataloader, dataset.dict





