import numpy as np
import torch
from torch.utils.data import Dataset
import json
import random
from pathlib import Path
import os


class CPCDataset_sameSeq(Dataset):
    """Speech dataset yielding aligned mel / lf0 / wav2vec crops per utterance.

    Each item is ``(mel, lf0, wav2vec, speaker_index)`` where ``mel`` is
    ``(n_mels, n_sample_frames)``, ``lf0`` is ``(n_sample_frames,)`` and
    ``wav2vec`` is ``(feat_dim, n_sample_frames // 2)`` — the wav2vec stream
    is treated as running at half the mel frame rate (see ``__getitem__``).

    Args:
        root: dataset root containing ``<mode>/mels/<speaker>/`` directories
            and a ``<mode>.json`` metadata file. May be a ``str`` or ``Path``.
        n_sample_frames: number of mel frames per returned sample.
        mode: split name, e.g. ``"train"``.
    """

    def __init__(self, root, n_sample_frames, mode):
        self.root = Path(root)
        self.n_sample_frames = n_sample_frames

        # Fix: list speakers via self.root (a Path), not the raw `root`
        # argument — `root / ...` raises TypeError when root is a plain str.
        self.speakers = sorted(os.listdir(self.root / f'{mode}/mels'))

        with open(self.root / f"{mode}.json") as file:
            metadata = json.load(file)

        self.metadata = []
        # Each metadata row is [mel_len, mel_path, lf0_path, wav2vec_path];
        # mel_len is currently unused (length filtering was disabled).
        for mel_len, mel_out_path, lf0_out_path, wav2vec_path in metadata:
            # The metadata was generated on a different machine: replace the
            # first 19 characters of each absolute path (the original prefix
            # "/home/wang/codes/py") with the local dataset mount point.
            mel_out_path = Path("/opt/data/private" + mel_out_path[19:])
            lf0_out_path = Path("/opt/data/private" + lf0_out_path[19:])
            wav2vec_path = Path("/opt/data/private" + wav2vec_path[19:])

            # Speaker id is the name of the directory holding the mel file.
            speaker = mel_out_path.parent.stem
            self.metadata.append([speaker, mel_out_path, lf0_out_path, wav2vec_path])

        print('n_sample_frames:', n_sample_frames, 'metadata:', len(self.metadata))
        random.shuffle(self.metadata)

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, index):
        """Load one utterance, normalize, and crop an aligned random window.

        Returns:
            Tuple of ``(mel, lf0, wav2vec, speaker_index)`` torch tensors
            (speaker_index is a plain int).
        """
        speaker, mel_path, lf0_path, wav2vec_path = self.metadata[index]

        # Features are stored as (frames, dims); transpose to (dims, frames).
        mel = np.load(mel_path).T
        lf0 = np.load(lf0_path)
        wav2vec = np.load(wav2vec_path).T

        # Tile short utterances by self-concatenation until a full window fits.
        melt = mel
        wav2vect = wav2vec
        lf0t = lf0
        while mel.shape[-1] < self.n_sample_frames:
            mel = np.concatenate([mel, melt], -1)
            lf0 = np.concatenate([lf0, lf0t], 0)
        while wav2vec.shape[-1] < self.n_sample_frames // 2:
            wav2vec = np.concatenate([wav2vec, wav2vect], -1)

        # Per-utterance z-normalization of log-F0 over voiced frames only;
        # unvoiced frames (lf0 == 0) remain exactly 0 afterwards.
        zero_idxs = np.where(lf0 == 0.0)[0]
        nonzero_idxs = np.where(lf0 != 0.0)[0]
        if len(nonzero_idxs) > 0:
            mean = np.mean(lf0[nonzero_idxs])
            std = np.std(lf0[nonzero_idxs])
            if std == 0:
                # Constant voiced pitch: only center, to avoid 0/0.
                lf0 -= mean
                lf0[zero_idxs] = 0.0
            else:
                lf0 = (lf0 - mean) / (std + 1e-8)
                lf0[zero_idxs] = 0.0

        # Per-dimension z-normalization of the wav2vec features.
        mean = np.mean(wav2vec, axis=1).reshape(-1, 1)
        std = np.std(wav2vec, axis=1).reshape(-1, 1)
        wav2vec = (wav2vec - mean) / (std + 1e-8)

        # Draw the crop position on the coarser wav2vec timeline so the two
        # streams stay aligned (assumes 1 wav2vec frame == 2 mel frames —
        # TODO confirm against the feature-extraction script).
        # NOTE(review): if the mel stream is shorter than 2x the wav2vec
        # stream, the mel/lf0 crops below can come up short of
        # n_sample_frames — relies on upstream alignment guarantees.
        wav2vec_pos = random.randint(0, wav2vec.shape[-1] - self.n_sample_frames // 2)
        pos = wav2vec_pos * 2
        mel = mel[:, pos:pos + self.n_sample_frames]
        lf0 = lf0[pos:pos + self.n_sample_frames]
        wav2vec = wav2vec[:, wav2vec_pos:wav2vec_pos + self.n_sample_frames // 2]

        return torch.from_numpy(mel), torch.from_numpy(lf0), torch.from_numpy(wav2vec), self.speakers.index(speaker)




