from torch.utils.data import Dataset
import numpy as np
from bs37ToARKit import ARKitBlendShapeLocation

class BlendshapeDataset(Dataset):
    """Dataset pairing per-frame audio features with blendshape targets.

    Parameters
    ----------
    feature_file : str
        Path to a ``.npy`` file of per-frame audio features.
    target_file : str
        Path to the blendshape targets: either a ``.npy`` file or a
        whitespace-delimited ``.txt`` file (loaded with ``np.loadtxt``).
    max_len : int, optional
        If > 0, keep only ``max_len`` randomly chosen frames; the same
        random subsample is applied to features and targets so pairs
        stay in correspondence.

    Raises
    ------
    ValueError
        If ``target_file`` has an extension other than ``.npy``/``.txt``.
    """

    def __init__(self, feature_file, target_file, max_len=0):
        self.wav_feature = np.load(feature_file)

        if target_file.endswith('.npy'):
            self.blendshape_target = np.load(target_file)
        elif target_file.endswith('.txt'):
            self.blendshape_target = np.loadtxt(target_file)
        else:
            # Fail fast instead of leaving the attribute unset, which would
            # otherwise surface later as a confusing AttributeError.
            raise ValueError(
                'Unsupported target file extension: {}'.format(target_file))

        if max_len > 0:
            # Random subsample of frame indices; permutation(n)[:k] is
            # equivalent to shuffling arange(n) and truncating. The same
            # index array keeps feature/target pairs aligned.
            indices = np.random.permutation(self.wav_feature.shape[0])[:max_len]
            self.wav_feature = self.wav_feature[indices]
            self.blendshape_target = self.blendshape_target[indices]
        # self._align()

    def __len__(self):
        return len(self.wav_feature)

    def _align(self):
        """Align audio features with blendshape targets.

        Generally the number of audio frames is smaller than the number of
        video frames. The original alignment logic is disabled (kept below
        for reference); this method is currently a no-op.
        """
        # n_audioframe, n_videoframe = len(self.wav_feature), len(self.blendshape_target)
        # print('Current dataset -- n_videoframe: {}, n_audioframe:{}'.format(n_videoframe, n_audioframe))
        # assert n_videoframe - n_audioframe <= 40
        # if n_videoframe != n_audioframe:
        #     start_videoframe = 16
        #     self.blendshape_target = self.blendshape_target[start_videoframe : start_videoframe+n_audioframe]

    def __getitem__(self, index):
        # Return the audio feature for this frame together with a contiguous
        # slice of blendshape channels from JawForward through
        # MouthUpperUpRight. NOTE(review): the `- 1` suggests the enum values
        # are 1-based column indices — confirm against bs37ToARKit.
        return (
            self.wav_feature[index],
            self.blendshape_target[
                index,
                ARKitBlendShapeLocation.JawForward.value - 1:
                ARKitBlendShapeLocation.MouthUpperUpRight.value,
            ],
        )