import os
from torch.utils.data import Dataset
import numpy as np
from bs37ToARKit import ARKitBlendShapeLocation
import torchaudio
import PyWave
import json
import pickle
import random
import torch


def GetWaveInfo(wavPath):
    """Print basic properties of a WAVE file and return its sample rate.

    Args:
        wavPath: path to a .wav file readable by PyWave.

    Returns:
        The sample rate (frequency in Hz) of the file.
    """
    wf = PyWave.open(wavPath)
    try:
        print(f"This WAVE file [{wavPath}] has the following properties:")
        print(wf.channels, "channels")
        print(wf.frequency, "Hz sample rate")
        print(wf.bitrate, "bits per second")
        print(wf.samples, "total samples")
        print(wf.data_length, "data bytes")
        duration = float(wf.samples) / wf.frequency  # audio duration in seconds
        print(f'duration : {duration}')
        return wf.frequency
    finally:
        # Release the underlying file handle; the original leaked it.
        wf.close()
        
class DataPreprocessor():
    """Scans a directory tree for .wav files and matching .json/.npy
    blendshape annotation files, then serializes (waveform, blendshape)
    pairs into a single pickle file via Save().
    """

    def __init__(self, base_dir, max_count=0, shuffle=False, start_count=0):
        """
        Args:
            base_dir: root directory scanned recursively for .wav and label files.
            max_count: maximum number of files to export (0 = unlimited).
            shuffle: shuffle file order before exporting.
            start_count: index of the first file to export.
        """
        self.max_count = max_count
        self.shuffle = shuffle
        self.start_count = start_count
        # wav path -> label extension ('.json' / '.npy', '' when none found)
        self.fileMap = {}

        for root, _, files in os.walk(base_dir):
            for file in files:
                if file.endswith('.wav'):
                    self.fileMap[os.path.join(root, file)] = ''

        labelFileTypes = ['.json', '.npy']
        for root, _, files in os.walk(base_dir):
            for file in files:
                for labelFileType in labelFileTypes:
                    if file.endswith(labelFileType):
                        # Swap only the trailing extension; str.replace() would
                        # also hit a matching substring elsewhere in the path.
                        label_path = os.path.join(root, file)
                        wavName = label_path[:-len(labelFileType)] + '.wav'
                        if wavName in self.fileMap:
                            self.fileMap[wavName] = labelFileType

    def Save(self, save_path):
        """Loads every selected wav/annotation pair and pickles the list of
        {"wavedata": ..., "blendshape": ...} dicts to save_path."""
        keys = list(self.fileMap.keys())
        if self.shuffle:
            random.shuffle(keys)
        finalData = []
        for i, key in enumerate(keys):
            if i < self.start_count:
                continue
            # Stop after max_count items (0 means unlimited). The original
            # upper bound (i > start + max) was off by one and exported
            # max_count + 1 files.
            if self.max_count > 0 and i >= self.start_count + self.max_count:
                continue
            wav_path = key
            label_type = self.fileMap[key]
            GetWaveInfo(wav_path)
            waveform, sample_rate = torchaudio.load(wav_path)
            # Replace only the trailing '.wav' with the label extension.
            blendshape = self.LoadAnno(wav_path[:-len('.wav')] + label_type)
            wavedata = waveform.numpy()[0]  # first channel only
            data_pair = {"wavedata": wavedata,
                         "blendshape": blendshape}
            finalData.append(data_pair)

            print(f'read file {i} {wav_path}')

        with open(save_path, 'wb') as f:
            pickle.dump(finalData, f)

    def LoadAnno(self, anno):
        """Loads blendshape weights from a .json or .npy annotation file.

        Only the JawForward..MouthUpperUpRight channels are kept (negative
        weights clamped to 0); all other channels are zeroed. For .json files
        every second frame is skipped; for .npy files the last column is
        dropped. Returns a float32 numpy array (or [] for unknown extensions).
        """
        # Inclusive channel range kept from the 0-based weight vector.
        lo = ARKitBlendShapeLocation.JawForward.value - 1
        hi = ARKitBlendShapeLocation.MouthUpperUpRight.value - 1
        blendshape_temp = []
        if anno.endswith('.json'):
            with open(anno, 'r', encoding='utf8') as fp:
                parsed_json = json.loads(fp.read())
                # Keep every second frame only (halves the label frame rate).
                for j in range(0, len(parsed_json['frames']), 2):
                    item = parsed_json['frames'][j]
                    weights = []
                    for i, num in enumerate(item['weights']):
                        if lo <= i <= hi:
                            weights.append(max(float(num), 0.0))
                        else:
                            weights.append(0.0)
                    blendshape_temp.append(weights)
            blendshape_temp = np.array(blendshape_temp, dtype=np.float32)

        elif anno.endswith('.npy'):
            blendshape_temp = np.load(anno).astype(np.float32)
            for i in range(blendshape_temp.shape[1]):
                if lo <= i <= hi:
                    # Clamp negative weights to 0 in place.
                    np.clip(blendshape_temp[:, i], 0, None,
                            out=blendshape_temp[:, i])
                else:
                    blendshape_temp[:, i] = 0
            # Drop the trailing column (extra channel in .npy labels);
            # NOTE(review): the .json branch does not drop it — confirm intended.
            blendshape_temp = blendshape_temp[:, :-1]
        return blendshape_temp
    

 
class WavBlendshapeDataset(Dataset):
    """Maps each usable blendshape frame to a fixed-length audio window
    centered on that frame's position within its (zero-padded) waveform.

    Expects the pickle format written by DataPreprocessor.Save: a list of
    {"wavedata": 1-D array, "blendshape": (frames, channels) array} dicts.
    """

    def __init__(self, file, win_length=0.52, fps=30, sr=16000):
        """
        Args:
            file: path to the pickled list of wav/blendshape pairs.
            win_length: audio window length in seconds.
            fps: blendshape frame rate (frames per second).
            sr: audio sample rate in Hz.
        """
        with open(file, 'rb') as f:
            self.data = pickle.load(f)

        self.fps = fps
        self.sr = sr
        self.win_length = win_length
        # Half-window size in samples; hoisted here instead of being
        # recomputed per clip and on every get_audio_fragment() call.
        self.n_pad_samples = int(self.sr * self.win_length / 2)

        self.pad_wavedata = []   # zero-padded waveform per clip
        self.bs = []             # one blendshape vector per sample
        self.bs_waveindex = []   # clip index each sample belongs to
        self.bs_bsindex = []     # frame index within that clip
        for data_idx, data_pair in enumerate(self.data):
            wavedata = torch.Tensor(data_pair['wavedata'])
            blendshape = data_pair['blendshape']

            # Half a window of zeros on the left, a full window on the right,
            # so every kept frame has a complete window around it.
            pad = self.n_pad_samples
            padded = torch.cat([torch.zeros(pad), wavedata, torch.zeros(2 * pad)])
            self.pad_wavedata.append(padded)

            # Skip frames within one window length of either clip boundary.
            margin = self.fps * self.win_length
            for idx, bs in enumerate(blendshape):
                if margin < idx < blendshape.shape[0] - margin:
                    self.bs_bsindex.append(idx)
                    self.bs_waveindex.append(data_idx)
                    self.bs.append(bs)

        print(len(self.bs))

    def __len__(self):
        return len(self.bs)

    def __getitem__(self, index):
        wavedata = self.pad_wavedata[self.bs_waveindex[index]]
        idx = self.bs_bsindex[index]

        fragment = self.get_audio_fragment(wavedata, idx)
        # Keep only the mouth-related blendshape channels as the target.
        return fragment, self.bs[index][ARKitBlendShapeLocation.JawForward.value - 1:ARKitBlendShapeLocation.MouthUpperUpRight.value]

    def get_audio_fragment(self, audio, idx):
        """Returns the win_length audio window for frame idx of the padded
        clip, or None (best-effort, with a warning) if the clip is too short.
        """
        start = idx * self.sr // self.fps
        end = start + 2 * self.n_pad_samples
        # check if the audio is long enough
        if end > len(audio):
            print(f"Audio is not long enough to get fragment: {end} > {len(audio)}")
            return None
        return audio[start:end]
      
