package org.shiqianghh.tts;

import java.io.File;
import java.math.BigDecimal;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.tuple.Pair;
import org.jitsi.webrtcvadwrapper.WebRTCVad;
import org.shiqianghh.chinese.Sequence;
import org.shiqianghh.model.ModelUtils;
import org.shiqianghh.tools.Program;
import org.shiqianghh.voice.FfmpegUtils;

import com.google.common.collect.Lists;
import com.jlibrosa.audio.JLibrosa;

import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDArrays;
import ai.djl.ndarray.NDList;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.index.NDIndex;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;

public class DataUtils {
	
	// Scale factor applied to the normalized waveform to recover integer amplitude.
	// NOTE(review): 327680 looks like 32768 (2^15) with an extra zero — confirm against the source model.
	static float max_wav_value = 327680;
	// Mel window hop in milliseconds.
	static int mel_window_step = 10;
	// Number of mel frames per partial utterance window fed to the speaker encoder.
	static int partials_n_frames = 160;
	
	// Sample rate / channel count of the most recently loaded audio (set in get_data from FfmpegUtils).
	public static int samplerate = 0;
	public static int channels = 0;  
	
	/**
	 * End-to-end text-to-speech: extracts a speaker embedding from a reference
	 * recording, synthesizes mel frames for the text, vocodes them, and writes
	 * the result to a wav file.
	 *
	 * @param audiopath path of the reference audio used for the speaker embedding
	 * @param text      text to synthesize
	 * @param outs      destination wav file
	 * @param manager   NDManager owning the intermediate NDArrays
	 * @throws Exception on audio/model/file failures
	 */
	public static void  tts(String audiopath,String  text,File outs,NDManager manager) throws Exception{
		
		// Convert the raw inputs into the data the model needs:
		// [0] token ids, [1] mel of the reference audio, [2] speaker embedding.
		List<Object> dat = get_data(audiopath,text,manager);
		
		List<Integer> text_data_org = (List<Integer>) dat.get(0); 
		NDArray mel_data_org = (NDArray) dat.get(1);  
		int[] text_dataa = text_data_org.stream().mapToInt(Integer::intValue).toArray(); 
		NDList textlist = new NDList();
		NDArray text_data_temp = manager.create(text_dataa);
		textlist.add(text_data_temp);  
		
		NDList mellist = new NDList();
		mellist.add(mel_data_org);  
		
		NDArray text_data 		= text_data_temp ;
		text_data.setName("text");
		NDArray style_data 		= manager.create(0);
		NDArray speaker_data 	= (NDArray) dat.get(2);
		speaker_data.setName("speaker");
		NDArray f0_data 			= manager.create(0); 
		NDArray mel_data 		= NDArrays.stack(mellist);  
		
		// Only the two inputs below are used by the model; the commented-out ones
		// are parameters carried over from the original project.
		NDList input = new NDList();
		input.add(text_data);
		//input.add(style_data);
		input.add(speaker_data);
		//input.add(f0_data); 
		
		// Run the synthesizer to generate the mel output.
		Map<String,NDArray> result = ModelUtils.inference_chu(input);
		NDArray gates = result.get("gates");
		// NOTE(review): "alignments" is read from the "mels_postnet" key — looks like a
		// copy-paste slip (the value is also unused after the transpose below); confirm
		// the intended output key before changing.
		NDArray alignments = result.get("mels_postnet");
		NDArray mels_postnet = result.get("mels_postnet");
		NDArray mels = result.get("mels");
		
		alignments = alignments.transpose(1, 0);
		gates = gates.transpose(1, 0);
		NDArray out_gate = gates.get(0);
		//System.out.println(out_gate.toDebugString(1000000000, 1000, 1000, 1000));
		// Find the LAST frame whose stop-gate value exceeds 0.2; that frame ends the utterance.
		NDArray end_idx = out_gate.gt(0.2);  
		boolean[]  blidx = end_idx.toBooleanArray();
		int idx = 0;
		int size = blidx.length;
		for(int i = 0;i<size;i++){
			if(blidx[i]){
				idx = i;
			}
		}
		if(idx ==0){
			//System.out.println(out_gate.toDebugString(1000000000, 1000, 1000, 1000));
			// No gate crossed the threshold: fall back to the argmax frame.
			// The data is float32; argMax yields int64, converted to int32 to match Java int.
			NDArray outg = out_gate.argMax().toType(DataType.INT32, false);
			System.out.println(outg.toDebugString(1000000000, 1000, 1000, 1000));
			int[] idxx = outg.toIntArray();
			System.out.println(Arrays.toString(idxx));
			idx = idxx[0];
		}
		if(idx ==0){
			// Still no end frame detected: keep the full mel output.
			idx = (int) out_gate.getShape().get(0);
		}  
		// Truncate the mel output at the detected end frame.
		mels_postnet = mels_postnet.get(":, :"+idx);  
		
		// Vocode the mel frames into a waveform, then denoise it.
		NDArray wavs = ModelUtils.generate_wave(mels_postnet,manager);  
		NDArray wav = ModelUtils.denoiser(wavs,manager);
		 
		// Save the wav file.
		save_wav_wavfile(wav.get(0),1.0f,outs);
	}
	
	// Maximum positive value of a signed 16-bit integer (2^15 - 1); scaling base
	// used when converting float samples to the PCM16 range.
	static int _int16_max = (int) (Math.pow(2, 15) -1); 
	
	/**
	 * Peak-normalizes a waveform to 16-bit PCM range and writes it as a wav file.
	 *
	 * @param wav    waveform samples to write
	 * @param volume linear gain applied on top of the PCM scaling
	 * @param outs   destination file
	 * @throws Exception if the underlying writer fails
	 */
	public static void save_wav_wavfile(NDArray wav,float volume,File outs) throws Exception{ 
		// Divide by the peak amplitude (clamped below by 0.01 so near-silent audio
		// is not blown up), then scale to int16 range times the requested volume.
		NDArray peak = NDArrays.maximum(0.01, wav.abs().max());
		NDArray scaled = wav.mul(_int16_max * volume).div(peak);
		Program.save(scaled.toFloatArray(), samplerate, channels, outs);
	} 
	
	
	/*public static void transform_data_train(List<Integer> text_data,NDArray mel_data,NDArray speaker_data,NDManager manager){
		String mode = "rtvc";
		List<Integer> text = text_data;
		NDArray mel = mel_data;
		NDArray speaker = speaker_data;
		NDArray embed = manager.zeros(new Shape(256));
		String f0 = null;
		
	}*/
	
	/**
	 * Converts the raw inputs into model-ready data: the text as dictionary ids,
	 * the mel spectrogram of the reference audio, and a normalized speaker
	 * embedding averaged over partial utterance windows.
	 *
	 * <p>Side effect: updates the static {@code samplerate}/{@code channels}
	 * fields from the loaded file.
	 *
	 * @param audiopath reference audio path
	 * @param text      text to synthesize
	 * @param manager   NDManager for created NDArrays
	 * @return list of [0] List&lt;Integer&gt; token ids, [1] NDArray mel, [2] NDArray speaker embedding
	 * @throws Exception on audio loading/processing failures
	 */
	public static List<Object>  get_data(String audiopath,String  text,NDManager manager) throws Exception{
		
		List<Integer> txt = get_text(text);
		
		// Load the wav normalized to [-1, 1], resampled to 22050 Hz.
		NDArray audio_norm = FfmpegUtils.load_wav_to_torch(audiopath, 22050);  
		
		samplerate = FfmpegUtils.samplerate;
		channels = FfmpegUtils.channels; 
		
		// NOTE(review): 'audio' is never used after this line — dead computation?
		NDArray audio = audio_norm.mul(max_wav_value);  
		
		NDArray mel = ModelUtils.get_mel(audio_norm);  
		// Trim long silences so only voiced segments feed the speaker encoder.
		audio_norm = trim_long_silences(audio_norm);
		//System.out.println(audio_norm.toDebugString(1000000000, 1000, 1000, 1000));
		Pair<LinkedList<LinkedList<Integer>>,LinkedList<LinkedList<Integer>>> slices = compute_partial_slices(audio_norm.size(),partials_n_frames,0.75f,0.5f);
		LinkedList<LinkedList<Integer>> wave_slices = slices.getLeft();
		LinkedList<LinkedList<Integer>> mel_slices = slices.getRight();
		int max_wave_length = wave_slices.getLast().getLast();  
	    // Zero-pad the audio so the last partial window is fully covered.
	    if (max_wave_length >= audio_norm.size()){
	    	audio_norm = pad(audio_norm, (max_wave_length - audio_norm.size()),manager);
	    } 
	    float[][] fframes = wav_to_mel_spectrogram(audio_norm);
	    NDArray frames = manager.create(fframes).transpose(); 
	    NDList frameslist = new NDList();
	    // Embed each mel window, then mean-pool and L2-normalize into one speaker vector.
	    for(LinkedList<Integer> s : mel_slices){
	    	NDArray temp = ModelUtils.embed(frames.get(s.getFirst()+":"+s.getLast())); 
	    	frameslist.add(temp); 
	    }   
	    NDArray partial_embeds = NDArrays.stack(frameslist);
	    NDArray raw_embed = partial_embeds.mean(new int[] {0});  
	    NDArray embed = raw_embed.div(((raw_embed.pow(2)).sum()).sqrt());
	    
	    /*  
	     *  To add an extra (batch) dimension to the embedding:
	     *  NDList speakerList = new NDList();
	    	speakerList.add(embed);
	    	NDArray speakerNd = NDArrays.stack(speakerList); */
	    
	    List<Object> result = Lists.newArrayList();
	    result.add(txt);
	    result.add(mel);
	    result.add(embed);
	    return result;
		
	} 
	/**
	 * Extracts a mel spectrogram from a waveform for the speaker encoder.
	 *
	 * @param wav waveform samples (assumed 16 kHz mono — TODO confirm with callers)
	 * @return mel spectrogram as [mel band][frame]
	 */
	public static float[][] wav_to_mel_spectrogram(NDArray wav){
		// 16 kHz audio, 1024-point FFT, 40 mel bands, 10 ms hop (160 samples).
		float[] samples = wav.toFloatArray();
		return new JLibrosa().generateMelSpectroGram(samples, 16000, 1024, 40, (16000 * 10 / 1000));
	}
	
	/**
	 * Extends the waveform to its current length plus {@code padl} by appending zeros.
	 *
	 * @param wav     waveform to pad
	 * @param padl    number of zero samples to append
	 * @param manager manager used to allocate the zero block
	 * @return the padded waveform
	 */
	public static NDArray pad(NDArray wav,long padl,NDManager manager){ 
		NDArray zeros = manager.zeros(new Shape(padl));
		return wav.concat(zeros);
	}
	
	/**
	 * Splits an utterance into partially overlapping windows, returning matching
	 * slice ranges in wav samples (left) and mel frames (right). Each slice is a
	 * two-element list [start, end).
	 *
	 * @param n_samples                  total number of audio samples
	 * @param partial_utterance_n_frames mel frames per partial window
	 * @param min_pad_coverage           minimum fraction of the last window that must
	 *                                   contain real audio for it to be kept
	 * @param overlap                    fractional overlap between consecutive windows
	 * @return pair of (wav slices, mel slices)
	 */
	public static Pair<LinkedList<LinkedList<Integer>>,LinkedList<LinkedList<Integer>>> compute_partial_slices(long n_samples,int partial_utterance_n_frames,
            float min_pad_coverage,float overlap){
		int samples_per_frame = (int)(16000 * mel_window_step / 1000);
		// BUG FIX: divide in floating point. The previous code divided two integers,
		// truncating BEFORE Math.ceil ran, so the ceil was a no-op and a trailing
		// partial frame was silently dropped.
		int n_frames = (int) Math.ceil((n_samples + 1) / (double) samples_per_frame);
		int frame_step = Math.max((Math.round(partial_utterance_n_frames * (1 - overlap))), 1); 

		// Compute the slices
		LinkedList<LinkedList<Integer>> wav_slices = Lists.newLinkedList();
		LinkedList<LinkedList<Integer>> mel_slices = Lists.newLinkedList();

		// Mirrors python range(0, n_frames - partial_utterance_n_frames + frame_step + 1, frame_step).
		int steps = Math.max(1, n_frames - partial_utterance_n_frames + frame_step + 1);
		for(int i=0;i<steps;i+=frame_step){ 
			LinkedList<Integer> mel_range = Lists.newLinkedList();
			mel_range.add(i);
			mel_range.add(i + partial_utterance_n_frames);  
			LinkedList<Integer> wav_range = Lists.newLinkedList(); 
			wav_range.add(i * samples_per_frame);
			wav_range.add((i + partial_utterance_n_frames) * samples_per_frame); 
			mel_slices.add(mel_range);
			wav_slices.add(wav_range);
		} 
		// Evaluate whether extra padding is warranted or not: if the final window
		// covers too little real audio, drop it (keeping at least one window).
		LinkedList<Integer> last_wav_range = wav_slices.getLast();
		float coverage =(float) (n_samples - last_wav_range.getFirst()) / (last_wav_range.getLast() - last_wav_range.getFirst());
		if (coverage < min_pad_coverage && mel_slices.size() > 1){
			mel_slices.removeLast();
			wav_slices.removeLast();
		} 

		return Pair.of(wav_slices,mel_slices);
	}
	
	
	
	// Target loudness for volume normalization (used by the commented-out normalize_volume).
	public static int audio_norm_target_dBFS = -30;
	public static int vad_window_length = 30; // In milliseconds 
	// Sample rate the VAD pipeline assumes.
	public static int sampling_rate = 16000;
	// Width (in windows) of the moving average that smooths the VAD flags.
	public static int vad_moving_average_width = 8;
	// Maximum silence run (in windows) bridged when dilating the voice mask.
	public static int vad_max_silence_length = 6;
	// Maximum positive value of a signed 16-bit integer (2^15 - 1).
	public static double int16_max = Math.pow(2f,15f)- 1;
	
	/**
	 * Voice-activity based trimming: removes stretches without voice so that
	 * silent segments in the waveform stay below a threshold determined by the
	 * VAD parameters above.
	 *
	 * @param wav raw waveform (float samples; VAD assumes {@code sampling_rate})
	 * @return the waveform with silences trimmed away (length &lt;= original)
	 * @throws Exception if VAD processing fails
	 */
	public static NDArray trim_long_silences(NDArray wav) throws Exception{
		// NOTE(review): this NDManager is never closed even though it is
		// AutoCloseable; arrays it creates live until process exit. Consider
		// try-with-resources once the returned array's ownership is confirmed.
		NDManager manager = NDManager.newBaseManager(Device.cpu()); 
	    /*Ensures that segments without voice in the waveform remain no longer than a 
	    threshold determined by the VAD parameters in params.py.

	    :param wav: the raw waveform as a numpy array of floats 
	    :return: the same waveform with silences trimmed away (length <= original wav length)*/
	    
	    // Compute the voice detection window size
	    int samples_per_window = (vad_window_length * sampling_rate) / 1000;

	    // Trim the end of the audio to have a multiple of the window size 
	    wav = wav.get(":"+(wav.size() - (wav.size() % samples_per_window)));
	   
	    // Convert the float waveform to 16-bit mono PCM 
	   // wav = wav.mul(int16_max).round().toType(DataType.INT8, false);
	    
	    NDArray wavs = wav.mul(int16_max).round().toType(DataType.INT32, false);
	    int[] bit16  = wavs.toIntArray();  
	   
	    //Struct struct = new Struct();
	    //byte[] pcm_wave = struct.pack("@"+wav.size()+"h", wav.toLongArray()); 
	    
	    //Convert.shortToBytes(shortValue);
	    //byte[] pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
	    //byte[] apcm_wave = JBBPOut.BeginBin().Short(wav.toUint8Array()).End().toByteArray();
	   
	    //byte[] pcm_wavee = Arrays.copyOfRange(apcm_wave,1,apcm_wave.length);
	     
	    
		// Perform voice activation detection 
	    // VAD vad = new VAD(); 
	    //vad = webrtcvad.Vad(mode=3)
	    
	    
	    
	    // Run the VAD over consecutive non-overlapping windows, recording
	    // 1 for speech and 0 for silence.
	    LinkedList<Integer> voice_flags = Lists.newLinkedList();
	    long size = wav.size();
	    for(int window_start=0;window_start<size;window_start += samples_per_window){
	    	int window_end = window_start + samples_per_window;  
	    	voice_flags.add(isSpeech(Arrays.copyOfRange(bit16, window_start,window_end),samples_per_window));
	    }
	    
	        /*  voice_flags = np.array(voice_flags) 
	    // Smooth the voice detection with a moving average
	    def moving_average(array, width):
	        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
	        ret = np.cumsum(array_padded, dtype=float)
	        ret[width:] = ret[width:] - ret[:-width]
	        return ret[width - 1:] / width*/

	    // Smooth the per-window flags, binarize, then dilate so short silent gaps
	    // inside speech are retained.
	    NDArray audio_mask = moving_average(voice_flags, vad_moving_average_width,manager); 
	    audio_mask = audio_mask.round().toType(DataType.BOOLEAN, true); 
	    boolean[] audio_mask1 = audio_mask.toBooleanArray();
	   
	    audio_mask = binary_dilation(audio_mask1, vad_max_silence_length + 1,manager);
	    // Expand the per-window mask back to per-sample, then keep only voiced samples.
	    audio_mask = audio_mask.repeat(samples_per_window); 
	    return  wav.get(audio_mask.eq(1));
	}
	
	/**
	 * One-dimensional binary dilation of a voice-activity mask: every {@code true}
	 * entry is widened by {@code vad_max_silence_length / 2} positions on each
	 * side, bridging short silent gaps between voiced regions.
	 *
	 * <p>NOTE: the input array is modified in place (the sole caller discards it).
	 *
	 * @param mask                   voice-activity flags, one per VAD window
	 * @param vad_max_silence_length maximum silence run (in windows) to bridge;
	 *                               the dilation radius is half this value
	 * @param manager                manager used to create the returned NDArray
	 * @return the dilated mask as a boolean NDArray
	 */
	public static NDArray binary_dilation(boolean[] mask,int vad_max_silence_length,NDManager manager){
		int size = mask.length;
		// Dilation radius on each side of a voiced entry.
		int padlen = vad_max_silence_length / 2;
		for(int i=0;i<size;i++){
			if(mask[i]){
				// Dilate to the left by padlen.
				// BUG FIX: the previous bound broke on k == 0 BEFORE writing index 0,
				// so the first element was never dilated; stop at k < 0 instead.
				for(int j = 0; j <= padlen; j++){
					int k = i - j;
					if(k < 0){
						break;
					}
					mask[k] = true;
				}
				// Dilate to the right by padlen, then advance i past the region just
				// filled so newly set entries do not re-trigger dilation.
				for(int j = 0; j <= padlen; j++){
					int k = i + j;
					if(k >= size){
						break;
					}
					mask[k] = true;
				}
				i = i + padlen;
				if(i >= size){
					break;
				}
			}
		} 
		return manager.create(mask);
	}
	
	/**
	 * Smooths an integer sequence with a centered moving average of the given
	 * width. Mirrors the numpy recipe: pad with (width-1)/2 leading and width/2
	 * trailing zeros, cumulative-sum, difference at lag {@code width}, divide by
	 * {@code width}.
	 *
	 * @param array   values to smooth (not modified)
	 * @param width   averaging window width
	 * @param manager manager used to create the result NDArray
	 * @return NDArray of length {@code array.size()} with the smoothed values
	 */
	public static NDArray moving_average(LinkedList<Integer>array,int width,NDManager manager){
		// BUG FIX: pad a defensive copy; the original padded the caller's list in
		// place, leaving width-1 spurious zeros in the argument after the call.
		LinkedList<Integer> array_padded = new LinkedList<>(array);
		int leading = (width - 1) / 2;
		for(int i = 0; i < leading; i++){
			array_padded.addFirst(0); 
		}
		int trailing = width / 2;
		for(int i = 0; i < trailing; i++){
			array_padded.addLast(0); 
		}
		
		NDArray ret = manager.create(cumsum(array_padded.stream().toArray(Integer[]::new)));
		// ret[width:] -= ret[:-width]; then ret[width-1:] / width are the averages.
		ret.set(new NDIndex(width+":"),ret.get(width+":").sub(ret.get(":-"+width))); 
	    return ret.get(width - 1+":").div(width);
	}
	
	/**
	 * Cumulative sum of an Integer array, returned as floats.
	 *
	 * @param in input values (elements must be non-null)
	 * @return array of the same length where element i is the sum of in[0..i]
	 */
	public static float[] cumsum(Integer[] in) {
		float[] sums = new float[in.length];
		int running = 0;
		int idx = 0;
		for (Integer value : in) {
			running += value;
			sums[idx++] = running;
		}
		return sums;
	}
	/*public static NDArray normalize_volume(NDArray wav,boolean increase_only,boolean decrease_only){ 
		 
		NDArray rms = wav.mul(int16_max).mean().sqrt();
		NDArray wave_dBFS =  rms.div(int16_max).log10().mul(20);
		NDArray dBFS_change = wave_dBFS.mul(-1).add(audio_norm_target_dBFS);
	    if dBFS_change.lt(0)  increase_only or dBFS_change > 0 and decrease_only:
	        return wav
	    return wav * (10 ** (dBFS_change / 20))
	}*/
	
	public static int sampleSizeInBits = 16; // bit depth
	 
	/**
	 * Runs WebRTC voice-activity detection on one analysis window.
	 *
	 * @param pcm_wave           16-bit PCM samples (as ints) for one window
	 * @param samples_per_window window length in samples (unused by the current
	 *                           implementation; kept for interface compatibility)
	 * @return 1 if the window contains speech, 0 otherwise
	 */
	public static  int isSpeech(int[] pcm_wave,int samples_per_window){
		// NOTE(review): a new detector (16 kHz, aggressiveness 3) is constructed per
		// window; if WebRTCVad is stateless it could be cached — confirm before changing.
		WebRTCVad detector = new WebRTCVad(16000, 3);
		boolean speech = detector.isSpeech(pcm_wave);
		return speech ? 1 : 0;
	}
	 
	// Bytes per 16-bit PCM sample (used by the commented-out buf_to_16int helper below).
	static int n_bytes=2;
	
	/*private static int[]  buf_to_16int(byte[] frame){ 
		int size = frame.length / n_bytes;
		int[]  framei = new int[size];
		for(int i=0;i<size;i++){
			framei[i] = IntegerConversion.convertTwoBytesToInt1(frame[2*i],frame[2*i+1]);
		}   
		return framei;
	}*/
	
	/**
	 * Widens a float array to doubles via each value's decimal string form, so the
	 * doubles match the printed float values (e.g. 3.14f becomes 3.14, not the raw
	 * binary widening 3.140000104904175).
	 *
	 * @param fs input samples
	 * @return array of the same length holding the converted values
	 */
	public static double[] conv(float[] fs){
		double[] widened = new double[fs.length];
		for(int i = 0; i < fs.length; i++){
			// String.valueOf(float) yields the shortest decimal uniquely denoting
			// the float; BigDecimal parses that decimal string exactly.
			widened[i] = new BigDecimal(String.valueOf(fs[i])).doubleValue();
		}
		return widened;
	}
	/**
	 * Converts Chinese text into its dictionary-id sequence.
	 *
	 * @param text input text
	 * @return token ids produced by {@code Sequence.text2sequence}
	 */
	public static List<Integer> get_text(String text){
		List<Integer> ids = Sequence.text2sequence(text);
		return ids;
	} 
	
}
