package com.yyds.tensordemo;

import android.content.Context;
import android.util.Log;

import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.io.TarsosDSPAudioFormat;
import be.tarsos.dsp.io.UniversalAudioInputStream;
import be.tarsos.dsp.io.android.AudioDispatcherFactory;
import be.tarsos.dsp.mfcc.MFCC;

/**
 * Extracts MFCC feature tensors (shape [1, targetLength, nMfcc]) from an audio
 * file path or a bundled asset, for feeding to a model.
 *
 * <p>Audio is framed at {@link #SAMPLE_RATE} Hz in {@link #BUFFER_SIZE}-sample
 * frames overlapping by {@link #OVERLAP} samples; one MFCC vector is collected
 * per frame and the sequence is zero-padded or truncated to a fixed length.
 */
public class AudioPreprocessor {
    private static final String TAG = "AudioPreprocessor";

    // Framing parameters shared by every extraction overload.
    private static final int SAMPLE_RATE = 44100; // sample rate, Hz
    private static final int BUFFER_SIZE = 512;   // samples per analysis frame
    private static final int OVERLAP = 256;       // samples shared by adjacent frames

    // Mel-filterbank configuration handed to TarsosDSP's MFCC processor.
    private static final int MEL_FILTERS = 20;
    private static final float LOWER_FREQ_HZ = 300f;
    private static final float UPPER_FREQ_HZ = 3000f;

    private final String audioFilePath;

    public AudioPreprocessor(String audioFilePath) {
        this.audioFilePath = audioFilePath;
    }

    /**
     * Registers an MFCC processor plus a collecting processor on {@code dispatcher}.
     * One coefficient vector per processed frame is appended to {@code sink}.
     *
     * @param dispatcher pipeline to attach to
     * @param nMfcc      number of cepstral coefficients per frame
     * @param sink       receives one float[nMfcc] per frame
     * @param logFrames  if true, logs each {@link AudioEvent} (debug aid)
     * @param onFinished optional callback invoked once when the stream ends
     */
    private static void attachMfccCollector(AudioDispatcher dispatcher, int nMfcc,
                                            final List<float[]> sink, final boolean logFrames,
                                            final Runnable onFinished) {
        final MFCC mfcc = new MFCC(BUFFER_SIZE, SAMPLE_RATE, nMfcc,
                MEL_FILTERS, LOWER_FREQ_HZ, UPPER_FREQ_HZ);
        dispatcher.addAudioProcessor(mfcc);
        dispatcher.addAudioProcessor(new AudioProcessor() {
            @Override
            public boolean process(AudioEvent audioEvent) {
                if (logFrames) {
                    Log.i("TAO", "process: " + audioEvent.toString());
                }
                // The MFCC processor registered just above has already handled this
                // frame, so getMFCC() holds the current frame's coefficients.
                sink.add(mfcc.getMFCC());
                return true;
            }

            @Override
            public void processingFinished() {
                if (onFinished != null) {
                    onFinished.run();
                }
            }
        });
    }

    /**
     * Synchronously extracts MFCC features from {@code audioFilePath}.
     *
     * @param targetLength time-step length after padding/truncation
     * @param nMfcc        MFCC coefficients per frame
     * @return features of shape [1, targetLength, nMfcc], or null on failure
     */
    public float[][][] extractFeatures(int targetLength, int nMfcc) {
        try {
            AudioDispatcher dispatcher =
                    AudioDispatcherFactory.fromPipe(audioFilePath, SAMPLE_RATE, BUFFER_SIZE, OVERLAP);
            List<float[]> mfccList = new ArrayList<>();
            attachMfccCollector(dispatcher, nMfcc, mfccList, false, null);

            dispatcher.run(); // blocks until the whole file has been decoded

            float[][] mfccArray = mfccList.toArray(new float[0][]);
            return padOrTruncateFeatures(mfccArray, targetLength, nMfcc);
        } catch (Exception e) {
            Log.e(TAG, "MFCC extraction failed for " + audioFilePath, e);
            return null;
        }
    }

    /**
     * Fire-and-forget variant: starts extraction of {@code audioFilePath} on a
     * background thread, logging each processed frame.
     *
     * <p>NOTE(review): the collected coefficients are discarded when the thread
     * finishes — callers receive no result. This looks like a debugging aid;
     * confirm before relying on it.
     *
     * @param nMfcc MFCC coefficients per frame
     */
    public void extractFeatures(int nMfcc) {
        try {
            AudioDispatcher dispatcher =
                    AudioDispatcherFactory.fromPipe(audioFilePath, SAMPLE_RATE, BUFFER_SIZE, OVERLAP);
            List<float[]> mfccList = new ArrayList<>();
            attachMfccCollector(dispatcher, nMfcc, mfccList, true, null);

            new Thread(dispatcher).start();
        } catch (Exception e) {
            Log.e(TAG, "async MFCC extraction failed for " + audioFilePath, e);
        }
    }

    /**
     * Extracts MFCC features from the bundled asset {@code audio1.wav}, running
     * the dispatcher on a worker thread and blocking the caller until the whole
     * asset has been processed.
     *
     * <p>NOTE(review): the asset name is hard-coded and {@code audioFilePath} is
     * ignored here — confirm that is intended.
     *
     * @param context      used to open the asset
     * @param targetLength time-step length after padding/truncation
     * @param nMfcc        MFCC coefficients per frame
     * @return features of shape [1, targetLength, nMfcc], or null on failure
     */
    public float[][][] extractFeatures(Context context, int targetLength, int nMfcc) {
        try {
            final CountDownLatch latch = new CountDownLatch(1);
            InputStream inStream = context.getAssets().open("audio1.wav");
            // BUG FIX: the original passed bufferSize (512) as the
            // sample-size-in-bits argument and claimed big-endian data;
            // a PCM WAV asset is 16-bit little-endian.
            TarsosDSPAudioFormat format =
                    new TarsosDSPAudioFormat(SAMPLE_RATE, 16, 1, true, false);
            AudioDispatcher dispatcher = new AudioDispatcher(
                    new UniversalAudioInputStream(inStream, format), BUFFER_SIZE, OVERLAP);

            List<float[]> mfccList = new ArrayList<>();
            attachMfccCollector(dispatcher, nMfcc, mfccList, false, new Runnable() {
                @Override
                public void run() {
                    Log.i("TAO", "processingFinished: ");
                    latch.countDown();
                }
            });

            new Thread(dispatcher).start();
            latch.await(); // released by processingFinished()

            float[][] mfccArray = mfccList.toArray(new float[0][]);
            return padOrTruncateFeatures(mfccArray, targetLength, nMfcc);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            return null;
        } catch (Exception e) {
            Log.e(TAG, "asset MFCC extraction failed", e);
            return null;
        }
    }

    /**
     * Builds a binary attention mask for {@code features}: a time step is 1.0
     * when any of its coefficients is non-zero, else 0.0.
     *
     * @param features array of shape [1, timeSteps, nMfcc]
     * @return mask of shape [1, timeSteps]
     */
    public float[][] generateMask(float[][][] features) {
        int timeSteps = features[0].length;
        float[][] mask = new float[1][timeSteps]; // zero-initialized by Java
        for (int i = 0; i < timeSteps; i++) {
            for (float value : features[0][i]) {
                if (value != 0) {
                    mask[0][i] = 1.0f;
                    break;
                }
            }
        }
        return mask;
    }

    /**
     * Pads (with zero vectors) or truncates {@code features} to exactly
     * {@code targetLength} time steps.
     *
     * @param features     raw per-frame features, shape [frames][nMfcc]
     * @param targetLength desired number of time steps
     * @param nMfcc        coefficients per frame
     * @return array of shape [1, targetLength, nMfcc]
     */
    private float[][][] padOrTruncateFeatures(float[][] features, int targetLength, int nMfcc) {
        // A freshly-allocated array is already zero-filled, so only the real
        // frames need copying; the tail stays as zero padding.
        float[][][] padded = new float[1][targetLength][nMfcc];
        int framesToCopy = Math.min(targetLength, features.length);
        for (int i = 0; i < framesToCopy; i++) {
            // Note: shares the row arrays with the input rather than cloning them.
            padded[0][i] = features[i];
        }
        return padded;
    }
}
