package com.jxw.cloudpen.web.microsoft.stremspeed;

/**
 * @author ligang
 * @create 2025/4/24 11:09
 */

import com.microsoft.cognitiveservices.speech.*;
import com.microsoft.cognitiveservices.speech.audio.AudioConfig;
import com.microsoft.cognitiveservices.speech.audio.AudioInputStream;
import com.microsoft.cognitiveservices.speech.audio.AudioStreamFormat;
import com.microsoft.cognitiveservices.speech.audio.PushAudioInputStream;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;

/**
 * Demo/test harness that feeds a local WAV file into an Azure Speech
 * continuous-recognition session via a {@link PushAudioInputStream}, timing
 * how long each push pass takes.
 *
 * <p>Lifecycle: {@link #open(String)} builds the config/stream/recognizer and
 * wires the event listeners; {@link #pronunciationAssessmentWithPushStream()}
 * streams the file; {@link #close()} signals end-of-stream and tears down.
 *
 * <p>NOT thread-safe: all state is held in mutable static fields, so only one
 * session may be active per JVM at a time.
 */
public class MyStreamSpeedTest {
    static SpeechRecognizer recognizer;
    static PushAudioInputStream pushStream;
    static SpeechConfig config;
    static AudioConfig audioInput;

    // Released by the `canceled` event (fired on error or end-of-stream);
    // close() blocks on it so teardown waits for the session to finish.
    private static Semaphore stopRecognitionSemaphore;

    public static void main(String[] args) throws Exception {
        open("en-US");
        pronunciationAssessmentWithPushStream();
        pronunciationAssessmentWithPushStream();
        pronunciationAssessmentWithPushStream();
        close();
    }

    /**
     * Creates the speech config, push stream and recognizer for {@code lang},
     * and subscribes logging listeners to all recognition events.
     *
     * @param lang BCP-47 recognition language, e.g. {@code "en-US"}
     * @throws Exception on invalid endpoint URI or SDK initialization failure
     * @throws IllegalStateException if the SPEECH_KEY env var is not set
     */
    public static void open(String lang) throws Exception {
        // SECURITY: the subscription key was previously hard-coded here and is
        // now exposed in version control — it must be rotated. Read it from the
        // environment instead of committing it to source.
        String subscriptionKey = System.getenv("SPEECH_KEY");
        if (subscriptionKey == null || subscriptionKey.isBlank()) {
            throw new IllegalStateException(
                    "SPEECH_KEY environment variable is not set; "
                    + "export the Azure Speech subscription key before running.");
        }

        // Initialize the semaphore BEFORE registering listeners that release
        // it, so a listener can never observe a null reference.
        stopRecognitionSemaphore = new Semaphore(0);

        config = SpeechConfig.fromEndpoint(
                new URI("https://southeastasia.api.cognitive.microsoft.com/"),
                subscriptionKey);

        pushStream = AudioInputStream.createPushStream();
        // Creates a speech recognizer using the push stream as audio input.
        audioInput = AudioConfig.fromStreamInput(pushStream);
        recognizer = new SpeechRecognizer(config, lang, audioInput);

        // Subscribes to events.
        recognizer.recognizing.addEventListener((s, e) -> {
            System.out.println("RECOGNIZING: Text=" + e.getResult().getText());
        });

        recognizer.recognized.addEventListener((s, e) -> {
            if (e.getResult().getReason() == ResultReason.RecognizedSpeech) {
                System.out.println("RECOGNIZED: Text=" + e.getResult().getText());
            } else if (e.getResult().getReason() == ResultReason.NoMatch) {
                System.out.println("NOMATCH: Speech could not be recognized.");
            }
        });

        recognizer.canceled.addEventListener((s, e) -> {
            System.out.println("CANCELED: Reason=" + e.getReason());

            if (e.getReason() == CancellationReason.Error) {
                System.out.println("CANCELED: ErrorCode=" + e.getErrorCode());
                System.out.println("CANCELED: ErrorDetails=" + e.getErrorDetails());
                System.out.println("CANCELED: Did you update the subscription info?");
            }

            // Unblock close(): the session is over (error or end-of-stream).
            stopRecognitionSemaphore.release();
        });

        recognizer.sessionStarted.addEventListener((s, e) -> {
            System.out.println("\n    Session started event.");
        });

        recognizer.sessionStopped.addEventListener((s, e) -> {
            System.out.println("\n    Session stopped event.");
        });
    }

    /**
     * Signals end-of-stream, waits for the session to finish (via the
     * {@code canceled} event releasing the semaphore), then releases all
     * SDK resources.
     *
     * @throws Exception if interrupted while waiting or if stopping fails
     */
    public static void close() throws Exception {
        // Closing the push stream tells the service no more audio is coming.
        pushStream.close();
        // Waits for the canceled event before tearing anything down.
        stopRecognitionSemaphore.acquire();
        recognizer.stopContinuousRecognitionAsync().get();
        config.close();
        audioInput.close();
        recognizer.close();
    }

    /**
     * Requests recognition stop without blocking for completion.
     *
     * <p>NOTE(review): this is fire-and-forget — the returned future is
     * dropped, so failures are silently ignored. The blocking
     * {@code .get()} variant appears to have been removed deliberately;
     * confirm whether callers rely on this returning immediately before
     * making it synchronous.
     */
    public static void stop() throws Exception {
        System.out.println("stop is execute");
        recognizer.stopContinuousRecognitionAsync();
        System.out.println("stop is execute end");
    }

    /**
     * Streams {@code D://file.wav} into the push stream in 1 KiB chunks and
     * prints the elapsed wall-clock time for the push pass.
     *
     * <p>NOTE(review): each call invokes startContinuousRecognitionAsync()
     * again without stopping the previous session; confirm the SDK tolerates
     * repeated starts (main() calls this three times in a row).
     *
     * @throws IOException if the WAV file cannot be read
     * @throws ExecutionException if starting recognition fails
     * @throws InterruptedException if interrupted while starting recognition
     */
    public static void pronunciationAssessmentWithPushStream() throws InterruptedException, IOException, ExecutionException, URISyntaxException {
        long startMillis = System.currentTimeMillis();

        // Starts continuous recognition; use stopContinuousRecognitionAsync()
        // (or close()) to stop it.
        recognizer.startContinuousRecognitionAsync().get();

        // try-with-resources: the file is closed even if pushStream.write throws.
        try (InputStream inputStream = new FileInputStream("D://file.wav")) {
            byte[] readBuffer = new byte[1024];
            int bytesRead;
            while ((bytesRead = inputStream.read(readBuffer)) != -1) {
                if (bytesRead == readBuffer.length) {
                    // Full buffer: avoid the copy.
                    pushStream.write(readBuffer);
                } else {
                    // Partial read (tail of file): trim before pushing.
                    pushStream.write(Arrays.copyOfRange(readBuffer, 0, bytesRead));
                }
            }
        }

        // Fixed: the original concatenated an SLF4J-style "{}" placeholder,
        // printing a literal "{}" instead of the elapsed time.
        System.out.println("耗时 is " + (System.currentTimeMillis() - startMillis));
    }

    /**
     * Starts continuous recognition (blocking until started) and pushes a
     * single audio chunk into the stream.
     *
     * @param var1 raw audio bytes to push
     * @throws ExecutionException if starting recognition fails
     * @throws InterruptedException if interrupted while starting recognition
     */
    public static void langPushStrem(byte[] var1) throws InterruptedException, IOException, ExecutionException, URISyntaxException {
        stopRecognitionSemaphore = new Semaphore(0);
        recognizer.startContinuousRecognitionAsync().get();
        pushStream.write(var1);
    }
}
