package com.example.websockettest.webSocket.回收站;

import com.baidu.aip.speech.AipSpeech;
import javax.sound.sampled.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;
import org.json.JSONObject;

public class RealtimeSpeechRecognition {
    // Baidu AIP credentials (APP_ID / API_KEY / SECRET_KEY).
    // NOTE(security): credentials are hard-coded and exposed in source control;
    // move them to configuration / environment variables and rotate the keys.
    private static final String APP_ID = "31886218";
    private static final String API_KEY = "4ANkgrRxYaFt8jPRQ857iFRR";
    private static final String SECRET_KEY = "msDawnIG75BwGIrDTjn1aCC5NopOd4Xn";

    // 16 kHz, 16-bit, mono, signed, little-endian PCM — the format Baidu ASR expects.
    private static final AudioFormat FORMAT = new AudioFormat(16000, 16, 1, true, false);
    private static final int CHUNK_SIZE = 1024;

    // Seconds of audio accumulated before each recognition request.
    private static final int WINDOW_SECONDS = 10;
    // Bytes per recognition window, derived from the capture format:
    // sampleRate * bytesPerSample * channels * seconds = 320_000 bytes for 10 s.
    // (The original compared against a literal 10240 bytes — only ~0.32 s at this
    // format — contradicting its own "10 seconds per recognition" comment.)
    private static final int WINDOW_BYTES =
            (int) FORMAT.getSampleRate()
                    * (FORMAT.getSampleSizeInBits() / 8)
                    * FORMAT.getChannels()
                    * WINDOW_SECONDS;

    /**
     * Captures microphone audio continuously and sends it to Baidu ASR in
     * fixed-size windows, printing each transcript to stdout.
     *
     * @param args unused
     * @throws Exception if the microphone line cannot be opened
     */
    public static void main(String[] args) throws Exception {
        AipSpeech client = new AipSpeech(APP_ID, API_KEY, SECRET_KEY);

        // try-with-resources closes the line even if asr() or read() throws.
        try (TargetDataLine microphone = AudioSystem.getTargetDataLine(FORMAT)) {
            microphone.open(FORMAT);
            microphone.start();

            System.out.println("Listening, please speak.");

            ByteArrayOutputStream audioBuffer = new ByteArrayOutputStream();
            byte[] buffer = new byte[CHUNK_SIZE]; // hoisted out of the loop

            while (true) {
                int bytesRead = microphone.read(buffer, 0, buffer.length);
                if (bytesRead <= 0) {
                    // Line was closed or drained; writing a negative length to
                    // the buffer would throw IndexOutOfBoundsException.
                    break;
                }
                audioBuffer.write(buffer, 0, bytesRead);

                if (audioBuffer.size() >= WINDOW_BYTES) {
                    recognize(client, audioBuffer.toByteArray());
                    audioBuffer.reset();
                }
            }
        }
    }

    /**
     * Sends one window of raw PCM audio to Baidu ASR and prints the transcript.
     *
     * @param client    authenticated Baidu speech client
     * @param audioData raw 16 kHz / 16-bit / mono PCM bytes
     */
    private static void recognize(AipSpeech client, byte[] audioData) {
        HashMap<String, Object> options = new HashMap<String, Object>();
        options.put("dev_pid", 1537); // 1537 = Mandarin (Chinese-only model)
        options.put("cuid", "123456PYTHON"); // device/user identifier
        options.put("len", audioData.length); // length of the audio payload
        JSONObject result = client.asr(audioData, "pcm", 16000, options);
        if (result == null) {
            return;
        }
        // Baidu reports err_no == 0 on success, with "result" as a JSON array of
        // candidate transcripts. optString("result") on an array field returns
        // the raw JSON text, not the transcript — read the first array element.
        if (result.optInt("err_no", -1) == 0 && result.has("result")) {
            String transcript = result.getJSONArray("result").optString(0, "");
            System.out.println("Recognition result: " + transcript);
        } else {
            System.out.println("Recognition failed: " + result.toString());
        }
    }
}
