package com.jxw.cloudpen.web.netty.google;

/**
 * @author ligang
 * @create 2025/4/23 14:18
 */

import com.alibaba.fastjson.JSONObject;
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.speech.v2.RecognizerName;
import com.google.cloud.speech.v2.SpeechClient;
import com.google.cloud.speech.v2.StreamingRecognizeRequest;
import com.google.cloud.speech.v2.StreamingRecognizeResponse;
import com.google.protobuf.ByteString;
import com.jxw.cloudpen.web.netty.util.ByteObjUtil;
import lombok.extern.slf4j.Slf4j;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;
import java.util.Arrays;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

@Slf4j
@Slf4j
public class StreamingRecognizeExample {

    /** Bytes per microphone read; ~200 ms of 16 kHz, 16-bit, mono PCM. */
    private static final int CHUNK_SIZE = 6400;

    /** Microphone capture line; opened and assigned in main() BEFORE any thread starts. */
    private static TargetDataLine targetDataLine;

    /** Hands captured audio chunks from the mic thread to the sender thread. */
    private static final BlockingQueue<byte[]> sharedQueue = new LinkedBlockingQueue<>();

    /** Bidirectional gRPC stream shared by the sender and response-reader threads. */
    static BidiStream<StreamingRecognizeRequest, StreamingRecognizeResponse> bidiStream;

    public static void main(String[] args) throws Exception {
        // Open the microphone as 16 kHz, 16-bit, mono, signed, little-endian PCM —
        // the layout Speech-to-Text expects for LINEAR16 audio.
        // (Previously targetDataLine was never initialized, so MicBuffer threw an
        // NPE on targetDataLine.start().)
        AudioFormat format = new AudioFormat(16000.0f, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        targetDataLine = (TargetDataLine) AudioSystem.getLine(info);
        targetDataLine.open(format);

        SpeechClient speechClient = SpeechClient.create();
        bidiStream = speechClient.streamingRecognizeCallable().call();

        // NOTE(review): per the v2 API, the FIRST request on the stream must carry
        // the recognizer resource name and a StreamingRecognitionConfig before any
        // audio — re-enable setRecognizer(RecognizerName.of(...).toString()) with
        // real project values for this to produce transcripts.

        new Thread(new MicBuffer(), "mic-capture").start();   // microphone -> queue
        new Thread(new speed(), "audio-sender").start();      // queue -> gRPC stream
        new Thread(new resule(), "response-reader").start();  // gRPC stream -> log
    }

    /** Reads raw audio from the microphone and enqueues it for the sender thread. */
    static class MicBuffer implements Runnable {
        @Override
        public void run() {
            log.info("Start speaking...Press Ctrl-C to stop");
            targetDataLine.start();
            byte[] data = new byte[CHUNK_SIZE];
            while (targetDataLine.isOpen()) {
                try {
                    int numBytesRead = targetDataLine.read(data, 0, data.length);
                    if (numBytesRead <= 0) {
                        continue;
                    }
                    // Enqueue only the bytes actually read; cloning the whole buffer
                    // (as before) ships stale trailing data on a short read.
                    sharedQueue.put(Arrays.copyOf(data, numBytesRead));
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop capturing.
                    Thread.currentThread().interrupt();
                    log.warn("Microphone input buffering interrupted", e);
                    return;
                }
            }
        }
    }

    /**
     * Drains the shared queue and streams each audio chunk to the recognizer.
     * NOTE(review): class name should be UpperCamelCase (e.g. AudioSender);
     * kept as-is so package-level references to StreamingRecognizeExample.speed
     * keep compiling.
     */
    static class speed implements Runnable {
        @Override
        public void run() {
            // BUG FIX: the original took a single chunk and returned, so only the
            // first ~6 KB of audio ever reached the API. Loop until interrupted.
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    byte[] bytes = sharedQueue.take();
                    if (bytes.length > 0) {
                        StreamingRecognizeRequest request =
                                StreamingRecognizeRequest.newBuilder()
                                        .setAudio(ByteString.copyFrom(bytes))
                                        .build();
                        bidiStream.send(request);
                    }
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                log.warn("Audio sender interrupted", e);
            }
        }
    }

    /**
     * Blocks on the bidirectional stream and logs every recognition response.
     * NOTE(review): class name should be UpperCamelCase (e.g. ResponseReader);
     * kept as-is so package-level references keep compiling.
     */
    static class resule implements Runnable {
        @Override
        public void run() {
            for (StreamingRecognizeResponse response : bidiStream) {
                log.info("response is {}", JSONObject.toJSONString(response));
            }
        }
    }
}
