/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.jxw.cloudpen.web.netty.google;

// [START speech_transcribe_infinite_streaming]

import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.speech.v1p1beta1.*;
import com.google.protobuf.ByteString;
import com.google.protobuf.Duration;
import com.jxw.cloudpen.web.CloudPenApplication;
import org.springframework.boot.SpringApplication;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine.Info;
import javax.sound.sampled.TargetDataLine;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

/**
 * "Infinite" streaming speech recognition demo against the Google Cloud Speech
 * v1p1beta1 API.
 *
 * <p>A single streaming recognize call is limited in duration server-side, so the intent
 * of this class (inherited from Google's sample) is to restart the stream before
 * {@link #STREAMING_LIMIT} and resend any unfinalized audio from the previous stream to
 * bridge the gap — see the resend loop in {@link #infiniteStreamingRecognize(String)}.
 *
 * <p>NOTE(review): all session state lives in public static fields that are mutated from
 * both the caller's thread and the gRPC response thread without synchronization. This
 * class therefore supports only a single concurrent recognition session and is not
 * thread-safe — confirm before reusing it elsewhere.
 */
public class InfiniteStreamRecognizev2 {

  /** Outbound request stream; published so other components can push audio requests into it. */
  public static ClientStream<StreamingRecognizeRequest> responseObserver2;

  //  private static final int STREAMING_LIMIT = 290000; // ~5 minutes
  private static final int STREAMING_LIMIT = 100000; // ~100 seconds (value is milliseconds)

  // ANSI color escape codes used when printing transcripts to the console.
  public static final String RED = "\033[0;31m";
  public static final String GREEN = "\033[0;32m";
  public static final String YELLOW = "\033[0;33m";

  /** Shared hand-off queue intended to carry captured audio chunks into the send loop. */
  public static volatile BlockingQueue<byte[]> sharedQueue = new LinkedBlockingQueue<byte[]>();

  /** Audio buffer size in bytes (6400 bytes = 200 ms of 16 kHz 16-bit mono PCM). */
  public static int BYTES_PER_BUFFER = 6400;

  /** Number of stream restarts so far; used to offset result timestamps across streams. */
  public static int restartCounter = 0;

  /** Audio chunks sent on the current stream. */
  public static ArrayList<ByteString> audioInput = new ArrayList<ByteString>();

  /** Audio chunks from the previous stream, kept so unfinalized audio can be resent. */
  public static ArrayList<ByteString> lastAudioInput = new ArrayList<ByteString>();

  /** End time (ms) of the most recent result within the current stream. */
  public static int resultEndTimeInMS = 0;

  /** End time (ms) of the most recent final (non-interim) result. */
  public static int isFinalEndTime = 0;

  /** End time (ms) of the last request before a stream restart. */
  public static int finalRequestEndTime = 0;

  /** True until the first audio chunk of a freshly (re)started stream has been handled. */
  public static boolean newStream = true;

  /** Time (ms) of resent audio, subtracted so displayed timestamps stay continuous. */
  public static double bridgingOffset = 0;

  public static boolean lastTranscriptWasFinal = false;
  public static StreamController referenceToStreamController;
  public static ByteString tempByteString;

  /**
   * Entry point: starts a single streaming recognition session for Mandarin ("zh").
   */
  public static void main(String[] args) {
    try {
      // Initialize the Netty server side (original author's note; no Netty code is
      // present in this class — presumably wired up elsewhere).
      infiniteStreamingRecognize("zh");
    } catch (Exception e) {
      System.out.println("Exception caught: " + e);
    }
  }

  /**
   * Formats a millisecond timestamp as a zero-padded "MM:SS /" string for display.
   *
   * @param milliSeconds elapsed time in milliseconds (fractional part is truncated)
   * @return "minutes:seconds /" with both fields padded to at least two digits
   */
  public static String convertMillisToDate(double milliSeconds) {
    long millis = (long) milliSeconds;
    DecimalFormat format = new DecimalFormat();
    format.setMinimumIntegerDigits(2);
    return String.format(
        "%s:%s /",
        format.format(TimeUnit.MILLISECONDS.toMinutes(millis)),
        format.format(
            TimeUnit.MILLISECONDS.toSeconds(millis)
                - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis))));
  }

  /**
   * Opens a streaming-recognize session (LINEAR16, 16 kHz, interim results enabled) and
   * prints interim and final transcripts to the console.
   *
   * <p>On a fresh stream, any audio from the previous stream that was not yet covered by
   * a final result is resent first, and {@code bridgingOffset} is updated so displayed
   * timestamps remain continuous across restarts.
   *
   * <p>NOTE(review): the audio-feed code (reading from {@code sharedQueue}) is commented
   * out in the original, so the inner {@code while (true)} loop currently busy-spins and
   * sends no audio after the initial resend; this method only returns if an exception
   * escapes the loop. Confirm where audio is meant to be injected (likely via the
   * exposed {@link #responseObserver2}).
   *
   * @param languageCode BCP-47 language code for recognition, e.g. "zh" or "en-US"
   * @return the client stream for this session (also published in {@link #responseObserver2}),
   *     or {@code null} if the client could not be created
   */
  public static ClientStream<StreamingRecognizeRequest> infiniteStreamingRecognize(String languageCode) {
    ClientStream<StreamingRecognizeRequest> clientStream = null;
    ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
    // Receive recognition results.
    try (SpeechClient client = SpeechClient.create()) {
      responseObserver =
          new ResponseObserver<StreamingRecognizeResponse>() {

            public void onStart(StreamController controller) {
              referenceToStreamController = controller;
            }

            public void onResponse(StreamingRecognizeResponse response) {
              // Guard against responses with no results/alternatives to avoid
              // IndexOutOfBoundsException killing the observer.
              if (response.getResultsList().isEmpty()) {
                return;
              }
              StreamingRecognitionResult result = response.getResultsList().get(0);
              Duration resultEndTime = result.getResultEndTime();
              resultEndTimeInMS =
                  (int)
                      ((resultEndTime.getSeconds() * 1000) + (resultEndTime.getNanos() / 1000000));
              // Correct the in-stream timestamp for resent audio and prior restarts.
              double correctedTime =
                  resultEndTimeInMS - bridgingOffset + (STREAMING_LIMIT * restartCounter);

              if (result.getAlternativesList().isEmpty()) {
                return;
              }
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              if (result.getIsFinal()) {
                System.out.print(GREEN);
                System.out.print("\033[2K\r");
                System.out.printf(
                    "====>: %s: %s [confidence: %.2f]\n",
                    convertMillisToDate(correctedTime),
                    alternative.getTranscript(),
                    alternative.getConfidence());
                isFinalEndTime = resultEndTimeInMS;
                lastTranscriptWasFinal = true;
              } else {
                // Interim (non-final) hypothesis.
                System.out.println("识别字： " + alternative.getTranscript());
                lastTranscriptWasFinal = false;
              }
            }

            public void onComplete() {
            }

            public void onError(Throwable t) {
              // Surface stream failures instead of swallowing them silently.
              System.out.println("Streaming recognize error: " + t);
            }
          };
      clientStream = client.streamingRecognizeCallable().splitCall(responseObserver);

      // Recognition config: 16-bit linear PCM at 16 kHz.
      RecognitionConfig recognitionConfig =
          RecognitionConfig.newBuilder()
              .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
              .setLanguageCode(languageCode)
              .setSampleRateHertz(16000)
              .build();

      // Streaming config: deliver interim (partial) results as well as final ones.
      StreamingRecognitionConfig streamingRecognitionConfig =
          StreamingRecognitionConfig.newBuilder()
              .setConfig(recognitionConfig)
              .setInterimResults(true)
              .build();

      // The first request in a streaming call has to be the config.
      StreamingRecognizeRequest request =
          StreamingRecognizeRequest.newBuilder()
              .setStreamingConfig(streamingRecognitionConfig)
              .build();

      clientStream.send(request);
      responseObserver2 = clientStream;
      try {
        while (true) {
          if ((newStream) && (lastAudioInput.size() > 0)) {
            // First audio of a restarted stream: compute how much unfinalized audio
            // from the previous stream must be resent before new audio.
            // Cast before dividing — plain int division would truncate the per-chunk
            // duration and skew chunksFromMs / bridgingOffset below.
            double chunkTime = (double) STREAMING_LIMIT / lastAudioInput.size();
            // chunkTime is the ms length of each chunk in the previous stream's audio.
            if (chunkTime != 0) {
              if (bridgingOffset < 0) {
                // bridgingOffset accounts for the time of resent audio; clamp to valid range.
                bridgingOffset = 0;
              }
              if (bridgingOffset > finalRequestEndTime) {
                bridgingOffset = finalRequestEndTime;
              }
              // Number of already-finalized chunks that do NOT need to be resent.
              int chunksFromMs =
                  (int) Math.floor((finalRequestEndTime - bridgingOffset) / chunkTime);
              // Set the bridging offset for the next stream restart.
              bridgingOffset =
                  (int) Math.floor((lastAudioInput.size() - chunksFromMs) * chunkTime);
              // Resend the unfinalized tail of the previous stream's audio.
              for (int i = chunksFromMs; i < lastAudioInput.size(); i++) {
                request =
                    StreamingRecognizeRequest.newBuilder()
                        .setAudioContent(lastAudioInput.get(i))
                        .build();
                clientStream.send(request);
              }
            }
            newStream = false;
          }

          // NOTE(review): the original audio-feed code (blocking take from sharedQueue,
          // wrapping the bytes in a StreamingRecognizeRequest and sending it) is
          // commented out upstream, so this loop currently busy-spins and pegs a CPU
          // core while sending no audio. Audio is presumably injected externally via
          // responseObserver2 — confirm, then either restore the queue consumption here
          // or replace this loop with a proper blocking wait.
        }
      } catch (Exception e) {
        e.printStackTrace();
        System.out.println(e);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }

    return clientStream;
  }

}
// [END speech_transcribe_infinite_streaming]
