package com.dragon.speech.service;

import com.google.api.gax.grpc.ApiStreamObserver;
import com.google.api.gax.grpc.StreamingCallable;
import com.google.cloud.speech.v1.*;
import com.google.common.util.concurrent.SettableFuture;
import com.google.protobuf.ByteString;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.*;

/**
 * Google Cloud Speech API wrapper: converts speech audio to text.
 * Created by jingcai.li on 2017/5/21.
 */
@Component
public class GoogleSpeech {
    public static final Logger logger = LoggerFactory.getLogger(GoogleSpeech.class);

    /**
     * Performs synchronous speech recognition on raw LINEAR16 PCM audio
     * (44.1 kHz, en-US). The individual words of {@code originalText} are
     * passed to the API as phrase hints to bias recognition.
     *
     * @param data         raw PCM audio bytes
     * @param originalText reference text whose words are used as recognition hints
     * @return the concatenated transcript of every alternative in every result
     * @throws Exception if the client cannot be created or the recognition call fails
     */
    public String speechRecognize(byte[] data, String originalText) throws Exception {
        // Copy into a mutable list: Arrays.asList(...) is a fixed-size view, so
        // removeIf on it throws UnsupportedOperationException as soon as an
        // empty token (e.g. from ", " or trailing punctuation) must be removed.
        List<String> phrases = new ArrayList<>(Arrays.asList(originalText.split("\\s+|[,]|[.]")));
        phrases.removeIf(String::isEmpty);
        SpeechContext speechContext = SpeechContext.newBuilder().addAllPhrases(phrases).build();

        // try-with-resources: the gRPC client is closed even when recognize()
        // throws (previously it leaked on the failure path).
        try (SpeechClient speech = SpeechClient.create()) {
            RecognitionConfig config = RecognitionConfig.newBuilder()
                    .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                    .setSampleRateHertz(44100)
                    .setLanguageCode("en-US")
                    .addSpeechContexts(speechContext)
                    .build();
            RecognitionAudio audio = RecognitionAudio.newBuilder()
                    .setContent(ByteString.copyFrom(data))
                    .build();

            // Performs synchronous speech recognition on the audio content.
            RecognizeResponse response = speech.recognize(config, audio);

            StringBuilder text = new StringBuilder();
            for (SpeechRecognitionResult result : response.getResultsList()) {
                for (SpeechRecognitionAlternative alternative : result.getAlternativesList()) {
                    text.append(alternative.getTranscript());
                }
            }
            return text.toString();
        }
    }

    /**
     * Performs streaming speech recognition on raw LINEAR16 PCM audio
     * (44.1 kHz, en-US). Both the individual words and the comma/period-delimited
     * phrases of {@code originalText} are supplied as recognition hints.
     *
     * @param data         raw PCM audio bytes
     * @param originalText reference text used to build recognition hints
     * @return the transcripts of all alternatives across all streaming responses
     * @throws Exception if the client cannot be created or the streaming call fails
     */
    public List<String> streamingRecognize(byte[] data, String originalText) throws Exception {
        // Hints: individual words plus longer comma/period-delimited phrases,
        // deduplicated through a set; drop empty tokens produced by the splits.
        Set<String> phrasesSet = new HashSet<>();
        CollectionUtils.addAll(phrasesSet, originalText.split("\\s+|[,]|[.]"));
        CollectionUtils.addAll(phrasesSet, originalText.split(",\\s+|\\.\\s+"));
        phrasesSet.removeIf(String::isEmpty);
        SpeechContext speechContext = SpeechContext.newBuilder().addAllPhrases(phrasesSet).build();

        // Collects every streamed response and exposes the full list (or the
        // terminal error) through a SettableFuture once the stream completes.
        class ResponseApiStreamingObserver<T> implements ApiStreamObserver<T> {
            private final SettableFuture<List<T>> future = SettableFuture.create();
            private final List<T> messages = new ArrayList<>();

            @Override
            public void onNext(T message) {
                messages.add(message);
            }

            @Override
            public void onError(Throwable t) {
                future.setException(t);
            }

            @Override
            public void onCompleted() {
                future.set(messages);
            }

            // Returns the SettableFuture object to get received messages / exceptions.
            public SettableFuture<List<T>> future() {
                return future;
            }
        }

        // Client authenticates via GOOGLE_APPLICATION_CREDENTIALS; try-with-resources
        // closes it even when the streaming call fails (previously leaked on error).
        try (SpeechClient speech = SpeechClient.create()) {
            RecognitionConfig recConfig = RecognitionConfig.newBuilder()
                    .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                    .setLanguageCode("en-US")
                    .setSampleRateHertz(44100)
                    .addSpeechContexts(speechContext)
                    .build();
            StreamingRecognitionConfig config = StreamingRecognitionConfig.newBuilder()
                    .setConfig(recConfig)
                    .setSingleUtterance(true)
                    .build();

            ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver =
                    new ResponseApiStreamingObserver<>();

            StreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
                    speech.streamingRecognizeCallable();

            ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
                    callable.bidiStreamingCall(responseObserver);

            // The first request must **only** contain the audio configuration:
            requestObserver.onNext(StreamingRecognizeRequest.newBuilder()
                    .setStreamingConfig(config)
                    .build());

            // Subsequent requests must **only** contain the audio data.
            requestObserver.onNext(StreamingRecognizeRequest.newBuilder()
                    .setAudioContent(ByteString.copyFrom(data))
                    .build());

            // Mark transmission as completed after sending the data.
            requestObserver.onCompleted();

            // Block until the stream finishes (or rethrows its error), then
            // flatten every alternative's transcript into a flat list.
            List<StreamingRecognizeResponse> responses = responseObserver.future().get();
            logger.info("识别结果:{}", responses);
            List<String> textList = new ArrayList<>();
            for (StreamingRecognizeResponse response : responses) {
                for (StreamingRecognitionResult result : response.getResultsList()) {
                    for (SpeechRecognitionAlternative alternative : result.getAlternativesList()) {
                        textList.add(alternative.getTranscript());
                    }
                }
            }
            return textList;
        }
    }
}
