import 'dart:async';
import 'package:flutter/cupertino.dart';
import 'package:flutter/services.dart';
import 'package:google_speech/generated/google/cloud/speech/v1/cloud_speech.pb.dart';
import 'package:google_speech/google_speech.dart' as gp;
import 'package:sound_stream/sound_stream.dart';
import 'dart:io';
/// Singleton wrapper around Google Cloud streaming speech recognition.
///
/// Combines [RecorderStream] (microphone capture via `sound_stream`) with
/// `google_speech` streaming recognition. Call [initEngine] once, then
/// [onListener] to start streaming and [stop] to end a session.
class SpeechToText {
  static SpeechToText _instance;

  SpeechToText._();

  /// Lazily creates and returns the single shared instance.
  factory SpeechToText() => _instance ??= SpeechToText._();

  gp.ServiceAccount serviceAccount;
  gp.SpeechToText speechToText;
  gp.RecognitionConfig config;
  gp.StreamingRecognitionConfig streamConfig;

  final RecorderStream _recorder = RecorderStream();

  /// Whether [initEngine] has fully completed and recognition may start.
  bool canUse = false;

  /// Active subscription to the recognition response stream; null when idle.
  StreamSubscription<StreamingRecognizeResponse> subscription;

  // NOTE(review): this field is never read or written in this file —
  // presumably dead, but kept because it is public API; verify callers.
  Stream stream;

  /// Initializes the recognition engine.
  ///
  /// Loads service-account credentials from the bundled JSON asset, builds
  /// the recognition configs (LINEAR16, 16 kHz, en-US, automatic
  /// punctuation, interim results) and initializes the microphone recorder.
  /// [canUse] becomes true only after everything has finished.
  Future<void> initEngine() async {
    canUse = false;
    print('initEngine');
    // loadString already returns a String — no interpolation needed.
    serviceAccount ??= gp.ServiceAccount.fromString(
        await rootBundle.loadString('assets/tflite/speech.json'));
    speechToText ??= gp.SpeechToText.viaServiceAccount(serviceAccount);
    config ??= gp.RecognitionConfig(
        encoding: gp.AudioEncoding.LINEAR16,
        model: gp.RecognitionModel.basic,
        enableAutomaticPunctuation: true,
        sampleRateHertz: 16000,
        languageCode: 'en-US');
    streamConfig ??=
        gp.StreamingRecognitionConfig(config: config, interimResults: true);
    // Fix: initialize() is asynchronous; await it so canUse is not raised
    // before the recorder is actually ready to capture audio.
    await _recorder.initialize();
    canUse = true;
  }

  /// Starts microphone capture and streams the audio to the speech API,
  /// invoking [listener] for every [StreamingRecognizeResponse] received.
  ///
  /// Now returns a Future (previously `async` with a `void` return, which
  /// made errors from [RecorderStream.start] unobservable to callers).
  /// Callers that ignored the old `void` return are unaffected.
  Future<void> onListener(
      ValueChanged<StreamingRecognizeResponse> listener) async {
    await _recorder.start();
    subscription = speechToText
        .streamingRecognize(streamConfig, _recorder.audioStream)
        .listen(listener);
  }

  /// Stops a recognition session: cancels the response subscription and
  /// stops the microphone recorder. Safe to call when nothing is running.
  Future<void> stop() async {
    await subscription?.cancel();
    await _recorder?.stop();
  }
}
