import { Injectable } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import * as sdk from 'microsoft-cognitiveservices-speech-sdk';
import { TtsDtoResDto } from './dto/tts.dto';
import { getStaticPath } from 'src/utils/file.util';

@Injectable()
export class TtsService {
  constructor(private configService: ConfigService) {}

  /**
   * Convert text to an audio file (thin wrapper around {@link translate}).
   *
   * @param option text and voice settings for synthesis
   * @param cuid caller id, used as the output file-name prefix
   * @returns metadata (path, name, duration, size, type) of the generated mp3
   */
  public async txt2audio(
    option: TranslateOption,
    cuid: string,
  ): Promise<TtsDtoResDto> {
    return await this.translate(option, cuid);
  }

  /**
   * Synthesize speech via the Microsoft Cognitive Services Speech SDK and
   * write the result as an mp3 file under the configured upload directory.
   *
   * Rate control is only available through SSML `<prosody>`, so the request
   * is sent with `speakSsmlAsync` rather than `speakTextAsync`.
   *
   * @param option text (`tex`) and voice name for synthesis
   * @param cuid caller id, used as the output file-name prefix
   * @returns metadata of the generated audio file
   * @throws Error when synthesis is canceled or the SDK reports a failure
   *         (e.g. invalid speech resource key / region)
   */
  public async translate(
    option: TranslateOption,
    cuid: string,
  ): Promise<TtsDtoResDto> {
    const { tex, voiceName } = option;
    const language = 'zh-CN';
    // Normal speed; kept as a named constant so it is easy to surface as an
    // option later (see the commented-out fields on TranslateOption).
    const rate = '0%';
    const { key, region } = this.configService.get('tts');
    const { dirPath, dirPrefix } = this.configService.get('upload');
    return new Promise((resolve, reject) => {
      const name = `${cuid}-${Date.now()}.mp3`;
      const audioFile = `${dirPath}${dirPrefix}/${name}`;
      const speechConfig = sdk.SpeechConfig.fromSubscription(key, region);
      const audioConfig = sdk.AudioConfig.fromAudioFileOutput(audioFile);
      speechConfig.speechSynthesisVoiceName = voiceName;
      speechConfig.speechSynthesisLanguage = language;

      let synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
      // Escape user-supplied text before embedding it in the SSML document:
      // raw '&', '<' or quotes would otherwise break the XML or allow SSML
      // injection.
      const ssml = `
      <speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis'
          xml:lang='${language}'>
          <voice name='${TtsService.escapeXml(voiceName)}'>
              <prosody rate='${rate}'>${TtsService.escapeXml(tex)}</prosody>
          </voice>
      </speak>`;

      synthesizer.speakSsmlAsync(
        ssml,
        function (result: sdk.SpeechSynthesisResult) {
          synthesizer.close();
          synthesizer = null;
          if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
            console.log('语音合成完成');
            resolve({
              path: getStaticPath(`${dirPrefix}/${name}`),
              name,
              // audioDuration is in 100-nanosecond ticks; convert to whole
              // seconds.
              duration: Math.round(result.audioDuration / 10000000),
              size: result.audioData.byteLength,
              type: 'audio/mp3',
            });
          } else {
            const msg = `语音合成已取消: ${result.errorDetails}。请检查语音资源密钥和区域值是否正确`;
            console.error(msg);
            // Reject with an Error object (not a bare string) so callers get
            // a stack trace and instanceof checks work.
            reject(new Error(msg));
          }
        },
        function (err: string) {
          console.trace('err - ' + err);
          synthesizer.close();
          synthesizer = null;
          reject(new Error(err));
        },
      );
    });
  }

  /**
   * Escape the five XML special characters so arbitrary text can be safely
   * embedded inside an SSML document.
   */
  private static escapeXml(value: string): string {
    return value
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;')
      .replace(/'/g, '&apos;');
  }
}

/** Input options for a text-to-speech request. */
interface TranslateOption {
  /** The text to synthesize (interpolated into the SSML body). */
  tex: string;
  /** Voice name handed to the Speech SDK and the SSML `<voice>` element. */
  voiceName: string;
  // Planned but not yet wired through (see commented-out code in translate):
  // rate: string;
  // volume: string;
  // style: string;
}
