package com.ruoyi.utils.alicloud;

import com.alibaba.nls.client.AccessToken;
import com.alibaba.nls.client.protocol.NlsClient;
import com.alibaba.nls.client.protocol.OutputFormatEnum;
import com.alibaba.nls.client.protocol.SampleRateEnum;
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizer;
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerListener;
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerResponse;
import com.ruoyi.retirement.domain.RetirementConfig;
import com.ruoyi.retirement.service.IRetirementConfigService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.UUID;

@Component
public class SoundOSS_Dialect {
    /** Local storage root directory (served under the "/profile" URL prefix). */
    @Value("${ruoyi.profile}")
    private String filePath;

    @Autowired
    private IRetirementConfigService retirementConfigService;

    /** Static self-reference so the Spring-managed bean is reachable from static contexts. */
    public static SoundOSS_Dialect soundOSS_Dialect;

    @PostConstruct
    public void init() {
        // Expose the Spring-injected service through the static instance.
        soundOSS_Dialect = this;
        soundOSS_Dialect.retirementConfigService = this.retirementConfigService;
    }

    // NOTE(review): static mutable credentials are overwritten on every call; concurrent
    // uploads with different configs could interleave — confirm single-config usage.
    private static String APP_KEY = "";
    private static String ACCESS_KEY_ID = "";
    private static String ACCESS_KEY_SECRET = "";

    static NlsClient client;

    /**
     * Synthesizes {@code text} into a WAV file under {@code filePath} and returns
     * its web-accessible path.
     *
     * @param text     text to synthesize
     * @param voiceStr requested voice (currently unused; a fixed voice is set in {@link #process})
     * @return relative URL of the generated audio file, e.g. {@code /profile/<uuid>.wav}
     */
    public String uploadSoundOSS(String text, String voiceStr) {
        // Config row 1 holds the Alibaba Cloud credentials.
        RetirementConfig retirementConfig =
                soundOSS_Dialect.retirementConfigService.selectRetirementConfigById(1L);
        APP_KEY = retirementConfig.getOssAppKey();
        ACCESS_KEY_ID = retirementConfig.getAccesskeyId();
        ACCESS_KEY_SECRET = retirementConfig.getAccesskeySecret();

        String fileName = UUID.randomUUID().toString().replace("-", "");
        SpeechSynthesizerDemo("");
        File f = new File(filePath + "/" + fileName + ".wav");
        try {
            process(text, f, voiceStr);
        } finally {
            // FIX: shut the client down even if synthesis fails, and guard against a
            // null client (token application may have failed in SpeechSynthesizerDemo).
            if (client != null) {
                client.shutdown();
            }
        }
        return "/profile/" + fileName + ".wav";
    }

    /**
     * Applies a fresh access token and (re)creates the NLS client.
     *
     * @param url NLS gateway URL; when empty the SDK's default endpoint is used
     */
    public void SpeechSynthesizerDemo(String url) {
        AccessToken accessToken = new AccessToken(ACCESS_KEY_ID, ACCESS_KEY_SECRET);
        try {
            accessToken.apply();
            if (url.isEmpty()) {
                client = new NlsClient(accessToken.getToken());
            } else {
                client = new NlsClient(url, accessToken.getToken());
            }
        } catch (IOException e) {
            // Token application failed; client stays unset and synthesis will fail downstream.
            e.printStackTrace();
        }
    }

    /**
     * Builds a listener that streams received audio chunks into {@code f}.
     * FIX: the FileOutputStream is now closed on completion and on failure
     * (it previously leaked, risking an unflushed/locked output file).
     *
     * @param f destination file for the synthesized audio
     * @return listener wired to {@code f}, or {@code null} if the file cannot be opened
     */
    private SpeechSynthesizerListener getSynthesizerListener(File f) {
        SpeechSynthesizerListener listener = null;
        try {
            listener = new SpeechSynthesizerListener() {
                FileOutputStream fout = new FileOutputStream(f);

                // Invoked once all TTS data has been received; this end-to-end latency can
                // be large and is not suitable for real-time playback measurement.
                @Override
                public void onComplete(SpeechSynthesizerResponse response) {
                    closeOutput();
                    System.out.println("name: " + response.getName() +
                            ", status: " + response.getStatus() +
                            ", output file :" + f.getAbsolutePath()
                    );
                }

                // Invoked for each chunk of synthesized audio binary data.
                @Override
                public void onMessage(ByteBuffer message) {
                    try {
                        byte[] bytesArray = new byte[message.remaining()];
                        message.get(bytesArray, 0, bytesArray.length);
                        fout.write(bytesArray);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }

                @Override
                public void onFail(SpeechSynthesizerResponse response) {
                    closeOutput();
                    // task_id uniquely identifies this request when troubleshooting with
                    // the service; status 20000000 means success.
                    System.out.println(
                            "task_id: " + response.getTaskId() +
                                    ", status: " + response.getStatus() +
                                    ", status_text: " + response.getStatusText());
                }

                // Releases the output stream exactly once the service signals it is done.
                private void closeOutput() {
                    try {
                        fout.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            };
        } catch (Exception e) {
            e.printStackTrace();
        }
        return listener;
    }

    /**
     * Runs one synthesis request: sends {@code text} to the NLS service and blocks
     * until the audio has been fully written to {@code f}.
     *
     * @param text     text to synthesize
     * @param f        destination WAV file
     * @param voiceStr requested voice (currently ignored; the "zhiru" voice is always used)
     */
    public void process(String text, File f, String voiceStr) {
        SpeechSynthesizer synthesizer = null;
        try {
            // Create the instance and open the connection.
            synthesizer = new SpeechSynthesizer(client, getSynthesizerListener(f));
            synthesizer.setAppKey(APP_KEY);
            // Output encoding and sample rate of the returned audio.
            synthesizer.setFormat(OutputFormatEnum.WAV);
            synthesizer.setSampleRate(SampleRateEnum.SAMPLE_RATE_16K);
            // Fixed voice; voiceStr is intentionally not consulted (see removed legacy code).
            synthesizer.setVoice("zhiru");
            // Pitch, range -500..500, default 0.
            synthesizer.setPitchRate(10);
            // Speech rate, range -500..500, default 0.
            synthesizer.setSpeechRate(5);
            synthesizer.setText(text);
            // Subtitle timestamps disabled; not all voices support this parameter.
            synthesizer.addCustomedParam("enable_subtitle", false);
            // Serializes the settings to JSON, sends them, and waits for the server ack.
            synthesizer.start();
            // Block until synthesis finishes and onComplete/onFail has fired.
            synthesizer.waitForComplete();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Always release the connection.
            if (null != synthesizer) {
                synthesizer.close();
            }
        }
    }
}
