package com.liam.websocketdemo.service.impl;

import com.liam.websocketdemo.service.TransService;
import com.liam.websocketdemo.utils.AuthV4Util;
import com.liam.websocketdemo.utils.WebSocketUtil;
import okio.ByteString;
import org.springframework.stereotype.Service;

import javax.sound.sampled.*;
import java.nio.charset.StandardCharsets;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;

/**
 * @Author: LiamLMK
 * @CreateTime: 2024-11-20
 * @Description:
 * @Version: 1.0
 */

@Service
public class TransServiceImpl implements TransService {

    // NOTE(security): credentials are committed in source. They should be moved to
    // external configuration (application.properties / environment variables) and
    // these ones rotated.
    private static final String APP_KEY = "2c9b4317f0a60c6d";     // application ID
    private static final String APP_SECRET = "8tAbon5rWMaCRIaKenPjOgJJpzS8kM8T";  // application secret

    /** Number of audio bytes sent per WebSocket binary frame. */
    private static final int BUFFER_SIZE = 6400;

    /**
     * Starts a streaming speech-translation session: builds the base request
     * parameters, signs them, opens the WebSocket connection, then streams
     * microphone audio until the capture line is closed.
     *
     * @throws Exception if signing, connection setup, or audio capture fails
     */
    @Override
    public void startTranslation() throws Exception {
        // Base request parameters (language pair, audio format, ...).
        Map<String, String[]> params = createRequestParams();
        // Sign the request: AuthV4Util adds the auth-related entries to `params`.
        AuthV4Util.addAuthParams(APP_KEY, APP_SECRET, params);
        // Open the streaming translation WebSocket.
        WebSocketUtil.initConnection("wss://openapi.youdao.com/stream_speech_trans", params);
        // Stream microphone audio over the connection until the line closes.
        captureAndSendAudio();
    }

    /**
     * Captures PCM audio (16 kHz, 16-bit, mono, signed, little-endian) from the
     * default microphone and forwards it to the WebSocket in {@link #BUFFER_SIZE}
     * chunks until the line is closed, then sends the end-of-stream marker.
     *
     * @throws LineUnavailableException if no microphone line supports the format
     * @throws InterruptedException     if the pacing sleep is interrupted
     */
    public static void captureAndSendAudio() throws LineUnavailableException, InterruptedException {
        // Must match the "rate"/"channel" request parameters sent at connect time.
        AudioFormat format = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        if (!AudioSystem.isLineSupported(info)) {
            throw new LineUnavailableException("Microphone not supported");
        }

        TargetDataLine microphone = (TargetDataLine) AudioSystem.getLine(info);
        microphone.open(format);
        microphone.start();

        byte[] buffer = new byte[BUFFER_SIZE];

        System.out.println("Start capturing audio from microphone...");

        // try/finally guarantees the line is released even when sendBinaryMessage
        // or Thread.sleep throws mid-loop (the original leaked the line on error).
        try {
            // Keep reading until the line is closed by another party.
            while (microphone.isOpen()) {
                int bytesRead = microphone.read(buffer, 0, buffer.length);
                if (bytesRead > 0) {
                    WebSocketUtil.sendBinaryMessage(ByteString.of(buffer, 0, bytesRead));
                    Thread.sleep(200); // pace the uplink
                }
            }
        } finally {
            microphone.stop();
            microphone.close();
        }

        // Tell the server the audio stream is complete. Encode explicitly as
        // UTF-8 rather than relying on the platform default charset.
        byte[] closeBytes = "{\"end\": \"true\"}".getBytes(StandardCharsets.UTF_8);
        WebSocketUtil.sendBinaryMessage(ByteString.of(closeBytes));
        System.out.println("Audio capturing stopped.");
    }

    /**
     * Builds the base (unsigned) request parameters for the streaming
     * speech-translation API: Chinese (zh-CHS) to English, 16 kHz mono WAV.
     *
     * @return a mutable map; {@code AuthV4Util.addAuthParams} later inserts the
     *         auth entries into it, so an immutable map must not be returned
     */
    private static Map<String, String[]> createRequestParams() {
        // Plain HashMap instead of double-brace initialization (which creates an
        // anonymous inner class holding an enclosing reference).
        Map<String, String[]> params = new HashMap<>();
        params.put("from", new String[]{"zh-CHS"});  // source language
        params.put("to", new String[]{"en"});        // target language
        params.put("format", new String[]{"wav"});   // audio container format
        params.put("channel", new String[]{"1"});    // mono
        params.put("version", new String[]{"v1"});
        params.put("rate", new String[]{"16000"});   // sample rate, matches AudioFormat
        return params;
    }
}
