package com.jxw.cloudpen.web.ify;

import cn.xfyun.api.IatClient;
import cn.xfyun.model.response.iat.IatResponse;
import cn.xfyun.model.response.iat.IatResult;
import cn.xfyun.model.response.iat.Text;
import cn.xfyun.service.iat.AbstractIatWebSocketListener;
import com.alibaba.fastjson.JSONObject;
import com.jxw.cloudpen.web.ify.util.MicrophoneRecorderUtil;
import com.jxw.cloudpen.web.microsoft.stremspeed.StreamData;
import lombok.extern.slf4j.Slf4j;
import okhttp3.Response;
import okhttp3.WebSocket;
import org.apache.commons.codec.binary.StringUtils;

import javax.sound.sampled.*;
import java.io.*;
import java.net.MalformedURLException;
import java.security.SignatureException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;


/**
 * IAT( iFly Auto Transform ) 语音听写
 * 1、APPID、APISecret、APIKey信息获取：<a href="https://console.xfyun.cn/services/iat">...</a>
 * 2、文档地址：<a href="https://www.xfyun.cn/doc/asr/voicedictation/API.html">...</a>
 *
 * @author kaili23
 */
//@Slf4j
public class IatClientApp {

    /**
     * 服务鉴权参数 (service authentication credentials).
     * SECURITY NOTE(review): credentials are hard-coded in source. Move them to
     * external configuration / environment variables before shipping; rotate the
     * keys if this file has been committed to a shared repository.
     */
    private static final String APP_ID = "604041b8";
    private static final String API_KEY = "760d2e51fe1e9a9bd9859fc6f2e2afbc";
    private static final String API_SECRET = "e83378eecf20a6bf41c5b5e2b0956fe0";

    /**
     * 音频文件路径 — path of the PCM file consumed by {@link #processAudioFromFile()}.
     * Currently never assigned (the assignment was commented out), so that method
     * will fail until a path is configured.
     */
    private static String audioFilePath;

    /**
     * Timestamp formatter for session duration logging.
     * FIX: pattern was "yyy-MM-dd ..." which renders 3-digit years; corrected to "yyyy".
     * NOTE: SimpleDateFormat is not thread-safe; it is only touched from the
     * websocket callback here, but prefer DateTimeFormatter if access widens.
     */
    private static final SimpleDateFormat SDF = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");

    /** Start time of the current recognition session (set by each entry point). */
    private static Date dateBegin;

    /** Accumulated result segments; "rpl" frames mark earlier entries deleted rather than removing them. */
    private static List<Text> resultSegments;

    /**
     * 语音听写客户端 — shared websocket dictation client, configured once.
     */
    private static final IatClient IAT_CLIENT;

    static {
        IAT_CLIENT = new IatClient.Builder()
                .signature(APP_ID, API_KEY, API_SECRET)
                // 动态修正功能：值为wpgs时代表开启（包含修正功能的）流式听写
                .dwa("wpgs")
                .language("zh")
                .domain("iat")
                .ptt(1)
                .build();
    }

    /**
     * WebSocket监听器实现，用于处理语音听写结果
     * 功能说明：
     * 1、成功回调：解析中间/最终结果，处理错误码；
     * 2、失败回调：记录通信异常。
     */
    private static final AbstractIatWebSocketListener IAT_LISTENER = new AbstractIatWebSocketListener() {

        @Override
        public void onSuccess(WebSocket webSocket, IatResponse iatResponse) {
            if (iatResponse.getCode() != 0) {
                // FIX: original concatenated values after an SLF4J-style "{}" template,
                // printing literally "code：{}, error：{}, sid：{}<code><msg><sid>".
                System.out.println("code：" + iatResponse.getCode()
                        + ", error：" + iatResponse.getMessage()
                        + ", sid：" + iatResponse.getSid());
                System.out.println("错误码查询链接：https://www.xfyun.cn/document/error-code");
                return;
            }

            if (iatResponse.getData() == null) {
                return;
            }

            System.out.println("iatResponse is " + JSONObject.toJSONString(iatResponse.getData()));
            if (iatResponse.getData().getResult() != null) {
                // 解析服务端返回结果
                IatResult result = iatResponse.getData().getResult();
                handleResultText(result.getText());
                System.out.println("中间识别结果：" + getFinalResult());
            }

            if (iatResponse.getData().getStatus() == 2) {
                // resp.data.status == 2 说明数据全部返回完毕，可以关闭连接，释放资源
                System.out.println("session end ");
                Date dateEnd = new Date();
                System.out.println("识别开始时间：" + SDF.format(dateBegin));
                System.out.println("识别结束时间：" + SDF.format(dateEnd));
                System.out.println("总耗时：" + (dateEnd.getTime() - dateBegin.getTime()));
                System.out.println("最终识别结果：【" + getFinalResult() + "】，本次识别sid：" + iatResponse.getSid());
                IAT_CLIENT.closeWebsocket();
                System.exit(0);
            }
        }

        @Override
        public void onFail(WebSocket webSocket, Throwable t, Response response) {
            // FIX: failures were silently swallowed, leaving the caller blocked with
            // no diagnostic. At minimum surface the error.
            System.err.println("IAT websocket failure: "
                    + (t != null ? t.getMessage() : "unknown")
                    + (response != null ? ", response=" + response : ""));
        }

    };

    /** Shared worker pool (kept for API compatibility; currently unused by main). */
    static ExecutorService executorService = Executors.newFixedThreadPool(3);

    /**
     * Demo entry point: streams microphone audio to the dictation service.
     */
    public static void main(String[] args) throws SignatureException, LineUnavailableException, IOException {
        // 方式一：处理从文件中获取的音频数据 — processAudioFromFile()
        // 方式二：处理麦克风输入的音频数据
        processAudioFromMicrophone();
    }

    /**
     * 处理从文件中获取的音频数据.
     * Sends the file at {@link #audioFilePath} to the dictation service; results
     * arrive asynchronously on {@link #IAT_LISTENER}.
     *
     * @throws RuntimeException if the file is missing, the URL is malformed,
     *                          or API signing fails
     */
    public static void processAudioFromFile() {
        // 记录操作耗时与最终结果
        dateBegin = new Date();
        resultSegments = new ArrayList<>();

        try {
            File file = new File(audioFilePath);
            IAT_CLIENT.send(file, IAT_LISTENER);
        } catch (FileNotFoundException e) {
            // FIX: preserve the cause instead of dropping it
            throw new RuntimeException("音频文件加载失败，请检查路径：" + audioFilePath, e);
        } catch (MalformedURLException e) {
            throw new RuntimeException("音频服务地址配置错误", e);
        } catch (SignatureException e) {
            // FIX: preserve the cause instead of dropping it
            throw new RuntimeException("服务鉴权失败，请检查API密钥配置", e);
        }
    }

    /**
     * 处理麦克风输入的音频数据.
     * Pipes live microphone audio into the streaming dictation client; the
     * websocket is closed by the listener when the final frame arrives.
     *
     * @throws RuntimeException on recording-device, signing, or stream failures
     */
    public static void processAudioFromMicrophone() {
        MicrophoneRecorderUtil recorder = null;

        try (Scanner scanner = new Scanner(System.in)) {
            System.out.println("按回车开始实时听写...");
            scanner.nextLine();

            // 创建带缓冲的音频管道流
            PipedInputStream audioInputStream = new PipedInputStream();
            PipedOutputStream audioOutputStream = new PipedOutputStream(audioInputStream);

            // 配置录音工具
            recorder = new MicrophoneRecorderUtil();

            // 开始录音并初始化状态
            dateBegin = new Date();
            resultSegments = new ArrayList<>();
            recorder.startRecording(audioOutputStream);

            // 调用流式听写服务（只调用一次，后续数据经管道持续送入）
            IAT_CLIENT.send(audioInputStream, IAT_LISTENER);

            System.out.println("正在聆听，按回车结束听写...");
            scanner.nextLine();
        } catch (LineUnavailableException e) {
            throw new RuntimeException("麦克风初始化失败，请检查录音设备", e);
        } catch (SignatureException e) {
            throw new RuntimeException("服务鉴权异常，请检查密钥配置", e);
        } catch (IOException e) {
            throw new RuntimeException("音频数据传输失败", e);
        } finally {
            // 释放资源
            if (recorder != null) {
                recorder.stopRecording();
            }
            // 此处取消了手动关闭WebSocket连接，listener收到服务端最后一帧后再关闭连接。
        }
    }

    /**
     * 处理返回结果（包括全量返回与流式返回（结果修正））.
     * When the frame's pgs flag is "rpl", the rg range names previously returned
     * segments to replace; those entries are marked deleted before the new text
     * is appended.
     * NOTE(review): rg values are sn sequence numbers (1-based); this code treats
     * them as 1-based indices into {@code resultSegments}, which holds only if
     * frames arrive with consecutive sn starting at 1 — confirm against the API.
     */
    private static void handleResultText(Text textObject) {
        // 处理流式返回的替换结果（null-safe equals：pgs 可能为 null）
        if ("rpl".equals(textObject.getPgs()) && textObject.getRg() != null && textObject.getRg().length == 2) {
            // 返回结果序号sn字段的最小值为1，转换为0基索引
            int start = textObject.getRg()[0] - 1;
            int end = textObject.getRg()[1] - 1;

            // 将指定区间的结果设置为删除状态
            for (int i = Math.max(start, 0); i <= end && i < resultSegments.size(); i++) {
                resultSegments.get(i).setDeleted(true);
            }
        }

        // 通用逻辑，添加当前文本到结果列表
        resultSegments.add(textObject);
    }

    /**
     * 获取最终结果 — concatenates all segments not marked deleted.
     */
    private static String getFinalResult() {
        StringBuilder finalResult = new StringBuilder();
        for (Text text : resultSegments) {
            if (text != null && !text.isDeleted()) {
                finalResult.append(text.getText());
            }
        }
        return finalResult.toString();
    }

    private static TargetDataLine targetDataLine;

    /** Kept for API compatibility; no longer used by {@link #executeMusic()}. */
    private static volatile BlockingQueue<byte[]> sharedQueue = new LinkedBlockingQueue<byte[]>();

    /**
     * Captures microphone audio (16 kHz, 16-bit, mono, little-endian PCM) and
     * streams it to the dictation service.
     * FIXES vs. original:
     * - {@code IAT_CLIENT.send(...)} was invoked inside the capture loop, once
     *   per 1280-byte chunk, opening a new recognition session per frame; the
     *   stream is now handed to the client exactly once.
     * - Only {@code numBytesRead} bytes are written per chunk (the original
     *   wrote the whole cloned buffer, including stale bytes on short reads).
     * - {@link #dateBegin}/{@link #resultSegments} are now initialized, so the
     *   listener no longer NPEs when results arrive via this path.
     *
     * @return an empty {@link StreamData} placeholder (results are delivered
     *         asynchronously through {@link #IAT_LISTENER})
     * @throws LineUnavailableException if the microphone line cannot be opened
     */
    public static StreamData executeMusic() throws LineUnavailableException {
        // SampleRate: 16000Hz, SampleSizeInBits: 16, channels: 1, signed, little-endian
        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info targetInfo = new DataLine.Info(TargetDataLine.class, audioFormat);

        if (!AudioSystem.isLineSupported(targetInfo)) {
            System.out.println("Microphone not supported");
            System.exit(0);
        }

        // Target data line captures the audio stream the microphone produces.
        targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
        targetDataLine.open(audioFormat);
        System.out.println("Start speaking...Press Ctrl-C to stop");
        targetDataLine.start();

        PipedInputStream audioInputStream = new PipedInputStream(1280);
        final PipedOutputStream audioOutputStream;
        try {
            audioOutputStream = new PipedOutputStream(audioInputStream);
        } catch (IOException e) {
            throw new RuntimeException("音频数据传输失败", e);
        }

        StreamData result = new StreamData();

        // Capture thread: reads microphone chunks and feeds the pipe only.
        Thread micThread = new Thread(() -> {
            byte[] data = new byte[1280];
            while (targetDataLine.isOpen()) {
                try {
                    int numBytesRead = targetDataLine.read(data, 0, data.length);
                    if (numBytesRead <= 0) {
                        continue;
                    }
                    audioOutputStream.write(data, 0, numBytesRead);
                    audioOutputStream.flush();
                } catch (Exception e) {
                    System.out.println("Microphone input buffering interrupted : " + e.getMessage());
                }
            }
        });
        micThread.setDaemon(true);
        micThread.start();

        // Initialize session state, then hand the stream to the client ONCE.
        dateBegin = new Date();
        resultSegments = new ArrayList<>();
        try {
            IAT_CLIENT.send(audioInputStream, IAT_LISTENER);
        } catch (SignatureException e) {
            throw new RuntimeException("服务鉴权异常，请检查密钥配置", e);
        }

        return result;
    }

}