package com.enjoy.config;

import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.nls.client.protocol.NlsClient;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriber;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriberListener;
import com.alibaba.nls.client.protocol.asr.SpeechTranscriberResponse;
import com.aliyuncs.CommonRequest;
import com.aliyuncs.CommonResponse;
import com.aliyuncs.DefaultAcsClient;
import com.aliyuncs.IAcsClient;
import com.aliyuncs.exceptions.ClientException;
import com.aliyuncs.http.FormatType;
import com.aliyuncs.http.MethodType;
import com.aliyuncs.http.ProtocolType;
import com.aliyuncs.profile.DefaultProfile;
import com.enjoy.model.command.resp.MeetingMsg;
import lombok.AllArgsConstructor;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.BinaryMessage;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;
import org.springframework.web.socket.handler.BinaryWebSocketHandler;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

@Slf4j
@Component
@AllArgsConstructor
@NoArgsConstructor
public class AudioWebSocketHandler extends BinaryWebSocketHandler {

    // Tingwu/NLS credentials, injected from application configuration.
    // NOTE(review): @AllArgsConstructor + @NoArgsConstructor with @Value field
    // injection works (Spring picks the no-arg constructor), but constructor
    // injection would be cleaner — confirm before removing the Lombok annotations.
    @Value("${appKey}")
    private String appKey;
    @Value("${accessKeyId}")
    private String accessKeyId;
    @Value("${accessKeySecret}")
    private String accessKeySecret;

    // Shared NLS client, reused for every transcriber connection.
    // Made final: it is created once and never reassigned.
    // NOTE(review): "default" is passed verbatim by the original code;
    // presumably a placeholder token/URL for the NLS gateway — TODO confirm.
    private static final NlsClient NLS_CLIENT = new NlsClient("default");
    // Per-session meeting join URL (used to (re)open the transcriber connection).
    private final ConcurrentHashMap<WebSocketSession, String> meetingJoinUrlMap = new ConcurrentHashMap<>();
    // Per-session Tingwu task id.
    private final ConcurrentHashMap<WebSocketSession, String> taskIdMap = new ConcurrentHashMap<>();
    // Per-session meeting start time (epoch millis supplied by the client).
    private final ConcurrentHashMap<WebSocketSession, Long> startTimeMap = new ConcurrentHashMap<>();
    // Per-session active transcriber. (Fixed comment: this is NOT the join URL.)
    private final ConcurrentHashMap<WebSocketSession, SpeechTranscriber> transcriberMap = new ConcurrentHashMap<>();


    /**
     * Handles the initial metadata frame from the client: creates the Tingwu
     * realtime meeting task, opens the transcriber connection, and replies
     * with the created task id.
     *
     * @param session the client WebSocket session
     * @param message JSON payload containing language / startTime / diarizationEnabled
     */
    @Override
    protected void handleTextMessage(WebSocketSession session, TextMessage message) {
        String payload = message.getPayload();
        try {
            createMeeting(session, payload);
            JSONObject root = new JSONObject();
            root.put("taskId", taskIdMap.get(session));
            session.sendMessage(new TextMessage(root.toJSONString()));
        } catch (IOException e) {
            // Preserve the cause so the WebSocket layer logs the full stack trace.
            throw new RuntimeException(e);
        }
    }

    /**
     * Forwards one binary audio frame to the session's transcriber.
     * If the NLS connection was closed in the meantime, it is transparently
     * re-established from the stored meeting join URL before sending.
     *
     * @param session the client WebSocket session
     * @param message raw PCM audio bytes
     */
    @Override
    public void handleBinaryMessage(WebSocketSession session, BinaryMessage message) {
        ByteBuffer byteBuffer = message.getPayload();
        byte[] array = new byte[byteBuffer.remaining()];
        byteBuffer.get(array);
        SpeechTranscriber speechTranscriber = transcriberMap.get(session);
        String taskId = taskIdMap.get(session);
        if (speechTranscriber == null) {
            // No transcriber was ever created for this session (metadata frame missing?).
            log.error("未找到 session={}, taskid={} 对应的 SpeechTranscriber", session, taskId);
            return;
        }
        if (speechTranscriber.getState() == SpeechTranscriber.State.STATE_CLOSED) {
            // Connection dropped: reconnect, register the new transcriber, then send.
            try {
                speechTranscriber = new SpeechTranscriber(NLS_CLIENT, "default", createListener(session), meetingJoinUrlMap.get(session));
                speechTranscriber.start();
                transcriberMap.put(session, speechTranscriber);
                speechTranscriber.send(array);
            } catch (Exception e) {
                log.error("重新建立连接异常", e);
            }
        } else {
            speechTranscriber.send(array);
        }
    }


    /**
     * Creates a Tingwu realtime meeting task via the OpenAPI, stores the
     * returned task id / join URL for the session, and opens the transcriber
     * connection.
     *
     * @param session the client WebSocket session
     * @param payload client JSON: language, startTime (epoch millis as string),
     *                diarizationEnabled ("1" enables speaker separation)
     * @throws RuntimeException if the OpenAPI call or the transcriber start fails
     */
    private void createMeeting(WebSocketSession session, String payload) {
        CommonRequest request = createCommonRequest("tingwu.cn-beijing.aliyuncs.com", "2023-09-30", ProtocolType.HTTPS, MethodType.PUT, "/openapi/tingwu/v2/tasks");
        request.putQueryParameter("type", "realtime");

        JSONObject root = new JSONObject();
        root.put("AppKey", appKey);

        // Basic task parameters supplied by the client.
        cn.hutool.json.JSONObject json = new cn.hutool.json.JSONObject(payload);
        String language = json.getStr("language");
        long startTime = Long.parseLong(json.getStr("startTime"));
        startTimeMap.put(session, startTime);
        Integer diarizationEnabled = Integer.valueOf(json.getStr("diarizationEnabled"));

        JSONObject input = new JSONObject();
        input
                .fluentPut("SourceLanguage", language)
                .fluentPut("Format", "pcm")
                .fluentPut("SampleRate", 16000)
                .fluentPut("TaskKey", "task" + startTime);
        // Enable MultipleStreamsEnabled only for multi-channel audio streams;
        // the vast majority of scenarios (including ours) are single-channel.
        //input.put("MultipleStreamsEnabled", true);
        root.put("Input", input);
        // Algorithm-related parameters (diarization, output level, ...).
        JSONObject parameters = initRequestParameters(diarizationEnabled);
        root.put("Parameters", parameters);

        // Fix: encode the JSON body explicitly as UTF-8. The original used the
        // platform default charset while declaring "utf-8" to the SDK.
        request.setHttpContent(root.toJSONString().getBytes(StandardCharsets.UTF_8), "utf-8", FormatType.JSON);

        DefaultProfile profile = DefaultProfile.getProfile("cn-beijing", accessKeyId, accessKeySecret);
        IAcsClient client = new DefaultAcsClient(profile);
        CommonResponse response;
        try {
            response = client.getCommonResponse(request);
        } catch (ClientException e) {
            log.error("创建会议异常", e);
            throw new RuntimeException(e);
        }
        JSONObject body = JSONObject.parseObject(response.getData());
        JSONObject data = body.getJSONObject("Data");
        if (data == null) {
            // API-level failure: surface the raw body instead of a bare NPE.
            log.error("创建会议异常, response={}", response.getData());
            throw new RuntimeException("创建会议异常: " + response.getData());
        }
        String taskId = data.getString("TaskId");
        taskIdMap.put(session, taskId);
        String meetingJoinUrl = data.getString("MeetingJoinUrl");
        meetingJoinUrlMap.put(session, meetingJoinUrl);
        log.info("----------创建会议成功,session={},taskid={},入参={}", session, taskId, payload);
        try {
            SpeechTranscriber speechTranscriber = new SpeechTranscriber(NLS_CLIENT, "default", createListener(session), meetingJoinUrl);
            speechTranscriber.start();
            transcriberMap.put(session, speechTranscriber);
            log.info("----------建立连接成功,session={},taskid={}",  session, taskId);
        } catch (Exception e) {
            // Fix: this is an error, not an info-level event.
            log.error("建立连接异常", e);
            throw new RuntimeException(e);
        }
    }


    /**
     * Cleans up when the client WebSocket closes: stops and closes the
     * transcriber and removes all per-session state.
     *
     * Fix: the original never removed entries from the four session maps
     * (and had a dead {@code meetingJoinUrlMap.get(session)} statement),
     * leaking memory as sessions came and went.
     */
    @Override
    public void afterConnectionClosed(WebSocketSession session, CloseStatus status) throws Exception {
        String taskId = taskIdMap.remove(session);
        log.info("----------关闭连接,session={},taskid={}",  session, taskId);
        super.afterConnectionClosed(session, status);
        meetingJoinUrlMap.remove(session);
        startTimeMap.remove(session);
        SpeechTranscriber speechTranscriber = transcriberMap.remove(session);
        if (speechTranscriber != null) {
            try {
                if (speechTranscriber.getState() != SpeechTranscriber.State.STATE_CLOSED) {
                    speechTranscriber.stop();
                    log.info("----------关闭连接成功,session={},taskid={}",  session, taskId);
                }
            } catch (Exception error) {
                log.error("关闭连接异常", error);
            } finally {
                // Always release the underlying connection, even if stop() failed.
                speechTranscriber.close();
            }
        }
    }

    /**
     * Builds the {@code Parameters} object for the Tingwu task-creation call.
     *
     * @param diarizationEnabled 1 to enable speaker diarization, anything else disables it
     * @return the algorithm parameters (transcription settings; the commented
     *         sections below are optional features kept as templates)
     */
    private JSONObject initRequestParameters(Integer diarizationEnabled) {
        JSONObject parameters = new JSONObject();

        // Audio/video transcoding: optional.
//        JSONObject transcoding = new JSONObject();
        //transcoding.put("TargetAudioFormat", "mp3");
        //transcoding.put("SpectrumEnabled", false);
//        parameters.put("Transcoding", transcoding);

        // Speech transcription settings.
        JSONObject transcription = new JSONObject();
        boolean diarizationEnabledBool = diarizationEnabled != null && diarizationEnabled == 1;
        if (diarizationEnabledBool) {
            transcription.put("DiarizationEnabled", diarizationEnabledBool);
            JSONObject speakerCount = new JSONObject();
            // 0 = let the service infer the number of speakers.
            speakerCount.put("SpeakerCount", 0);
            transcription.put("Diarization", speakerCount);
        }
        // 1: return results only for complete sentences;
        // 2: also return intermediate (partial) results.
        transcription.put("OutputLevel", 2);
        parameters.put("Transcription", transcription);

        // Translation: optional.
//        JSONObject translation = new JSONObject();
//        JSONArray langArry = new JSONArray();
//        translation.put("TargetLanguages", langArry);
//        parameters.put("Translation", translation);
//        parameters.put("TranslationEnabled", false);

        // Auto chapters: optional.
//        parameters.put("AutoChaptersEnabled", true);

        // Meeting assistance (smart minutes): optional.
//        parameters.put("MeetingAssistanceEnabled", true);

        // Summarization: optional.
//        parameters.put("SummarizationEnabled", true);
//        JSONObject summarization = new JSONObject();
//        JSONArray types = new JSONArray()
//                .fluentAdd("Paragraph");
                //                                .fluentAdd("Conversational")
                //                                .fluentAdd("QuestionsAnswering");
//        summarization.put("Types", types);
//        parameters.put("Summarization", summarization);
        return parameters;
    }

    /**
     * Builds a pre-configured JSON {@link CommonRequest} for the Aliyun OpenAPI.
     *
     * @param domain       API endpoint domain
     * @param version      API version (e.g. "2023-09-30")
     * @param protocolType HTTP or HTTPS
     * @param method       HTTP method
     * @param uri          URI pattern of the API
     * @return the configured request
     */
    public CommonRequest createCommonRequest(String domain, String version, ProtocolType protocolType, MethodType method, String uri) {
        CommonRequest request = new CommonRequest();
        request.setSysDomain(domain);
        request.setSysVersion(version);
        request.setSysProtocol(protocolType);
        request.setSysMethod(method);
        request.setSysUriPattern(uri);
        request.setHttpContentType(FormatType.JSON);
        return request;
    }


    /**
     * Creates the NLS transcriber listener that relays recognition results
     * back to the given WebSocket session.
     *
     * @param session the client WebSocket session to push results to
     * @return the configured listener
     */
    public SpeechTranscriberListener createListener(WebSocketSession session) {
        return new SpeechTranscriberListener() {
            @Override
            public void onMessage(String message) {
                if (message == null || message.trim().length() == 0) {
                    return;
                }
                SpeechTranscriberResponse response = JSON.parseObject(message, SpeechTranscriberResponse.class);
                if ("ResultTranslated".equals(response.getName())) {
                    // Translation events could be handled here; currently ignored.
                } else {
                    // Let the base listener dispatch the recognition event callbacks.
                    super.onMessage(message);
                }
            }

            @Override
            public void onTranscriberStart(SpeechTranscriberResponse response) {
                // This task_id identifies the realtime audio push/recognition
                // stream — it is NOT the meeting-level Tingwu TaskId.
                log.info("task_id: " + response.getTaskId() + ", name: " + response.getName() + ", status: " + response.getStatus());
            }

            @Override
            public void onSentenceBegin(SpeechTranscriberResponse response) {
                // Fired first when a new sentence is detected; nothing to do.
            }

            @Override
            public void onSentenceEnd(SpeechTranscriberResponse response) {
                // A complete sentence was recognized (server-side smart
                // segmentation); push it with endFlag=true.
                sendMsg(response, session, true);
            }

            @Override
            public void onTranscriptionResultChange(SpeechTranscriberResponse response) {
                // Intermediate (partial) result — only delivered because
                // OutputLevel=2; push it with endFlag=false.
                sendMsg(response, session, false);
            }

            @Override
            public void onTranscriptionComplete(SpeechTranscriberResponse response) {
                // Fired after speechTranscriber.stop() completes.
                log.info("------识别结束: " + JSON.toJSONString(response));
            }

            @Override
            public void onFail(SpeechTranscriberResponse response) {
                log.error("实时识别出错: " + JSON.toJSONString(response));
            }
        };
    }

    /**
     * Serializes a recognition result and sends it to the client session
     * (if still open).
     *
     * @param response the NLS recognition response
     * @param session  the client WebSocket session
     * @param endFlag  true if this is a final (sentence-end) result
     */
    private void sendMsg(SpeechTranscriberResponse response, WebSocketSession session, boolean endFlag) {
        // getOrDefault guards against an NPE on unboxing if the session's
        // start time was never recorded (e.g. already cleaned up).
        MeetingMsg textMessage = buildResultData(response, endFlag, startTimeMap.getOrDefault(session, 0L));
        try {
            if (session.isOpen()) {
                String jsonString = JSON.toJSONString(textMessage);
                session.sendMessage(new TextMessage(jsonString));
                log.info("-----------发送消息:{},taskId:{}", jsonString, response.getTaskId());
            }
        } catch (IOException e) {
            log.error("发送消息异常", e);
        }
    }

    /**
     * Builds the outgoing message for a recognition result, stamping it with
     * the wall-clock time the sentence began (meeting start + word offset).
     *
     * @param response the NLS recognition response
     * @param endFlag  true if this is a final (sentence-end) result
     * @param starTime meeting start time in epoch millis
     * @return the message to push to the client
     */
    private MeetingMsg buildResultData(SpeechTranscriberResponse response, boolean endFlag, long starTime) {
        String text = response.getTransSentenceText();
        List<SpeechTranscriberResponse.Word> words = response.getWords();
        // Fix: the original called words.get(0) unconditionally and threw on
        // responses without word-level data; fall back to a zero offset.
        long sentenceBeginTime = (words == null || words.isEmpty()) ? 0L : words.get(0).getStartTime();
        String formattedStartTime = DateUtil.date(starTime + sentenceBeginTime).toString("yyyy-MM-dd HH:mm:ss");
        return MeetingMsg.builder().content(text).startTime(formattedStartTime).endFlag(endFlag).build();
    }
}