package org.tio.showcase.websocket.server;

import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerParam;
import com.alibaba.dashscope.audio.asr.translation.TranslationRecognizerRealtime;
import com.alibaba.dashscope.audio.asr.translation.results.Translation;
import com.alibaba.dashscope.exception.NoApiKeyException;
import io.reactivex.Flowable;
import io.reactivex.processors.UnicastProcessor;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.tio.core.Tio;
import org.tio.core.ChannelContext;
import org.tio.http.common.HttpRequest;
import org.tio.http.common.HttpResponse;
import org.tio.websocket.common.WsRequest;
import org.tio.websocket.common.WsResponse;
import org.tio.websocket.common.WsSessionContext;
import org.tio.websocket.server.handler.IWsMsgHandler;

/**
 * @author tanyaowu
 * 2017年6月28日 下午5:32:38
 */
public class ShowcaseWsMsgHandler implements IWsMsgHandler {
    private static final Logger log = LoggerFactory.getLogger(ShowcaseWsMsgHandler.class);

    /** Singleton instance; the constructor is private. */
    public static final ShowcaseWsMsgHandler me = new ShowcaseWsMsgHandler();

    /** Target language requested from the streaming translation service. */
    public static final String target_language = "en";

    /** One audio processor per client, keyed by ChannelContext id; removed again in {@link #onClose}. */
    private static final Map<String, UnicastProcessor<ByteBuffer>> UNICAST_PROCESSOR_MAP = new ConcurrentHashMap<>();

    private ShowcaseWsMsgHandler() {
    }

    /**
     * Guarantees a single audio processor per client (保证一个Client只有一个音频处理器).
     * On first use for a channel, lazily builds the pipeline: a UnicastProcessor whose
     * ByteBuffers feed a DashScope realtime translation session; transcription and
     * translation results are pushed back to the same client as WsResponses.
     *
     * @param channelContext the client's channel; its id keys the processor map
     * @return the processor to push raw PCM audio frames into
     */
    public UnicastProcessor<ByteBuffer> getAudioProcessor(ChannelContext channelContext) {
        return UNICAST_PROCESSOR_MAP.computeIfAbsent(channelContext.getId(), s -> {
            log.info("create audio processor for channel {}", channelContext.getId());

            UnicastProcessor<ByteBuffer> audioProcessor = UnicastProcessor.create();
            Flowable<ByteBuffer> audioFlowable = audioProcessor.onBackpressureBuffer();

            // Recognizer that will consume the Flowable<ByteBuffer> created above.
            TranslationRecognizerRealtime translator = new TranslationRecognizerRealtime();
            // SECURITY FIX: the API key used to be hardcoded here. A key committed to
            // source control must be considered leaked and revoked; supply it via the
            // DASHSCOPE_API_KEY environment variable instead. A missing key surfaces
            // as NoApiKeyException from streamCall below.
            TranslationRecognizerParam param =
                    TranslationRecognizerParam.builder()
                            .model("gummy-realtime-v1")
                            .format("pcm") // supported: 'pcm'、'wav'、'opus'、'speex'、'aac'、'amr'
                            .sampleRate(16000) // supported 8000、16000
                            .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                            .transcriptionEnabled(true)
                            .translationEnabled(true)
                            .translationLanguages(new String[]{target_language})
                            .build();

            // Stream-call interface: pipe the audio Flowable into the recognizer and
            // forward each result to the client.
            try {
                translator
                        .streamCall(param, audioFlowable)
                        .subscribe(result -> {
                            if (result.getTranscriptionResult() != null) {
                                String transcriptionText = result.getTranscriptionResult().getText();
                                if (result.isSentenceEnd()) {
                                    log.info("Fix: {}", transcriptionText);
                                    log.info("Stash: {}", result.getTranscriptionResult().getStash());
                                } else {
                                    log.debug("Temp Result: {}", transcriptionText);
                                }
                                sendServerMessage(channelContext, transcriptionText);
                            }
                            if (result.getTranslationResult() != null) {
                                Translation targetTranslation = result.getTranslationResult().getTranslation(target_language);
                                // BUGFIX: the original dereferenced targetTranslation OUTSIDE this
                                // null check, throwing NPE whenever no translation for the target
                                // language was present. All use now stays inside the guard.
                                if (targetTranslation != null) {
                                    if (result.isSentenceEnd()) {
                                        log.info("Fix to {}: {}", target_language, targetTranslation.getText());
                                        log.info("Stash to {}: {}", target_language, targetTranslation.getStash());
                                    } else {
                                        // CONSISTENCY FIX: reuse targetTranslation rather than
                                        // re-fetching with the hardcoded literal "en".
                                        log.debug("Temp Result: {}", targetTranslation.getText());
                                    }
                                    sendServerMessage(channelContext, targetTranslation.getText());
                                }
                            }
                            if (result.isSentenceEnd()) {
                                log.info("RequestId: {} Usage: {}", result.getRequestId(), result.getUsage());
                            }
                        }, throwable -> {
                            log.error("Error during translation", throwable);
                        }, () -> {
                            // Runs when the session completes (after onComplete on the processor).
                            log.info("Recognition onComplete for channel {}", channelContext.getId());
                        });
            } catch (NoApiKeyException e) {
                // Configuration error — fail fast rather than creating a dead processor.
                throw new RuntimeException(e);
            }
            // NOTE: the misleading "Recognition onComplete" println that ran here at
            // subscription time (not at completion) has been removed.
            return audioProcessor;
        });
    }

    /**
     * Builds the showcase chat payload for a server-originated message and sends it
     * to the given client only.
     */
    private static void sendServerMessage(ChannelContext channelContext, String message) {
        String responseMessage = "{name:'server',message:'" + message + "'}";
        WsResponse wsResponse = WsResponse.fromText(responseMessage, ShowcaseServerConfig.CHARSET);
        Tio.send(channelContext, wsResponse);
    }

    /**
     * Called during the WebSocket handshake; business code can read cookies and
     * request parameters here (握手时走这个方法). Binds the "name" query parameter as
     * this channel's user id and stores the requested audio encoding on the channel.
     */
    @Override
    public HttpResponse handshake(HttpRequest request, HttpResponse httpResponse, ChannelContext channelContext) throws Exception {
        String clientip = request.getClientIp();
        String myname = request.getParam("name");
        String encoding = request.getParam("encoding");

        Tio.bindUser(channelContext, myname);
        channelContext.setUserid(myname);
        channelContext.setAttribute("encoding", encoding);
        log.info("收到来自{}的ws握手包 {} {} ", clientip, request, encoding);
        return httpResponse;
    }

    /**
     * Called after the handshake completes: joins the shared group and broadcasts
     * an "X entered, N online" notice to everyone in it.
     *
     * @author tanyaowu
     */
    @Override
    public void onAfterHandshaked(HttpRequest httpRequest, HttpResponse httpResponse, ChannelContext channelContext) throws Exception {
        // Bind to the group so later broadcasts reach this client too.
        Tio.bindGroup(channelContext, Const.GROUP_ID);
        int count = Tio.getAll(channelContext.tioConfig).getObj().size();

        String msg = "{name:'admin',message:'" + channelContext.userid + " 进来了，共【" + count + "】人在线" + "'}";
        // With tio-websocket, every server-to-client packet is a WsResponse.
        WsResponse wsResponse = WsResponse.fromText(msg, ShowcaseServerConfig.CHARSET);
        // Broadcast to the whole group.
        Tio.sendToGroup(channelContext.tioConfig, Const.GROUP_ID, wsResponse);
    }

    /**
     * Called for binary frames (binaryType = arraybuffer): dumps the frame to disk.
     */
    @Override
    public Object onBytes(WsRequest wsRequest, byte[] bytes, ChannelContext channelContext) throws Exception {
        // BUGFIX: the original pattern had 2 placeholders for 3 arguments, silently
        // dropping the encoding attribute from the log line.
        log.info("收到字节消息：{} {} {}", bytes.length, channelContext, channelContext.get("encoding"));
        // 方式1: write the frame to the file system.
        // BUGFIX: try-with-resources — the stream previously leaked if write() threw.
        // NOTE(review): each frame overwrites "outputFile.mp4"; confirm whether
        // appending was intended for multi-frame uploads.
        try (FileOutputStream fos = new FileOutputStream("outputFile.mp4")) {
            fos.write(bytes);
        }

        // TODO 怎么关闭 Flowable — see onClose: the processor is removed from the map
        // and completed there.
//        UnicastProcessor<ByteBuffer> audioProcessor = getAudioProcessor(channelContext);
//        // 方式2:
//        audioProcessor.onNext(ByteBuffer.wrap(bytes)); // 将数据发送到RxJava Flowable中处理

        // The return value would be sent to the client; null means send nothing.
        return null;
    }

    /**
     * Called when the client sends a close flag: completes and discards this
     * channel's audio processor, then removes the channel.
     */
    @Override
    public Object onClose(WsRequest wsRequest, byte[] bytes, ChannelContext channelContext) throws Exception {
        UnicastProcessor<ByteBuffer> audioProcessor = UNICAST_PROCESSOR_MAP.remove(channelContext.getId());
        if (audioProcessor != null) {
            audioProcessor.onComplete(); // Ends the audio stream so the recognizer can finish.
        }
        Tio.remove(channelContext, "receive close flag");
        return null;
    }

    /**
     * Called for text frames (binaryType = blob): ignores heartbeats and broadcasts
     * everything else to the group as a chat message from this user.
     */
    @Override
    public Object onText(WsRequest wsRequest, String text, ChannelContext channelContext) throws Exception {
        WsSessionContext wsSessionContext = (WsSessionContext) channelContext.get();
        HttpRequest httpRequest = wsSessionContext.getHandshakeRequest(); // Original WebSocket handshake request.
        if (log.isDebugEnabled()) {
            log.debug("握手包:{}", httpRequest);
        }

        log.info("收到ws消息:{}", text);

        // Heartbeat frames carry no business payload; swallow them.
        if (Objects.equals("心跳内容", text)) {
            return null;
        }
        String msg = "{name:'" + channelContext.userid + "',message:'" + text + "'}";
        // With tio-websocket, every server-to-client packet is a WsResponse.
        WsResponse wsResponse = WsResponse.fromText(msg, ShowcaseServerConfig.CHARSET);
        // Broadcast to the whole group.
        Tio.sendToGroup(channelContext.tioConfig, Const.GROUP_ID, wsResponse);

        // The return value would be sent to the client; null means send nothing.
        return null;
    }

}
