package com.example.cscot;

import android.util.Log;

import com.iflytek.sparkchain.core.LLM;
import com.iflytek.sparkchain.core.LLMCallbacks;
import com.iflytek.sparkchain.core.LLMConfig;
import com.iflytek.sparkchain.core.LLMError;
import com.iflytek.sparkchain.core.LLMEvent;
import com.iflytek.sparkchain.core.LLMOutput;
import com.iflytek.sparkchain.core.LLMResult;
import com.iflytek.sparkchain.core.Memory;
import com.iflytek.sparkchain.core.SparkChain;
import com.iflytek.sparkchain.core.SparkChainConfig;

import org.json.JSONArray;
import org.json.JSONObject;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Objects;

/**
 * Thin wrapper around the iFlytek SparkChain LLM SDK.
 *
 * Lifecycle: the SDK is initialized once per process ({@link #gInitSdk} == 0 on
 * success) and reference-counted via {@link #gNumLlm}; the last
 * {@link #uninitXfSpark()} tears the SDK down again. Only ONE async chat session
 * may be active at a time, guarded by {@link #llmSessionFinished}.
 *
 * NOTE(review): the public mutable static/instance fields and the string-append
 * logging into {@code WeiLe.gLogText} are kept as-is for compatibility with the
 * rest of the app; they are not thread-safe — presumably everything runs on one
 * thread plus SDK callbacks. TODO confirm.
 */
public class XfSpark {
    public static int gNumLlm = 0;      // reference count of XfSpark users sharing the SDK
    public static int gInitSdk = -1;    // SDK init result: 0 = initialized OK, anything else = not ready
    // <<< LLM
    private static final String TAG = "XF_SPARK";
    // DateTimeFormatter is immutable and thread-safe; build the pattern once, not per TS() call.
    private static final DateTimeFormatter TS_FORMATTER =
            DateTimeFormatter.ofPattern("dd日HH:mm:ss.SSS ");
    public boolean llmSessionFinished = true;   // ensures only one async chat is active at any moment
    public boolean llmAppendMode;               // async chat: call the user callback for every partial chunk?
    public String llmAresponse;                 // complete accumulated answer of the async chat
    public RtCallback llmCb;                    // user-defined callback invoked from the SDK callbacks below
    private LLM llm;                            // iFlytek LLM chat handle

    LLMCallbacks llmCallbacks = new LLMCallbacks() {
        @Override
        public void onLLMResult(LLMResult llmResult, Object usrContext) {
            // usrContext could distinguish answers of concurrent sessions (preventing
            // mis-assembled replies), but we deliberately avoid concurrency instead.
            String content = llmResult.getContent();
            int status = llmResult.getStatus();
            if (!content.isEmpty()) {
                // Plain += keeps the raw text; concat of a JSONized copy was wrong here.
                llmAresponse += content;
                if (llmAppendMode && (llmCb != null)) {
                    llmCb.onRightTime(content);
                }
            }
            if (status == 2) {
                // Final token of the answer has arrived: log token usage, release the session.
                String msg = " completionTokens: " + llmResult.getCompletionTokens();
                msg += " promptTokens: " + llmResult.getPromptTokens();
                msg += " totalTokens: " + llmResult.getTotalTokens();
                WeiLe.gLogText += TS() + msg + "\n";
                llmSessionFinished = true;
                if (!llmAppendMode && (llmCb != null)) {
                    // Non-append mode: deliver the whole answer in a single callback.
                    llmCb.onRightTime(llmAresponse);
                }
            }
        }

        @Override
        public void onLLMEvent(LLMEvent event, Object usrContext) {
            String msg = "onLLMEvent:" + " " + event.getEventID() + " " + event.getEventMsg();
            Log.d(TAG, msg);
            WeiLe.gLogText += TS() + msg + "\n";
            llmSessionFinished = true;  // an SDK event also ends the pending session
        }

        @Override
        public void onLLMError(LLMError error, Object usrContext) {
            // BUGFIX: added the missing space before "errDesc:" in the log line.
            String msg = "errCode:" + error.getErrCode() + " errDesc:" + error.getErrMsg();
            Log.d(TAG, msg);
            WeiLe.gLogText += TS() + msg + "\n";
            llmSessionFinished = true;  // errors end the pending session as well
        }
    };
    // >>> LLM

    public XfSpark() {
        // "doInitNow" is a hack rather than the regular usage: we do not want to touch
        // the iFlytek sample code again, yet still want it to fit the new framework.
        // An external initXfSpark() call will not register the callbacks twice.
        initXfSpark("doInitNow");
    }

    /**
     * Initializes the SparkChain SDK (once per process) and, when {@code strInit}
     * equals "doInitNow", also builds the LLM instance with token memory and
     * registers the callbacks.
     *
     * @param strInit "doInitNow" to build the LLM immediately; any other value only
     *                performs (or re-attempts) SDK initialization
     * @return always 0; inspect {@link #gInitSdk} for the real SDK status
     */
    public int initXfSpark(String strInit) {
        if (gInitSdk != 0) {
            // Initialize the SDK with the app's credential triple.
            // SECURITY NOTE(review): credentials are hard-coded in source; consider
            // moving them to build config / secure storage.
            SparkChainConfig sparkChainConfig = SparkChainConfig.builder();
            sparkChainConfig.appID("27e1352c")
                    .apiSecret("YzdhZTUwMzNkODgxN2VlODFmMTY3N2Zm")
                    .apiKey("591986f6180ac534b293c4135bc3779b")
                    .logLevel(3);
            if (!Objects.equals(System.getProperty("os.arch"), "x86_64")) {
                // The native SDK hard-crashes on x86_64, so skip init there.
                // Only one active getInst().init at any moment.
                gInitSdk = SparkChain.getInst().init(WeiLe.mainActivity.getApplicationContext(), sparkChainConfig);
            }
            // BUGFIX: the success/failure log lines were swapped (0 means success,
            // see the `0 == gInitSdk` gate below and uninitXfSpark()).
            if (gInitSdk == 0) {
                WeiLe.gLogText += TS() + gNumLlm + "=num SDK is initialized: init=";
            } else {
                WeiLe.gLogText += TS() + gNumLlm + "=num SDK is NOT ready: init=";
            }
            WeiLe.gLogText += gInitSdk + " " + System.getProperty("os.arch") + "=os.arch\n";
        }
        if ((0 == gInitSdk) && strInit.equals("doInitNow")) {
            // Spark 4.0 Ultra endpoint, domain "4.0Ultra":   wss://spark-api.xf-yun.com/v4.0/chat
            // Spark Max endpoint,       domain "generalv3.5": wss://spark-api.xf-yun.com/v3.5/chat
            // Spark Pro-128K endpoint,  domain "pro-128k":    wss://spark-api.xf-yun.com/chat/pro-128k
            LLMConfig llmConfig = LLMConfig.builder();
            llmConfig.domain("4.0Ultra");
            llmConfig.url("wss://spark-api.xf-yun.com/v4.0/chat");
            // maxToken     maximum answer length in tokens; range 1-8192, default 4096
            // temperature  nucleus-sampling threshold, range (0, 1], default 0.5;
            //              lower = more focused/deterministic (picks highest-probability
            //              tokens, predictable but monotone), higher = more varied
            // topK         pick one of k candidates (non-uniformly); range 1-6, default 4
            llmConfig.maxToken(8192);
            llmConfig.temperature(0.4f);
            llmConfig.topK(3);
            // Two memory flavors exist — windowMemory and tokenMemory — pick exactly one:
            // Memory window_memory = Memory.windowMemory(5);
            // llm = new LLM(llmConfig, window_memory);
            Memory tokens_memory = Memory.tokenMemory(8192);
            llm = new LLM(llmConfig, tokens_memory);
            llm.registerLLMCallbacks(llmCallbacks);
        }
        gNumLlm += 1;   // always counted as a user, regardless of SDK state
        return 0;
    }

    // https://blog.51cto.com/u_16213372/7744907
    // Java needs no explicit destructor: the garbage collector reclaims the object
    // once it is unreachable. Kept deliberately as a reminder of this Java fact.
    // NOTE(review): finalize() is deprecated (for removal); prefer java.lang.ref.Cleaner
    // or explicit uninitXfSpark() calls for native-resource cleanup.
    @Override
    protected void finalize() throws Throwable {
        super.finalize();   // canonical pattern: always delegate up the chain
    }

    /**
     * Drops one SDK user; tears the SDK down when the last user is gone.
     *
     * @return always 0
     */
    public int uninitXfSpark() {
        gNumLlm -= 1;
        if (gNumLlm == 0 && gInitSdk == 0) {
            SparkChain.getInst().unInit();
            gInitSdk = -1;  // back to "not initialized"
        }
        Log.d(TAG, gNumLlm + "=num init=" + gInitSdk);
        return 0;
    }

    /**
     * Synchronous chat. Simple, but use sparingly: it blocks the calling thread,
     * which can hang the UI and get the process killed.
     *
     * @param userRequest plain-text user prompt
     * @return the answer text, or "" on error / when the LLM is not configured
     */
    public String talkSDK(String userRequest) {
        if (llm == null) {
            Log.e(TAG, "toSDK failed, please setLLMConfig before!");
            return "";
        }
        llmSessionFinished = false;
        LLMOutput syncOutput = llm.run(userRequest);
        llmSessionFinished = true;
        if (syncOutput.getErrCode() == 0) {
            return syncOutput.getContent();
        }
        String msg = "同步调用：" + "errCode" + syncOutput.getErrCode();
        msg += " errMsg:" + syncOutput.getErrMsg();
        Log.e(TAG, msg);
        WeiLe.gLogText += TS() + msg + "\n";
        return "";
    } // talkSDK

    // Writes msg to logcat AND appends it (timestamped) to the global on-screen log.
    private void logBoth(String msg) {
        Log.d(TAG, msg);
        WeiLe.gLogText += TS() + msg + "\n";
    }

    // Shared async submission: resets the answer accumulator, marks the session busy
    // and hands the JSON message array to llm.arun(); releases the session on failure.
    private void submitAsync(JSONArray array, RtCallback cb, boolean appendMode) {
        try {
            llmAresponse = "";  // zero-string accumulator for the new answer
            llmSessionFinished = false;
            llmCb = cb;
            llmAppendMode = appendMode;
            int ret = llm.arun(array.toString(), "myContext");
            if (ret != 0) {
                llmSessionFinished = true;
                logBoth("llm.arun() != 0, but = " + ret);
            }
        } catch (Exception e) {
            llmSessionFinished = true;
            // BUGFIX: include the exception instead of silently swallowing it.
            logBoth("Something wrong happened in llm.arun(): " + e);
        }
    }

    /**
     * Sets the global persona (agent background) of the LLM; it stays in effect for
     * subsequent chats. Verified to work, but apparently only within roughly 5000
     * Chinese characters — a future improvement could re-send it every 4096 chars.
     *
     * @param whatRole system-role text describing the persona
     * @param cb       callback receiving the model's acknowledgement (full answer, one call)
     */
    public void prepareRole(String whatRole, RtCallback cb) {
        if (llm == null) {
            // Guard added for consistency with postSDK(); previously this would have
            // surfaced as a caught NPE inside the try block.
            logBoth("toSDK failed, please setLLMConfig before!");
            return;
        }
        if (!llmSessionFinished) {
            logBoth("Last is not done");
            return;
        }
        try {
            JSONArray array = new JSONArray();
            JSONObject persona = new JSONObject();
            persona.put("role", "system");
            persona.put("content", whatRole);
            array.put(persona);
            JSONObject greeting = new JSONObject();
            greeting.put("role", "user");
            greeting.put("content", "你好！");
            array.put(greeting);
            submitAsync(array, cb, false);  // deliver the acknowledgement in one piece
        } catch (Exception e) {
            llmSessionFinished = true;
            logBoth("Something wrong happened in llm.arun(): " + e);
        }
    }

    /**
     * Asynchronous chat. Appends the new user turn to the caller-supplied history
     * and submits it; the answer arrives through the registered SDK callbacks.
     *
     * @param array       mutable chat history (modified in place: user turn is appended)
     * @param userRequest the new user prompt
     * @param cb          user callback for the answer
     * @param appendMode  true = one callback per partial chunk; false = single callback
     *                    with the complete answer
     */
    public void postSDK(JSONArray array, String userRequest, RtCallback cb, boolean appendMode) {
        if (llm == null) {
            logBoth("toSDK failed, please setLLMConfig before!");
            return;
        }
        if (!llmSessionFinished) {
            logBoth("Last is not done");
            return;
        }
        try {
            JSONObject turn = new JSONObject();
            turn.put("role", "user");
            turn.put("content", userRequest);
            array.put(turn);
            submitAsync(array, cb, appendMode);
        } catch (Exception e) {
            llmSessionFinished = true;
            logBoth("Something wrong happened in llm.arun(): " + e);
        }
    }

    /** Timestamp prefix for the global log, e.g. "05日13:02:11.123 ". */
    public static String TS() {
        return LocalDateTime.now().format(TS_FORMATTER);
    }
}
