//import { AssetField } from "../inspector/field/AssetField";
import fs from 'fs';
import path from 'path';
import { AIAction } from "./AIAction";
import { AIConfig, LayaAPICode, LayaAPIStatus } from "./AIConfig";
import { DemoServer } from './AIDemo';
import { AIInterface_Editor } from "./AIInterface_Editor";
import { AIMemoryManager } from "./AIMemory";
import { AIPanel } from "./AIPanel";
import { AIPluginMgr } from "./AIPluginMgr";
import { AIScriptMgr } from './AIScriptClassMgr';
import { AITaskGenAudioNode } from "./AITask/AITaskGenAudio";
import { AITaskGenImageNode } from "./AITask/AITaskGenImage";
import { AITaskGenSkyImg } from "./AITask/AITaskGenSkyImg";
import { AITaskTTS } from './AITask/AITaskTTS';
import { trimJavascript } from "./AIUtils";
import { addChatChannel, chatChannels } from './ChatChannels';
import { IDEAgent } from "./IDEAgent";
import { IatRecorder } from './IatRecorder';
import { addLog, chatgpt_chat, getAudio, getImage, post } from "./Imagen";
import { LayaAPI } from "./LayaAPI";
import { 点数不够 } from './StoreServer';
import { Thinkingdata } from './Thinkingdata';
import { randomTree } from "./TreeEditor/gendata";
import { ButtonList } from './field/ButtonList';
let ort = require('onnxruntime-web')

// Bare references that "touch" side-effect-only imports so bundlers/linters do
// not drop them as unused — importing these modules runs their registration
// code (task/script-class registration).
AITaskGenAudioNode;
AITaskGenImageNode;
AIScriptMgr

/**
 * 利用LLM的推理能力
 *
 */
/**
 * Core of the editor AI assistant.
 *
 * Leverages LLM reasoning to translate natural-language user input into
 * executable editor scripts (see `idectrl`), and hosts the generation tasks
 * (image, skybox, music, TTS), the QA chat channel, and assorted test
 * entry points.
 *
 * A single instance is created at module load and exposed globally as
 * `window.AI` (see bottom of file); inline HTML callbacks (e.g. the confirm
 * buttons) reference it by that global name.
 */
export class AICore {
    // Global singleton; assigned in the constructor.
    static inst: AICore = null
    ideAgent:IDEAgent;
    //plugins = new AIPluginsMgr()
    version = '0.6';
    // The onnxruntime-web module, exposed for external callers.
    ort: any;
    pluginMgr = new AIPluginMgr;
    memory: AIMemoryManager;
    // Absolute directory of the editor AI resources; set in init(). No trailing slash.
    AIPluginPath:string;

    // User-input handler. Can be swapped based on priority — getUserInput()
    // temporarily replaces it so a waiting caller receives the next input
    // instead of the default processing pipeline.
    private _onUserInput: (msg: string, retcb: (str: string) => void) => void = this.processInput.bind(this);

    constructor() {
        // Ensure the per-project working directory for AI data exists.
        if(!fs.existsSync(Editor.projectPath +"/.layaai/")){
            fs.mkdirSync(Editor.projectPath +"/.layaai/");
        }

        this.ideAgent = new IDEAgent();
        AICore.inst = this;
        this.ort = ort;
        //requestAnimationFrame(this.onUpdate.bind(this));
        this.init();    // async initialization (fire-and-forget)
    }

    /**
     * Async part of construction: pushes user info to the API layer, derives
     * the editor-resources directory from a known marker asset, registers the
     * editor plugin interface and creates the memory manager.
     */
    async init(){
        this.ideAgent.init();
        LayaAPI.setUserInfo(this.ideAgent.getSafeUserData())

        // Locate a known marker asset to find the editor resources directory.
        let aipath = await Editor.assetDb.getAsset(`editorResources/forFindAIPath.svg`,true);
        // NOTE(review): this local shadows the imported 'path' module within this method.
        let path = Editor.assetDb.getFullPath(aipath);
        let lastp = Math.max(path.lastIndexOf('\\'),path.lastIndexOf('/'));
        this.AIPluginPath = path.substring(0,lastp);    // no trailing '/' at the end
        this.pluginMgr.regAIPlugin('editor', new AIInterface_Editor())
        this.memory = new AIMemoryManager(this);
    }

    /** Lazily points onnxruntime-web at the bundled wasm binaries (idempotent). */
    private async initONNXRuntime() {
        if (ort.env.wasm.wasmPaths)
            return;
        // Resolve the wasm asset to an absolute path so the runtime can load it.
        let defonnxwasm = await Editor.assetDb.getAsset('editorResources/wasm/ort-wasm.wasm', true);
        let wasmpath = Editor.assetDb.getFullPath(defonnxwasm);
        // When wasmPaths is set, onnxruntime loads its wasm files from that directory.
        ort.env.wasm.wasmPaths = path.parse(wasmpath).dir + '/';
    }

    /** Manual test: generate a 512x512 image from a prompt. */
    testImage(prompt = "a dog wearing a hat") {
        getImage(prompt, 512, 512);
    }

    /** Manual test: image-to-image generation (currently disabled). */
    testI2I(img: string, prompt: string) {
        //img2img(img, null, prompt);
    }

    /**
     * Manual test: run a generated script that sets an AI-generated texture
     * on the material of the currently selected node.
     */
    testSetTexture() {
        let js = `
        (async function() {
            let selectedNodes = editor.getSelected();
            if (selectedNodes.length > 0) {
                let node = selectedNodes[0];
                let material = await node.get_material();
                if (material) {
                    let img = await editor.generateImage("一个美女，全身");
                    await material.set_texture(img);
                }
            }
        })();        
`;

        // Overwritten on purpose: the shorter top-level-await form below is
        // the variant actually executed.
        js =
            `
let selectedNode = editor.getSelected()[0];
let img = await editor.generateImage("一个美女，全身");
let mtl = await selectedNode.get_material();
if (mtl) {
    await mtl.set_texture(img);
}
`
        this.pluginMgr.runjs(js);
    }

    /** Manual test: script that creates a row of 10 boxes spaced 2 units apart. */
    testjs1() {
        let js = `
        let posX = 0;

        for(let i=0; i<10; i++){
            let box = await editor.createBox();
            box.position = new Vector3(posX, 0, 0);
            posX += 2;
        }        
        `
        this.pluginMgr.runjs(js);
    }

    /** Manual test: script that generates a panorama and sets it as the scene sky. */
    testSetSky() {
        let js = `
        let skyImage = await editor.generatePanoramaImage("蓝天白云", 2048, 1024);
        let skyMaterial = await editor.createMaterial("SkyPanoramic", "skyMaterial");
        await skyMaterial.set_texture(skyImage);
        let scene = editor.getScene();
        scene.set_sky_material(skyMaterial);        
`
        this.pluginMgr.runjs(js);
    }

    /** Manual test: script expected to exercise the error path of runjs. */
    testError() {
        let js = `let lastMusicTask = editor.getLastAudioGenerateTask();
        lastMusicTask.prompt = "欢快的笛子演奏";
        await lastMusicTask.generate();`
        this.pluginMgr.runjs(js);
    }

    /**
     * Manual test: a deliberately flawed sky-setup script (missing awaits on
     * the create/generate calls) to exercise runtime error handling.
     */
    testSkyError() {
        let js = `async function setSkyboxToRainforest() {
            const skyboxMaterial = editor.createMaterial("SkyPanoramic", "RainforestSkyboxMaterial");
            const panoramaImage = editor.generatePanoramaImage("Rainforest");
            await skyboxMaterial.set_texture(panoramaImage);
            const scene = editor.getScene();
            scene.set_sky_material(skyboxMaterial);
        }
        
        setSkyboxToRainforest();`
        this.pluginMgr.runjs(js);
    }

    /** Manual test: music generation with a local reference audio file. */
    testRefMusic() {
        let js = `
        editor.generateAudio("钢琴演奏", 10, "pianoMusic", 'D:/work/genmusic/桃花岛.mp3');        
        `
        this.pluginMgr.runjs(js);
    }

    /** Manual test: generate a sky image directly through the task object. */
    async testSky2(prompt: string) {
        let t = new AITaskGenSkyImg();
        t._data.prompt = prompt;
        let asset = await t.genImage();
    }

    /** Manual test: run the TTS task with its default data. */
    async testtts1() {
        let t = new AITaskTTS();
        //await t.update_preset();
        t.execute();
    }

    /** Manual test: exercise the insufficient-points store flow. */
    async testSub(){
        let r = await 点数不够('')
        alert(r);
    }

    /** Manual test: TTS with a fixed character voice and text. */
    async testts2(){
        let t = new AITaskTTS();
        t.data.charactor='4fe842e8d585c0f44189e2f570b4e57cf29dc86f'
        t.data.text="总有刁民想害朕"
        t.execute();
    }
    /**
     * Manual test: text-to-speech via the editor plugin interface, rename the
     * result after the input text, then feed it to the demo server as the
     * next simulated microphone input.
     */
    async testvallex(str: string, who: string) {
        let mgr = this.pluginMgr;
        let editordef = mgr.interfaces['editorcore'];
        let editor = editordef.depVars['editor']
        let ret = await editor.textToSpeech(str, who);
        let fpath = Editor.assetDb.getFullPath(ret);
        let newfile = path.dirname(fpath) + '/' + str + '.wav'
        fs.renameSync(fpath, newfile);
        // Route the generated wav back as the next mic input (speech-recognition round trip).
        DemoServer.getNextMicInput = () => {
            return newfile;
        }
    }

    /** Manual test: query a hard-coded QA server and log the first answer. */
    async testQA(q: string) {
        console.log('查询中');
        let svurl = "http://10.10.20.21:3100/items";
        let ret = await post(svurl, { query: q }) as string
        let rets = JSON.parse(ret) as string[]
        console.log(rets[0])
    }    

    // Active wav recorder, if any (null when idle).
    private recwav: MediaRecorder;
    // Counter used to produce unique output file names.
    private recid = 0;
    /**
     * Manual test: toggle wav recording — first call starts, next call stops,
     * next call starts again, and so on.
     */
    async testRecWav() {
        if (this.recwav) {
            this.recwav.stop();
            // Toggle: one call starts, the next stops, the next starts again...
            this.recwav = null;
            return;
        }
        let rec = await IatRecorder.recordWav(`d:/temp/output${this.recid++}.wav`);
        this.recwav = rec;
        return rec;
    }

    /**
     * QA chat channel: posts the question to the configured QA server, shows
     * the first answer in the panel, and tracks timing analytics.
     */
    async QA(q: string) {
        let wmsg = await AIPanel.ins.startWaitMsg('查询中');
        let st = Date.now();
        Thinkingdata.ins.track('QAStart',{msg:q})

        let svurl = AIConfig.QAServerUrl;
        let ret = await post(svurl, { query: q }) as string
        let rets = JSON.parse(ret) as string[]
        //console.log(rets[0])
        AIPanel.ins.addMsg(rets[0], -1);
        let dt = (Date.now()-st)/1000;
        Thinkingdata.ins.track('QAEnd',{duration:dt});
        AIPanel.ins.removeMsg(wmsg);

    }

    /** Manual test: chat with a fixed assistant system prompt. */
    testChat(msg: string, stream: boolean) {
        chatgpt_chat('你是LayaAirIDE助手', msg, (str) => { console.log(str); }, stream)
    }

    /** Manual test: chat with a caller-supplied system prompt. */
    testChat1(system: string, msg: string, stream: boolean) {
        chatgpt_chat(system, msg, (str) => { console.log(str); }, stream)
    }

    /** Manual test: query the ERNIE bot endpoint and log the returned content. */
    async testWenxin(str: string) {
        let res: any = await LayaAPI.create().laya_ernie_bot(str);
        //this.endWaitMsg();
        if (!res || res.code !== LayaAPICode.CodeSuccess || res.data.status !== LayaAPIStatus.TaskStatusDone) {
            return;
        }
        str = res.data.content as string;
        console.log(str);
    }

    /**
     * Waits until the user provides input and resolves with it.
     * Temporarily swaps the input handler; the previous handler is restored
     * once a message arrives.
     * @returns promise of the message plus a callback for the AI response
     */
    getUserInput() {
        return new Promise<{ msg: string, aiResponse: (str: string) => void }>((resolve) => {
            let old = this._onUserInput;
            this._onUserInput = (msg, aiResponse) => {
                console.log('inputs:', msg)
                this._onUserInput = old;
                resolve({ msg, aiResponse });
            }
        });
    }

    //private _userInputs:{msg:string,aiResponse:(str:string)=>void}[]=[];

    /** Entry point for user commands: dispatches to the current input handler. */
    async cmd(msg: string, aiResponse: (str: string) => void) {
        this._onUserInput(msg, aiResponse);
        //this._userInputs.push({msg,aiResponse});
    }

    // NOTE(review): method name has a typo ("procss"); it is private, so a
    // rename would be safe, but it is left unchanged here.
    /** Routes the message to the handler registered for the active chat channel. */
    private async procssInputByLLM(msg: string) {
        let channel = AIPanel.ins.chatChannel;
        let func = chatChannels[channel];
        if(func){
            await func(msg);
        }
    }

    /**
     * "IDE control" chat channel: asks an LLM to translate the user's command
     * into a JS snippet against the plugin-defined interface, then executes it.
     */
    private async idectrl(msg:string){
        // For plugins, the interfaces and functions are injected via the plugin manager.
        // All interface functions are treated as async.
        // Assume the model already knows basic Vector3 / Quaternion.

        // Tree species are deliberately left out of the prompt; error reporting and
        // listing live in the implementation, to keep the interface description small.

        let prompt = `
你是一个精通js的IDE助手,你会理解用户的意图,做出适当的回应,如果用户对你发出指令,则并把用户输入的指令翻译成js脚本(ES6的语法)
针对指令的处理的需求如下：
1. 直接输出js程序,不要任何解释说明，代码也不要注释.
2. 仅仅允许使用下面定义的接口,不允许使用其他接口，不要自创接口。
3. 注意接口函数返回类型是Promise的，需要用await调用
4. 输出是直接执行的脚本，不要封装成函数.

如果用户请求帮助，则调用 editor.help(),并且不要再调用其他接口

接口定义：
\`\`\`javascript
${this.pluginMgr.scriptDefine}
\`\`\`
`;
        let str: string;

        if (this._useGPT) {
            let wmsg = await AIPanel.ins.startWaitMsg('分析指令中');
            // Kind of object currently being edited, used as command context: e.g. if
            // it is 'tree', the AI can tell that "a bit taller" refers to the tree.
            let curEditObj = this.memory.getCurEditObj();
            let curediobjname = curEditObj?.name;
            let cursel = Editor.scene.getSelection()[0];
            // If something is selected, only consider curEditObj when the selection
            // happened before the edit object was set.
            // NOTE(review): 'editobjname ?? createTime' is compared against a Date —
            // 'editobjname' looks like it should be a timestamp; confirm the field.
            if (curediobjname && (!cursel || (curEditObj.editobjname ?? curEditObj.createTime) > this.selectChangeTime)) {
                prompt = prompt + '\n当前操作对象是：' + curediobjname + '\n用户输入是:' + msg;
            } else {
                prompt = prompt + '\n用户输入是:' + msg;
            }
            let sttm = Date.now();
            let res: any = await LayaAPI.create().laya_chatgpt(prompt);
            let dt = Date.now() - sttm;
            console.log('gptdelay:', dt / 1000);
            //str = await chatgpt_chat(prompt, msg, null, false)
            //this.endWaitMsg();
            AIPanel.ins.removeMsg(wmsg);
            if (!res || res.code !== LayaAPICode.CodeSuccess || res.data.status !== LayaAPIStatus.TaskStatusDone) {
                return;
            }
            str = res.data.content as string;
            //addLog({ type: 'parseinput', data: { user: msg, ai: str } }, AICore.inst.ideAgent.getSafeUserData(), null);
        } else {
            let wmsg = await AIPanel.ins.startWaitMsg('分析指令中');
            prompt += `用户指令是:${msg}\n你的输出是:`
            // analytics: cmdParse start
            let st = Date.now();
            Thinkingdata.ins.track("cmdParseStart", {msg:msg});
            let res: any = await LayaAPI.create().laya_ernie_bot(prompt);
            let dt = (Date.now()-st)/1000;
            // analytics: cmdParse end
            Thinkingdata.ins.track("cmdParseEnd", {duration:dt});

            //this.endWaitMsg();
            AIPanel.ins.removeMsg(wmsg);
            if (!res || res.code !== LayaAPICode.CodeSuccess || res.data.status !== LayaAPIStatus.TaskStatusDone) {
                AIPanel.ins.addMsg('由于服务过于繁忙，本次指令被忽略，麻烦再试一次。',-1);
                Thinkingdata.ins.track('Ernie_bot_err',{});
                return;
            }
            str = res.data.content as string;
        }
        str = trimJavascript(str);
        // ERNIE sometimes returns code wrapped in backticks; strip them.
        if(str.startsWith('`')){
            str = str.substring(1);
        }
        if(str.endsWith('`')){
            str=str.substring(0,str.length-1)
        }
        console.log('AIScript:', str)
        addLog({type:'CmdParse', data: { user: msg, ai: str }}, AICore.inst.ideAgent.getSafeUserData(),null);
        if (str && str.length > 10) {
            try {
                this.pluginMgr.runjs(str);
            } catch (e) {
                AIPanel.ins.addMsg(str, -1)
            }
        }
    }

    // For the UI: run a command directly without function calling — convenient
    // in some cases, e.g. AI.runCmd('生成图片').
    // NOTE(review): 'forcenNewTask' looks like a typo for 'forceNewTask'; the
    // parameter name is part of the signature and is left as-is.
    async runCmd(msg: string, forcenNewTask = true) {
        this.processInput(msg, null, forcenNewTask);
    }

    // When true, commands go through GPT instead of ERNIE (toggled by the
    // magic inputs '使用gpt' / '不用gpt' below).
    private _useGPT = false;
    /**
     * Default input handler: intercepts a few magic phrases (model toggle,
     * greetings) and otherwise forwards to the active chat channel.
     */
    async processInput(msg: string, retcb: (str: string) => void, forcenNewTask = false) {
        if (msg == '使用gpt') {
            this._useGPT = true;
            return;
        }
        if (msg == '不用gpt') {
            this._useGPT = false;
            return;
        }
        if( msg=='你好'||msg=='你好!'||msg=='你好.'||msg=='你好。'){
            AIPanel.ins.addMsg('你好，有什么可以帮助你的吗？',-1);
            return;
        }
        return this.procssInputByLLM(msg);
    }

    // State for the pending confirm1 dialog (message id + its resolver).
    private confirmMsgID: string;
    private confirmResolve: (value: unknown) => void;
    // Invoked from inline HTML via the global 'AI' object (see confirm1).
    private onConfirmOKClick(e: Event) {
        AIPanel.ins.removeMsg(this.confirmMsgID);
        this.confirmMsgID = null;
        this.confirmResolve(true);
        this.confirmResolve = null;
    }
    // Invoked from inline HTML via the global 'AI' object (see confirm1).
    private onConfirmCancelClick(e: Event) {
        AIPanel.ins.removeMsg(this.confirmMsgID);
        this.confirmMsgID = null;
        this.confirmResolve(false);
        this.confirmResolve = null;
    }
    // TODO: merge with confirm later so both buttons and voice input work.
    /**
     * Shows a yes/no confirmation message in the panel and resolves with the
     * user's choice. The buttons call back through the global 'window.AI'.
     */
    async confirm1(msg: string) {
        let ui = msg + `<button onclick="AI.onConfirmOKClick(event)">是</button> <button onclick="AI.onConfirmCancelClick(event)">否</button>`;
        this.confirmMsgID = await AIPanel.ins.addMsg(ui, -1);
        return new Promise((resolve) => {
            this.confirmResolve = resolve;
        });
    }

    /** Manual test: generate a random tree via the tree editor. */
    tree() {
        //tree test
        randomTree();
    }

    /** Manual test: run the audio-generation task with default data. */
    async testGenMusic() {
        let t = new AITaskGenAudioNode();
        t.execute();
    }

    /** Manual test: fetch generated audio, save it to a temp file, and import it as an asset. */
    async testA() {
        let buff = await getAudio({ name: 'A cheerful country song with acoustic guitars', temperature: 1.0, duration: 10, mode: 1 }) as ArrayBuffer;
        let file = await this.ideAgent.saveBuffToTmp(Buffer.from(buff), 'mp3');
        this.ideAgent.crateAIAssetFromFile(file, 'music','aigen.mp3');
        // add to the chat panel
        //this.print('生成音乐完成，这里要添加到聊天界面');   //TODO
        //AIPanel.ins.addSound(file);
    }

    /** Shows the AI property panel for the sky-panorama workflow (task itself disabled). */
    taskSetSkyPanorama(targetfile: string) {
        IDEAgent.inst.showAIPropUI("AIPropertyPanel");
        //return new SetSkyPanoramaTask(this, targetfile)
    }

    /** Hides the AI property panel for any non-sky task. */
    taskOthers() {
        IDEAgent.inst.hideAIPropUI('AIPropertyPanel');
    }


    /**
     * AI assist for filling an asset-type inspector field.
     * Dispatches on the field's object/property type and forwards to callPlugin.
     * NOTE(review): the original doc listed params (objtype/propname/propType/aitype)
     * that do not match the actual signature — kept here only as historical intent.
     * @param assetField the inspector asset field being filled (AssetField)
     * @param assetFile  path of the asset file involved
     */
    AI_Fill_AssetField(assetField: any/*AssetField*/, assetFile: string) {
        //let aiprop = assetField.property.aiprop;
        if (assetField.property.type == "Texture2D") {
            // Textures have many parameters, e.g. panoramic, tiling, and so on.
            if (assetField.objType.name == "Shader.SkyPanoramic") {
                // skybox texture
                //assetField.target.setValue({ _$uuid: asset.id });   //TODO 设置实际的属性
                //let propName = assetField.property.name;
                //assetField.target.setPropertyValue(propName,asset);   //TODO 设置实际的属性
            } else {
                // let task = this.curTask = new SetTextureTask(this, assetFile);
                // task.onEnd = () => {
                //     let asset = task.genAsset;
                //     assetField.target.setValue({ _$uuid: asset.id });   //TODO 设置实际的属性
                // }
            }
        } else if (assetField.objType?.base == "UIComponent" && assetField.property.name == 'skin') {
            //assetField.target.setValue('res://' + asset.id);   //TODO 设置实际的属性
        }
        else {
            this.taskOthers();
        }
        this.callPlugin(assetField.objType.name, assetField.property.name)
    }

    /**
     * TODO: later, introduce an AIType and use it to look up the matching plugin.
     * Currently a no-op placeholder.
     * @param objtype  object type name
     * @param proptype property name
     */
    callPlugin(objtype: string, proptype: string) {
        // let plgugin = AIPluginsMgr.getPlugin(objtype);
        // if (plgugin) {
        //     let aiui = plgugin.getUI();
        //     if (aiui) {
        //         let ideAgent = IDEAgent.inst;
        //         ideAgent.showAIPropUI(aiui);
        //     }
        // }
    }

    /** Shows a transient message in the panel (and speaks it), removed after a delay. */
    async tips(msg: string, delaydel = 1000) {
        let tipsid = await AIPanel.ins.addMsg(msg, -1);
        this.say(msg);
        setTimeout(() => {
            AIPanel.ins.removeMsg(tipsid);
        }, delaydel);
    }

    // Currently playing TTS audio, so a new utterance can interrupt it.
    private _curAudio: HTMLAudioElement = null;
    /**
     * Speaks a message via the TTS server.
     * NOTE(review): the early 'return' below disables the whole implementation —
     * presumably an intentional kill-switch; confirm before removing.
     */
    async say(msg: string) {
        return;
        msg = msg.replace(/['"!！@#\[\]&]/g, '');
        let url = AIConfig.TTSServerUrl;
        try {
            // analytics: say start
            let st = Date.now();
            Thinkingdata.ins.track("sayStart", {msg:msg});
            let ret = await post(url, { text: msg }, "blob");
            let dt = (Date.now()-st)/1000;
            Thinkingdata.ins.track("sayEnd", {duration:dt});
            // analytics: say end
            if (ret) {
                if (this._curAudio) {
                    this._curAudio.pause();
                    this._curAudio.currentTime = 0;
                }
                let audioURL = URL.createObjectURL(ret as Blob)
                let audio = this._curAudio = new Audio(audioURL);
                audio.play();
            }
        } catch (e) {
            console.log(e);
        }
    }

    /** Resets the scene camera via the AI camera-control scene script. */
    cameraReset() {
        Editor.scene.runScript('AI_CameraCtrl', 'init', null);
    }

    /** Clears all messages from the AI panel. */
    clearMsg() {
        AIPanel.ins.clear();
    }

    // NOTE(review): this statement has no effect ('AIPanel.ins;;' just reads the
    // getter) — presumably it was meant to remove the last message; confirm intent.
    clearLast() {
        AIPanel.ins;;
    }

    // Records when the selection changed. If the selection change happened after the
    // current edit object was set, the edit object is considered superseded.
    // e.g. after generating an image and then selecting a box, we must not keep
    // assuming the user is still working on the image.
    private selectChangeTime = new Date();
    /** Selection-change hook: stamps the time and notifies the plugin manager. */
    async onSelectionChanged() {
        //let sel = Editor.scene.getSelection()[0];
        this.selectChangeTime = new Date();
        this.pluginMgr.onSelectionChanged();
    }

    //// Public API
    /**
     * Loads an ONNX model and creates an inference session.
     * @param modelPath absolute file path, or an asset-db path which is resolved first
     * @returns the created onnxruntime InferenceSession
     */
    async loadONNX(modelPath: string) {
        await this.initONNXRuntime();
        let abspath = modelPath;
        if (path.isAbsolute(modelPath)) {
        } else {
            // Asset-db relative path: resolve it to an absolute path first.
            let model = await Editor.assetDb.getAsset(modelPath, true);
            abspath = Editor.assetDb.getFullPath(model);
        }
        let fbuff = fs.readFileSync(abspath);
        let u8data = new Uint8Array(fbuff);

        // create a new session and load the specific model.
        //
        // the model in this example contains a single MatMul node
        // it has 2 inputs: 'a'(float32, 3x4) and 'b'(float32, 4x3)
        // it has 1 output: 'c'(float32, 3x3)
        // Pass a Uint8Array (instead of a path) to work around file-not-found issues.
        const session = await ort.InferenceSession.create(u8data);
        return session;
    }

    /** Returns the singleton AI panel instance. */
    getAIPanel() {
        return AIPanel.ins;
    }

    /** Panel-ready hook: posts the welcome message and the quick-action button list. */
    onPanelInited(panel:AIPanel){
        AIPanel.ins.addMsg(`你好，欢迎使用编辑器AI助手。
本助手可以帮助你生成图片、天空球图片、语音、音乐等服务。

[B]主要用法：[/B]

1. 鼠标右键 选择相应的工具。
2. 通过输入指令直接调用相应的功能，以及进行IDE控制。
3. 如果有引擎相关问题，需要把下面的聊天频道切换到QA,再进行提问（测试版）。

`,-1)
        let helpdata = {
            name:'常用功能',
            properties: [
                {
                    name:'',
                    buttons:[
                        {name:'生成图片',onClick:()=>{new AITaskGenImageNode().execute();}},
                        {name:'生成天空盒',onClick:()=>{new AITaskGenSkyImg().execute();}},
                        {name:'生成音乐',onClick:()=>{new AITaskGenAudioNode().execute();}},
                        {name:'生成语音',onClick:()=>{new AITaskTTS().execute();}},
                    ],
                    inspector:ButtonList
                }
            ]
        };

        AIPanel.ins.addMsg(helpdata,-1)

    }

    // Prerequisite: AIPanel has run first and created the AIMenu.
    @IEditor.menu('AIMenu/tools/生成图片')
    menu_tools_genimage(){
        let t = new AITaskGenImageNode();
        t.execute();
    }
    @IEditor.menu('AIMenu/tools/生成天空盒')
    menu_tools_genskyimage(){
        let t = new AITaskGenSkyImg();
        t.execute();
    }
    @IEditor.menu('AIMenu/tools/生成音乐')
    menu_tools_genmusic(){
        let t = new AITaskGenAudioNode();
        t.execute();
    }
    @IEditor.menu('AIMenu/tools/生成语音')
    menu_tools_genTTS(){
        let t = new AITaskTTS();
        t.execute();
    }

    @addChatChannel('IDE控制')  // 'this' cannot be used here — the decorator detaches the method
    async chatChannelIDE(msg:string){
        let ai = AICore.inst;
        await ai.idectrl(msg);
    }

    @addChatChannel('QA')
    async chatChannelQA(msg:string){
        let ai = AICore.inst;
        await ai.QA(msg);
    }

    /** Placeholder for chat-channel testing (empty). */
    testChatChannel(ch:string,msg:string){

    }
}


(window as any).AI = new AICore();


// Cached ONNX inference session for the tree model (currently never assigned).
let treeSession: any = null;

/**
 * Scratch test for running the tree ONNX model. The actual inference code is
 * commented out, so this currently does nothing except swallow and log errors.
 */
async function testai() {
    const modelFile = 'treemodel.onnx'

    try {
        // let session = treeSession =  await AICore.loadONNX('editorResources/treemodel.onnx')
        // let inputdata = new Float32Array(149);
        // const tensorInput = new ort.Tensor('float32', inputdata, [149]);

        // const feeds = { input: tensorInput};
        // const results = await session.run(feeds);

        // let outdata = results['output'].data;
        // console.log(outdata);
    } catch (e) {
        console.log(`failed to inference ONNX model: ${e}.`);
    }
}

//test
