import { Annotation } from '@langchain/langgraph';
import { baseInputVars, LLMNode, LLMProvider } from "../../../core";
import { Jimp } from "jimp";

export class IdentifyImgBgColorNode extends LLMNode {
    name = "IdentifyImgBgColorNode";

    // Inputs: image (path / URL / buffer accepted by Jimp.read) plus a
    // bounding box. NOTE(review): bbox is not read by Run() — presumably
    // consumed by a downstream node; kept for interface compatibility.
    inputVars = Annotation.Root({
        ...baseInputVars.spec,
        image: Annotation<string>,
        bbox: Annotation<number[]>,
    });

    outputVars = Annotation.Root({
        // One of "blue" | "red" | "white": the rectangle color chosen to
        // contrast with the image background.
        color: Annotation<string>,
        // JPEG data-URL of the decoded source image.
        sourceImageBase64: Annotation<string>,
        // Decoded Jimp image instance; typed loosely because Jimp's
        // instance type is awkward to name across Jimp versions.
        imageBuffer: Annotation<any>,
    });

    /** The only colors the prompt permits the model to answer with. */
    private static readonly ALLOWED_COLORS = ["blue", "red", "white"] as const;

    /**
     * Decodes the input image, asks a vision model which rectangle color
     * (blue / red / white) would contrast best with the image background,
     * and publishes the chosen color together with the decoded image.
     *
     * @returns the node's output state: { color, sourceImageBase64, imageBuffer }
     */
    async Run() {
        const image = await Jimp.read(this.inputVars.State.image);
        const base64 = await image.getBase64("image/jpeg");

        const model = LLMProvider.current.DoubaoSeed16Vision;
        // Disable chain-of-thought output — we only want the bare color word.
        model.thinking = "disabled";
        const prompts = [
            {
                role: "user",
                content: [
                    {
                        type: "image_url",
                        image_url: {
                            url: base64
                        }
                    },
                    {
                        text: "识别图片背景，然后判断应该在这个图片上面画矩形框应该选取的颜色。 比如暖色的背景就用蓝色框， 冷色背景用红色框， 黑色背景用白色框， 白色背景用红色框。 输出： blue、 red、white 中的一种",
                        type: "text"
                    }
                ]
            }
        ] as any;

        // Drain the stream to completion; consuming it is what populates
        // model.lastFullResponse / model.totalUsage — the chunks themselves
        // are not needed.
        const outputStream = await model.stream(prompts);
        for await (const _chunk of outputStream) {
            // intentionally empty
        }
        this.usage.add(model.totalUsage);

        // Fix: the raw response may carry whitespace or extra prose around
        // the color word, which would break downstream exact-match lookups.
        // Normalize and extract the first allowed color; fall back to "red"
        // (the prompt's white-background default) when none is found.
        const raw = (model.lastFullResponse ?? "").toLowerCase();
        const color =
            IdentifyImgBgColorNode.ALLOWED_COLORS.find((c) => raw.includes(c)) ?? "red";

        this.outputVars.State.sourceImageBase64 = base64;
        this.outputVars.State.color = color;
        this.outputVars.State.imageBuffer = image;

        return this.outputVars.State;
    }

}