package com.langchain4j.core.flow.node.impl;

import com.langchain4j.core.flow.context.NodeContext;
import com.langchain4j.core.flow.node.FlowNode;
import com.langchain4j.core.flow.result.NodeResult;
import com.langchain4j.core.llm.factory.LLMProviderFactory;
import com.langchain4j.core.llm.model.GenerationConfig;
import com.langchain4j.core.llm.provider.LLMProvider;

import lombok.extern.slf4j.Slf4j;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

/**
 * LLM invocation node.
 *
 * <p>Reads {@code prompt}, {@code provider} and {@code model} from the node
 * context, builds a {@link GenerationConfig} from the optional tuning inputs
 * ({@code temperature}, {@code maxTokens}, {@code topP}, {@code systemPrompt}),
 * resolves the matching {@link LLMProvider} and asks it to generate text.
 *
 * <p>Contract: the future returned by {@link #execute(NodeContext)} always
 * completes <em>normally</em> with a success or failure {@link NodeResult};
 * it never completes exceptionally.
 */
@Slf4j
@Component
public class LLMCallNode extends FlowNode {

    // Field injection kept to preserve the existing no-arg constructor used by
    // the flow engine; NOTE(review): constructor injection would be preferable
    // if the framework allows it — confirm before changing.
    @Autowired
    private LLMProviderFactory providerFactory;

    public LLMCallNode() {
        this.type = NodeType.LLM_CALL;
        this.name = "LLM调用";
        this.description = "调用大语言模型生成文本";
    }

    /**
     * Executes the LLM call asynchronously.
     *
     * <p>Fix over the previous revision: instead of calling the blocking
     * {@code Future.get()} inside {@code CompletableFuture.supplyAsync} (which
     * parked a {@code ForkJoinPool.commonPool} worker for the whole LLM
     * round-trip and swallowed {@code InterruptedException}), this version
     * composes directly on the provider's future with
     * {@code thenApply}/{@code exceptionally}, so no thread blocks while the
     * call is in flight.
     *
     * @param context execution context supplying node inputs and logging
     * @return future that always resolves to a success/failure {@link NodeResult}
     */
    @Override
    public CompletableFuture<NodeResult> execute(NodeContext context) {
        try {
            context.log("开始执行LLM调用节点");

            // Gather inputs; "provider" falls back to "openai" when absent.
            String prompt = (String) context.getInput("prompt");
            String providerType = (String) context.getInput("provider", "openai");
            String modelName = (String) context.getInput("model");

            if (prompt == null || prompt.trim().isEmpty()) {
                return CompletableFuture.completedFuture(
                        NodeResult.failure(getId(), "提示词不能为空", null));
            }

            GenerationConfig config = buildGenerationConfig(context);
            if (modelName != null) {
                config.setModelName(modelName);
            }

            LLMProvider provider = providerFactory.getProvider(providerType);

            context.log("使用提供者: " + providerType + ", 模型: " + config.getModelName());

            // Non-blocking composition: map the provider result to a NodeResult,
            // and convert any async failure into a failure NodeResult so the
            // returned future never completes exceptionally.
            return provider.generate(prompt, config)
                    .thenApply(result -> {
                        if (result.getSuccess()) {
                            Map<String, Object> outputs = new HashMap<>();
                            String content = result.getContent();
                            outputs.put("content", content);
                            outputs.put("model", result.getModelName());
                            outputs.put("tokenUsage", result.getTokenUsage());
                            outputs.put("generatedAt", result.getGeneratedAt());

                            // Null-safe length: providers may report success with no content.
                            context.log("LLM调用成功，生成内容长度: "
                                    + (content == null ? 0 : content.length()));

                            return NodeResult.success(getId(), outputs);
                        }
                        return NodeResult.failure(getId(),
                                "LLM调用失败: " + result.getErrorMessage(), null);
                    })
                    .exceptionally(t -> {
                        // Unwrap CompletionException so the message names the real cause.
                        Throwable cause =
                                (t instanceof CompletionException && t.getCause() != null)
                                        ? t.getCause() : t;
                        Exception ex = (cause instanceof Exception)
                                ? (Exception) cause : new RuntimeException(cause);
                        context.logError("LLM调用节点执行失败", ex);
                        return NodeResult.failure(getId(),
                                "LLM调用异常: " + ex.getMessage(), ex);
                    });
        } catch (Exception e) {
            // Synchronous failures (bad config, unknown provider, …) follow the
            // same path: log and resolve to a failure result.
            context.logError("LLM调用节点执行失败", e);
            return CompletableFuture.completedFuture(
                    NodeResult.failure(getId(), "LLM调用异常: " + e.getMessage(), e));
        }
    }

    /**
     * A node configuration is valid when it declares a {@code prompt} entry.
     *
     * @return {@code true} if the node config contains a "prompt" key
     */
    @Override
    public boolean validate() {
        return config != null && config.containsKey("prompt");
    }

    /**
     * Declares the inputs this node accepts.
     *
     * <p>Fix: {@code topP} is now declared — {@link #buildGenerationConfig}
     * already consumed it but the schema previously omitted it.
     *
     * @return map of input name to its descriptor (type/required/default/description)
     */
    @Override
    public Map<String, Object> getInputSchema() {
        Map<String, Object> schema = new HashMap<>();
        schema.put("prompt", Map.of(
                "type", "string",
                "required", true,
                "description", "输入提示词"
        ));
        schema.put("provider", Map.of(
                "type", "string",
                "required", false,
                "default", "openai",
                "description", "LLM提供者类型"
        ));
        schema.put("model", Map.of(
                "type", "string",
                "required", false,
                "description", "模型名称"
        ));
        schema.put("temperature", Map.of(
                "type", "number",
                "required", false,
                "default", 0.7,
                "description", "温度参数"
        ));
        schema.put("maxTokens", Map.of(
                "type", "number",
                "required", false,
                "default", 2048,
                "description", "最大token数"
        ));
        schema.put("topP", Map.of(
                "type", "number",
                "required", false,
                "description", "Top-P采样参数"
        ));
        schema.put("systemPrompt", Map.of(
                "type", "string",
                "required", false,
                "description", "系统提示词"
        ));
        return schema;
    }

    /**
     * Declares the outputs this node produces on success.
     *
     * @return map of output name to its descriptor (type/description)
     */
    @Override
    public Map<String, Object> getOutputSchema() {
        Map<String, Object> schema = new HashMap<>();
        schema.put("content", Map.of(
                "type", "string",
                "description", "生成的文本内容"
        ));
        schema.put("model", Map.of(
                "type", "string",
                "description", "使用的模型名称"
        ));
        schema.put("tokenUsage", Map.of(
                "type", "object",
                "description", "Token使用情况"
        ));
        schema.put("generatedAt", Map.of(
                "type", "datetime",
                "description", "生成时间"
        ));
        return schema;
    }

    /**
     * Builds the generation config from optional context inputs, starting from
     * the provider defaults. Each input is applied only when present; values
     * are parsed from their string form so both numeric and string inputs work.
     *
     * @param context execution context supplying the optional tuning inputs
     * @return populated generation configuration
     * @throws NumberFormatException if a numeric input cannot be parsed
     *         (surfaces as a failure NodeResult via the caller's catch)
     */
    private GenerationConfig buildGenerationConfig(NodeContext context) {
        GenerationConfig config = GenerationConfig.defaultConfig();

        Object temperature = context.getInput("temperature");
        if (temperature != null) {
            // parseDouble/parseInt avoid the box-unbox round trip of valueOf.
            config.setTemperature(Double.parseDouble(temperature.toString()));
        }

        Object maxTokens = context.getInput("maxTokens");
        if (maxTokens != null) {
            config.setMaxTokens(Integer.parseInt(maxTokens.toString()));
        }

        Object topP = context.getInput("topP");
        if (topP != null) {
            config.setTopP(Double.parseDouble(topP.toString()));
        }

        Object systemPrompt = context.getInput("systemPrompt");
        if (systemPrompt != null) {
            config.setSystemPrompt(systemPrompt.toString());
        }

        return config;
    }
}