mod http;
mod ollama;

use model_graph_types::modeling::assistant::OLLAMA_MODEL_PROVIDER;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use model_graph_types::{
    container::workflow::{
        LlmStatement, WorkflowBlockExecuteResult, WorkflowBlockExecuteResultBuilder,
        WorkflowBlockExecuteResultStatus,
    },
    modeling::Value,
};

use crate::workflow::context::WorkflowContext;

/// Executes an LLM workflow statement and maps the outcome onto a
/// workflow branch.
///
/// On success the result carries the produced values and routes through
/// the `"source"` handle; on failure it routes through the
/// `"fail-branch"` handle so downstream error-handling nodes can run.
///
/// # Errors
///
/// Returns `Err` only if building the `WorkflowBlockExecuteResult`
/// itself fails; execution errors from the LLM call are converted into
/// the fail-branch result instead of being propagated.
pub async fn execute(
    statement: &LlmStatement,
    context: Arc<Mutex<WorkflowContext>>,
) -> anyhow::Result<WorkflowBlockExecuteResult> {
    match _execute(statement, context).await {
        Ok(values) => Ok(WorkflowBlockExecuteResultBuilder::default()
            .status(WorkflowBlockExecuteResultStatus::Succeeded)
            .source_handle("source")
            .result(values)
            .build()?),
        // NOTE(review): the error value is discarded and the status stays
        // `Succeeded` — the failure is expressed only through the
        // `"fail-branch"` handle. Confirm this is the intended contract;
        // if the builder exposes an error/message field, consider
        // recording `_err` on the result for observability.
        Err(_err) => Ok(WorkflowBlockExecuteResultBuilder::default()
            .status(WorkflowBlockExecuteResultStatus::Succeeded)
            .source_handle("fail-branch")
            .build()?),
    }
}

/// Dispatches an LLM statement to the backend that matches its model
/// provider.
///
/// Statements whose provider equals [`OLLAMA_MODEL_PROVIDER`] go through
/// the native `ollama` client; every other provider falls back to the
/// generic `http` client.
///
/// # Errors
///
/// Propagates any error returned by the selected backend.
pub async fn _execute(
    statement: &LlmStatement,
    context: Arc<Mutex<WorkflowContext>>,
) -> anyhow::Result<HashMap<String, Value>> {
    // Route on the statement's model provider string.
    if statement.model.provider == OLLAMA_MODEL_PROVIDER {
        ollama::execute(statement, context).await
    } else {
        http::execute(statement, context).await
    }
}
