use generator_engine::tera::TeraEngine;
use model_graph_types::assistant::prompts::{
    PromptTemplateItem, ASSISTANT_ROLE_KEY, SYSTEM_ROLE_KEY,
};
use model_graph_types::modeling::assistant::{ModelProvider, ModelProviderSetting, ModelType};
use ollama_rs::generation::chat::request::ChatMessageRequest;
use ollama_rs::{generation::chat::ChatMessage, Ollama};

use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use model_graph_types::{
    container::workflow::LlmStatement, generator::CodeGenerator, modeling::Value,
};

use model_graph_model::{
    model::{default_model_name, get_model},
    provider::get_provider,
};

use crate::workflow::context::{Getter, WorkflowContext};

/// Executes an LLM workflow statement: resolves the provider/model, renders
/// the prompt templates against the workflow context, sends the chat request
/// to an Ollama server, and returns the response text under the `"text"` key.
///
/// # Errors
/// Returns an error when the default model lookup, provider lookup, prompt
/// rendering, or the chat call itself fails.
pub async fn execute(
    statement: &LlmStatement,
    context: Arc<Mutex<WorkflowContext>>,
) -> anyhow::Result<HashMap<String, Value>> {
    // Fallback provider/model names used when the statement leaves them unset.
    let (default_provider_name, default_model_name) = default_model_name(ModelType::LLM).await?;

    let prompts = &statement.prompt_template;
    let model = &statement.model;

    // Resolve the provider: fall back to the default only when the statement's
    // provider name is empty.
    // BUGFIX: the condition was previously inverted — an empty provider name
    // was used as-is while a non-empty one was discarded for the default.
    let provider_name = if model.provider.is_empty() {
        default_provider_name
    } else {
        model.provider.clone()
    };

    let (provider, provider_setting) = get_provider(&provider_name).await?;
    tracing::debug!("Provider:{:?}|{:?}", provider, provider_setting);

    // Resolve the model name: prefer the registered model's canonical name,
    // fall back to the raw name from the statement, then to the default.
    let model_name = if let Some(model_name) = &model.name {
        if let Ok((model, model_setting)) = get_model(model_name).await {
            tracing::debug!("Model:{:?}|{:?}", model, model_setting);
            model.name.clone()
        } else {
            model_name.clone()
        }
    } else {
        default_model_name
    };

    // Completion temperature, defaulting to 0.8 when unset.
    // TODO: currently unused — wire into the chat request options.
    let _temperature = model
        .completion_params
        .as_ref()
        .and_then(|params| params.temperature)
        .unwrap_or(0.8);

    // Resolve the Ollama endpoint from the provider settings.
    let (url, port) = _get_url_from_provider(&provider, &provider_setting, &model_name)?;

    // TODO: currently unused — apply as the request timeout.
    let _timeout = 120000u64;
    tracing::debug!("{}", url);

    let mut ollama = Ollama::new(url, port);

    // Chat history buffer; messages exchanged in this call are appended here.
    let mut history = vec![];

    let res_messages = ollama
        .send_chat_messages_with_history(
            &mut history,
            ChatMessageRequest::new(model_name, _get_prompts(prompts, context.clone()).await?),
        )
        .await
        .map_err(|err| anyhow::anyhow!("{}", err))?;

    let response_message = res_messages.message.content;
    tracing::debug!("{:?}", response_message);

    // Expose the raw response text to downstream workflow nodes.
    let mut outputs: HashMap<String, Value> = HashMap::new();
    outputs.insert(String::from("text"), Value::String(response_message));
    Ok(outputs)
}

/// Extracts the Ollama base URL and port from a provider's configuration.
///
/// Only the `"ollama"` provider is supported; its `encrypted_config` is
/// expected to be a JSON-encoded [`OllamaSetting`].
///
/// # Errors
/// Fails when the config JSON is malformed or the provider is not `"ollama"`.
fn _get_url_from_provider(
    provider: &ModelProvider,
    setting: &ModelProviderSetting,
    // Currently unused; kept for interface stability with callers.
    _model_name: &str,
) -> anyhow::Result<(String, u16)> {
    if provider.name == "ollama" {
        let setting: OllamaSetting = serde_json::from_str(setting.encrypted_config.as_str())?;
        // `setting` is owned here, so the URL can be moved out without cloning.
        return Ok((setting.url_base, setting.port));
    }
    Err(anyhow::anyhow!("provider不是ollama"))
}

/// Renders the statement's prompt templates into chat messages.
///
/// Templates containing `{{` are rewritten from the workflow's `{{#var#}}`
/// placeholder syntax into Tera's `{{Nvar}}` form and rendered against the
/// context variables (exposed under the `"N"` namespace); plain text passes
/// through unchanged. The prompt role selects the message constructor.
///
/// # Errors
/// Fails when reading the context variables or rendering a template fails.
async fn _get_prompts(
    prompt_template: &[PromptTemplateItem],
    context: Arc<Mutex<WorkflowContext>>,
) -> anyhow::Result<Vec<ChatMessage>> {
    let engine = TeraEngine::default();
    // Workflow variables made available to the templates, prefixed with "N".
    let arguments = Value::Object(context.get_map_values(Some(String::from("N")))?);

    let mut list = Vec::with_capacity(prompt_template.len());
    for prompt in prompt_template {
        // Resolve the message text first, then branch on role exactly once
        // (the original duplicated the role dispatch for both text paths).
        let text = if prompt.text.contains("{{") {
            // Map "{{#var#}}" onto Tera's "{{Nvar}}" lookup syntax.
            let rewritten = prompt.text.replace("{{#", "{{N").replace("#}}", "}}");
            engine.simple(arguments.clone(), &rewritten).await?
        } else {
            prompt.text.clone()
        };

        // Unknown roles default to a user message.
        let message = if prompt.role == SYSTEM_ROLE_KEY {
            ChatMessage::system(text)
        } else if prompt.role == ASSISTANT_ROLE_KEY {
            ChatMessage::assistant(text)
        } else {
            ChatMessage::user(text)
        };
        list.push(message);
    }
    Ok(list)
}

/// A single message inside an OpenAI-style chat completion choice.
/// NOTE(review): not referenced anywhere in this file — presumably kept for
/// an OpenAI-compatible provider path; confirm before removing.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
struct OpenAIChoiceMessage {
    // Author role of the message; optional in responses.
    pub role: Option<String>,
    // The message text.
    pub content: String,
}

/// One completion choice in an OpenAI-style chat response.
/// NOTE(review): not referenced anywhere in this file — presumably kept for
/// an OpenAI-compatible provider path; confirm before removing.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
struct OpenAIChoice {
    // Position of this choice within the response.
    pub index: Option<i32>,
    // The generated message for this choice.
    pub message: OpenAIChoiceMessage,
    // Why generation stopped (e.g. length/stop), when provided.
    pub finish_reason: Option<String>,
}

/// Top-level OpenAI-style chat completion response payload.
/// NOTE(review): not referenced anywhere in this file — presumably kept for
/// an OpenAI-compatible provider path; confirm before removing.
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
struct OpenAIChatResponse {
    // The list of completion choices returned by the API.
    pub choices: Vec<OpenAIChoice>,
}

/// Connection settings for an Ollama provider, deserialized from the
/// provider's JSON `encrypted_config` in `_get_url_from_provider`.
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
struct OllamaSetting {
    // Base URL of the Ollama server (passed to `Ollama::new`).
    pub url_base: String,
    // TCP port of the Ollama server.
    pub port: u16,
}
