//! EternalAI API client and Rig integration
//!
//! # Example
//! ```
//! use rig_eternalai::providers::eternalai;
//!
//! let client = eternalai::Client::new("YOUR_API_KEY");
//!
//! let model = client.completion_model(eternalai::NOUS_RESEARCH_HERMES_3_LLAMA_3_1_70B_FP8, None);
//! ```

use crate::eternalai_system_prompt_manager_toolset;
use crate::json_utils;
use async_stream::stream;
use rig::OneOrMany;
use rig::agent::AgentBuilder;
use rig::client::ClientBuilderError;
use rig::completion::GetTokenUsage;
use rig::completion::{CompletionError, CompletionRequest};
use rig::embeddings::{EmbeddingError, EmbeddingsBuilder};
use rig::extractor::ExtractorBuilder;
use rig::http_client;
use rig::message;
use rig::message::AssistantContent;
use rig::providers::openai::{self, Message};
use rig::streaming::{RawStreamingChoice, StreamingCompletionResponse};
use rig::{Embed, completion, embeddings};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::{Value, json};
use std::ffi::c_uint;
use std::time::Duration;

// ================================================================
// Main EternalAI Client
// ================================================================
const ETERNALAI_API_BASE_URL: &str = "https://api.eternalai.org/v1";

/// Builder for [`Client`]; borrows the API key and base URL until [`ClientBuilder::build`]
/// copies them into the owned client.
pub struct ClientBuilder<'a> {
    // Bearer token sent with every request.
    api_key: &'a str,
    // API root; defaults to `ETERNALAI_API_BASE_URL`.
    base_url: &'a str,
    // Caller-supplied HTTP client; a default one is constructed at build time if `None`.
    http_client: Option<reqwest::Client>,
}

impl<'a> ClientBuilder<'a> {
    /// Start building a client for `api_key`, pointed at the default EternalAI endpoint.
    pub fn new(api_key: &'a str) -> Self {
        Self {
            api_key,
            base_url: ETERNALAI_API_BASE_URL,
            http_client: None,
        }
    }

    /// Override the API base URL (e.g. for a proxy or a test server).
    pub fn base_url(mut self, base_url: &'a str) -> Self {
        self.base_url = base_url;
        self
    }

    /// Supply a pre-configured `reqwest::Client` instead of the default one.
    pub fn custom_client(mut self, client: reqwest::Client) -> Self {
        self.http_client = Some(client);
        self
    }

    /// Finalize the builder into a [`Client`].
    ///
    /// When no custom HTTP client was supplied, a default one with a
    /// 120-second request timeout is constructed.
    ///
    /// # Errors
    /// Fails when the default `reqwest::Client` cannot be built (e.g. the
    /// TLS backend fails to initialize).
    pub fn build(self) -> Result<Client, ClientBuilderError> {
        let http_client = match self.http_client {
            Some(client) => client,
            None => reqwest::Client::builder()
                .timeout(Duration::from_secs(120))
                .build()?,
        };

        Ok(Client {
            api_key: self.api_key.to_owned(),
            base_url: self.base_url.to_owned(),
            http_client,
        })
    }
}

/// EternalAI API client: owns the credentials, base URL and the shared
/// `reqwest` connection pool. Cheap to clone (the pool is reference-counted
/// inside `reqwest::Client`).
#[derive(Clone)]
pub struct Client {
    // Bearer token attached to every request.
    api_key: String,
    // API root URL, without a trailing slash expected by `post`.
    base_url: String,
    http_client: reqwest::Client,
}

impl Client {
    /// Create a new EternalAI client builder.
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai;
    ///
    /// // Initialize the EternalAI client
    /// let eternalai = eternalai::Client::builder("your-eternalai-api-key")
    ///     .build()
    ///     .expect("EternalAI client should build");
    /// ```
    pub fn builder(api_key: &str) -> ClientBuilder<'_> {
        ClientBuilder::new(api_key)
    }

    /// Create a new EternalAI client. For more control, use the `builder` method.
    ///
    /// # Panics
    /// - If the reqwest client cannot be built (if the TLS backend cannot be initialized).
    pub fn new(api_key: &str) -> Self {
        Self::builder(api_key)
            .base_url(ETERNALAI_API_BASE_URL)
            .build()
            .expect("EternalAI client should build")
    }

    /// Create a new EternalAI client from the `ETERNALAI_API_KEY` environment variable.
    ///
    /// # Panics
    /// Panics if the environment variable is not set.
    pub fn from_env() -> Self {
        let api_key = std::env::var("ETERNALAI_API_KEY").expect("ETERNALAI_API_KEY not set");
        Self::new(&api_key)
    }

    // Build an authenticated POST request for `path` relative to the base URL.
    // The leading slash on `path` is stripped so that base URLs with or
    // without a trailing path segment compose the same way.
    pub(crate) fn post(&self, path: &str) -> reqwest::RequestBuilder {
        let url = format!("{}/{}", self.base_url, path.trim_start_matches('/'));
        self.http_client.post(url).bearer_auth(&self.api_key)
    }

    /// Create an embedding model with the given name.
    ///
    /// Note: a default embedding dimension of 0 will be used if the model is
    /// not one of the known `TEXT_EMBEDDING_*` identifiers. If this is the
    /// case, it's better to use [`Client::embedding_model_with_ndims`].
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai::{Client, self};
    ///
    /// // Initialize the EternalAI client
    /// let eternalai = Client::new("your-eternalai-api-key");
    ///
    /// let embedding_model = eternalai.embedding_model(eternalai::TEXT_EMBEDDING_3_LARGE);
    /// ```
    pub fn embedding_model(&self, model: impl Into<String>) -> EmbeddingModel {
        let model = model.into();
        // Unknown models fall back to 0 dimensions (`usize::default()`).
        let dims = model_dimensions_from_identifier(&model).unwrap_or_default();

        EmbeddingModel::new(self.clone(), model, dims)
    }

    /// Create an embedding model with the given name and the number of dimensions in the embedding generated by the model.
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai::{Client, self};
    ///
    /// // Initialize the EternalAI client
    /// let eternalai = Client::new("your-eternalai-api-key");
    ///
    /// let embedding_model = eternalai.embedding_model_with_ndims("model-unknown-to-rig", 3072);
    /// ```
    pub fn embedding_model_with_ndims(&self, model: &str, ndims: usize) -> EmbeddingModel {
        EmbeddingModel::with_model(self.clone(), model, ndims)
    }

    /// Create an embedding builder with the given embedding model.
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai::{Client, self};
    ///
    /// // Initialize the EternalAI client
    /// let eternalai = Client::new("your-eternalai-api-key");
    ///
    /// let embeddings = eternalai.embeddings(eternalai::TEXT_EMBEDDING_3_LARGE)
    ///     .simple_document("doc0", "Hello, world!")
    ///     .simple_document("doc1", "Goodbye, world!")
    ///     .build()
    ///     .await
    ///     .expect("Failed to embed documents");
    /// ```
    pub fn embeddings<D: Embed>(
        &self,
        model: impl Into<String>,
    ) -> EmbeddingsBuilder<EmbeddingModel, D> {
        EmbeddingsBuilder::new(self.embedding_model(model))
    }

    /// Create a completion model with the given name and optional chain id.
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai::{Client, self};
    ///
    /// // Initialize the EternalAI client
    /// let eternalai = Client::new("your-eternalai-api-key");
    ///
    /// let model = eternalai.completion_model(eternalai::NOUS_RESEARCH_HERMES_3_LLAMA_3_1_70B_FP8, None);
    /// ```
    pub fn completion_model(&self, model: &str, chain_id: Option<&str>) -> CompletionModel {
        CompletionModel::new(self.clone(), model, chain_id)
    }

    /// Create an agent builder with the given completion model.
    ///
    /// # Example
    /// ```
    /// use rig_eternalai::providers::eternalai::{Client, self};
    ///
    /// // Initialize the Eternal client
    /// let eternalai = Client::new("your-eternalai-api-key");
    ///
    /// let agent = eternalai.agent(eternalai::UNSLOTH_LLAMA_3_3_70B_INSTRUCT_BNB_4BIT, None)
    ///    .preamble("You are comedian AI with a mission to make people laugh.")
    ///    .temperature(0.0)
    ///    .build();
    /// ```
    pub fn agent(&self, model: &str, chain_id: Option<&str>) -> AgentBuilder<CompletionModel> {
        AgentBuilder::new(self.completion_model(model, chain_id))
    }

    /// Create an extractor builder with the given completion model.
    /// The chain id defaults to `None`.
    pub fn extractor<T: JsonSchema + for<'a> Deserialize<'a> + Serialize + Send + Sync>(
        &self,
        model: &str,
    ) -> ExtractorBuilder<CompletionModel, T> {
        ExtractorBuilder::new(self.completion_model(model, None))
    }
}

/// Error payload returned by the EternalAI API on failure.
#[derive(Debug, Deserialize)]
struct ApiErrorResponse {
    // Human-readable error description from the provider.
    message: String,
}

/// Untagged envelope: a response body deserializes either as the expected
/// payload `T` or, failing that, as a provider error message.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum ApiResponse<T> {
    Ok(T),
    Err(ApiErrorResponse),
}

// ================================================================
// EternalAI Embedding API
// ================================================================

/// `text-embedding-3-large` embedding model
pub const TEXT_EMBEDDING_3_LARGE: &str = "text-embedding-3-large";
/// `text-embedding-3-small` embedding model
pub const TEXT_EMBEDDING_3_SMALL: &str = "text-embedding-3-small";
/// `text-embedding-ada-002` embedding model
pub const TEXT_EMBEDDING_ADA_002: &str = "text-embedding-ada-002";

/// Look up the embedding dimension for a known model identifier.
/// Returns `None` for models not in the table.
fn model_dimensions_from_identifier(identifier: &str) -> Option<usize> {
    // Known model -> embedding dimension table.
    const KNOWN_DIMENSIONS: [(&str, usize); 3] = [
        (TEXT_EMBEDDING_3_LARGE, 3_072),
        (TEXT_EMBEDDING_3_SMALL, 1_536),
        (TEXT_EMBEDDING_ADA_002, 1_536),
    ];

    KNOWN_DIMENSIONS
        .iter()
        .find(|(name, _)| *name == identifier)
        .map(|&(_, dims)| dims)
}

/// Raw response of the `/embeddings` endpoint (OpenAI-compatible shape).
#[derive(Debug, Deserialize)]
pub struct EmbeddingResponse {
    pub object: String,
    // One entry per input document, in request order.
    pub data: Vec<EmbeddingData>,
    pub model: String,
    pub usage: Usage,
}

impl From<ApiErrorResponse> for EmbeddingError {
    fn from(err: ApiErrorResponse) -> Self {
        EmbeddingError::ProviderError(err.message)
    }
}

impl From<ApiResponse<EmbeddingResponse>> for Result<EmbeddingResponse, EmbeddingError> {
    fn from(value: ApiResponse<EmbeddingResponse>) -> Self {
        match value {
            ApiResponse::Ok(response) => Ok(response),
            ApiResponse::Err(err) => Err(EmbeddingError::ProviderError(err.message)),
        }
    }
}

/// A single embedding vector within an [`EmbeddingResponse`].
#[derive(Debug, Deserialize)]
pub struct EmbeddingData {
    pub object: String,
    // The embedding vector itself.
    pub embedding: Vec<f64>,
    // Position of the corresponding input document in the request.
    pub index: usize,
}

/// Token usage counters as reported by the EternalAI API.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Usage {
    // Tokens consumed by the prompt/input.
    pub prompt_tokens: usize,
    // Prompt plus completion tokens combined.
    pub total_tokens: usize,
}

impl std::fmt::Display for Usage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Prompt tokens: {} Total tokens: {}",
            self.prompt_tokens, self.total_tokens
        )
    }
}

/// Handle to one EternalAI embedding model; cheap to clone.
#[derive(Clone)]
pub struct EmbeddingModel {
    client: Client,
    // Model identifier sent in the request body.
    pub model: String,
    // Embedding dimension reported via `ndims()`; may be 0 for unknown models.
    ndims: usize,
}

impl embeddings::EmbeddingModel for EmbeddingModel {
    // Upper bound on documents accepted per `embed_texts` call.
    const MAX_DOCUMENTS: usize = 1024;

    type Client = Client;

    /// Construct the model for rig's generic client plumbing.
    ///
    /// When `dims` is `None`, fall back to the known-model dimension table;
    /// unknown models end up with 0 dimensions (`usize::default()`).
    fn make(client: &Self::Client, model: impl Into<String>, dims: Option<usize>) -> Self {
        let model = model.into();

        let dims = dims
            .or(model_dimensions_from_identifier(&model))
            .unwrap_or_default();

        Self::new(client.clone(), model, dims)
    }

    fn ndims(&self) -> usize {
        self.ndims
    }

    /// Embed `documents` in a single `/embeddings` call.
    ///
    /// # Errors
    /// - `HttpError` on transport or body-decoding failures.
    /// - `ProviderError` when the API returns an error payload or a non-2xx status.
    /// - `ResponseError` when the response has a different number of embeddings
    ///   than input documents.
    async fn embed_texts(
        &self,
        documents: impl IntoIterator<Item = String>,
    ) -> Result<Vec<embeddings::Embedding>, EmbeddingError> {
        // Materialize so the count can be validated against the response below.
        let documents = documents.into_iter().collect::<Vec<_>>();

        let response = self
            .client
            .post("/embeddings")
            .json(&json!({
                "model": self.model,
                "input": documents,
            }))
            .send()
            .await
            .map_err(|e| EmbeddingError::HttpError(http_client::Error::Instance(e.into())))?;

        if response.status().is_success() {
            match response
                .json::<ApiResponse<EmbeddingResponse>>()
                .await
                .map_err(|e| EmbeddingError::HttpError(http_client::Error::Instance(e.into())))?
            {
                ApiResponse::Ok(response) => {
                    tracing::info!(target: "rig",
                        "EternalAI embedding token usage: {}",
                        response.usage
                    );

                    if response.data.len() != documents.len() {
                        return Err(EmbeddingError::ResponseError(
                            "Response data length does not match input length".into(),
                        ));
                    }

                    // Pair each returned vector with its source document.
                    // NOTE(review): this assumes `data` comes back in request
                    // order; the `index` field is not consulted — confirm the
                    // API guarantees ordering.
                    Ok(response
                        .data
                        .into_iter()
                        .zip(documents.into_iter())
                        .map(|(embedding, document)| embeddings::Embedding {
                            document,
                            vec: embedding.embedding,
                        })
                        .collect())
                }
                ApiResponse::Err(err) => Err(EmbeddingError::ProviderError(err.message)),
            }
        } else {
            // Non-2xx: surface the raw body as a provider error.
            Err(EmbeddingError::ProviderError(
                response.text().await.map_err(|e| {
                    EmbeddingError::HttpError(http_client::Error::Instance(e.into()))
                })?,
            ))
        }
    }
}

impl EmbeddingModel {
    /// Build an embedding-model handle from its parts.
    pub fn new(client: Client, model: impl Into<String>, ndims: usize) -> Self {
        let model = model.into();
        Self {
            client,
            model,
            ndims,
        }
    }

    /// Convenience constructor taking the model name as a `&str`.
    pub fn with_model(client: Client, model: &str, ndims: usize) -> Self {
        Self::new(client, model, ndims)
    }
}

// ================================================================
// EternalAI Completion API
// ================================================================
/// `NousResearch/Hermes-3-Llama-3.1-70B-FP8` completion model identifier.
pub const NOUS_RESEARCH_HERMES_3_LLAMA_3_1_70B_FP8: &str =
    "NousResearch/Hermes-3-Llama-3.1-70B-FP8";
/// `unsloth/Llama-3.3-70B-Instruct-bnb-4bit` completion model identifier.
pub const UNSLOTH_LLAMA_3_3_70B_INSTRUCT_BNB_4BIT: &str = "unsloth/Llama-3.3-70B-Instruct-bnb-4bit";

/// Model identifier -> chain id lookup table.
pub const MAPPING_CHAINID: [(&str, &str); 2] = [
    (NOUS_RESEARCH_HERMES_3_LLAMA_3_1_70B_FP8, "45762"),
    (UNSLOTH_LLAMA_3_3_70B_INSTRUCT_BNB_4BIT, "45762"),
];

/// Look up the default chain id for a known model identifier.
///
/// Returns `None` when the model is not present in [`MAPPING_CHAINID`].
pub fn get_chain_id(key: &str) -> Option<&str> {
    // Iterator `find` over the static table replaces the manual index loop.
    MAPPING_CHAINID
        .iter()
        .find(|&&(model, _)| model == key)
        .map(|&(_, chain_id)| chain_id)
}

/// Raw response of the `/chat/completions` endpoint (OpenAI-compatible shape,
/// plus EternalAI's `onchain_data` extension).
#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct CompletionResponse {
    pub id: String,
    pub object: String,
    // Unix timestamp of when the completion was created.
    pub created: u64,
    pub model: String,
    pub system_fingerprint: Option<String>,
    pub choices: Vec<Choice>,
    pub usage: Option<Usage>,
    // EternalAI-specific on-chain metadata; logged verbatim when present.
    pub onchain_data: Option<Value>,
}

impl GetTokenUsage for CompletionResponse {
    /// Map the provider's usage block onto rig's usage counters.
    ///
    /// Returns `None` when the response carried no usage information.
    fn token_usage(&self) -> Option<rig::completion::Usage> {
        let api_usage = self.usage.clone()?;
        let mut usage = rig::completion::Usage::new();

        usage.input_tokens = api_usage.prompt_tokens as u64;
        usage.total_tokens = api_usage.total_tokens as u64;
        // Output tokens are derived from the totals; saturate so a malformed
        // response (prompt_tokens > total_tokens) cannot panic on usize
        // underflow in debug builds.
        usage.output_tokens = api_usage
            .total_tokens
            .saturating_sub(api_usage.prompt_tokens) as u64;
        Some(usage)
    }
}

impl From<ApiErrorResponse> for CompletionError {
    fn from(err: ApiErrorResponse) -> Self {
        CompletionError::ProviderError(err.message)
    }
}

impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
    type Error = CompletionError;

    /// Convert a raw EternalAI chat response into rig's provider-agnostic
    /// completion response.
    ///
    /// # Errors
    /// `ResponseError` when the response has no choices, the first choice is
    /// not an assistant message, or the message carries no content at all.
    fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
        let choice = response.choices.first().ok_or_else(|| {
            CompletionError::ResponseError("Response contained no choices".to_owned())
        })?;

        // Only assistant messages are valid here; flatten text/refusal parts
        // and tool calls into one content list.
        let content = match &choice.message {
            Message::Assistant {
                content,
                tool_calls,
                ..
            } => {
                let mut content = content
                    .iter()
                    .map(|c| match c {
                        openai::AssistantContent::Text { text } => {
                            completion::AssistantContent::text(text)
                        }
                        // Surface a refusal as plain text so callers still see it.
                        openai::AssistantContent::Refusal { refusal } => {
                            completion::AssistantContent::text(refusal)
                        }
                    })
                    .collect::<Vec<_>>();

                // Extend directly from the iterator; the intermediate
                // `collect::<Vec<_>>()` the old code built was unnecessary.
                content.extend(tool_calls.iter().map(|call| {
                    completion::AssistantContent::tool_call(
                        &call.id,
                        &call.function.name,
                        call.function.arguments.clone(),
                    )
                }));
                Ok(content)
            }
            _ => Err(CompletionError::ResponseError(
                "Response did not contain a valid message or tool call".into(),
            )),
        }?;

        let choice = OneOrMany::many(content).map_err(|_| {
            CompletionError::ResponseError(
                "Response contained no message or tool call (empty)".to_owned(),
            )
        })?;

        // Derive output tokens from the totals; saturate so a malformed usage
        // block (prompt_tokens > total_tokens) cannot panic on usize underflow.
        let usage = response
            .usage
            .as_ref()
            .map(|usage| completion::Usage {
                input_tokens: usage.prompt_tokens as u64,
                output_tokens: usage.total_tokens.saturating_sub(usage.prompt_tokens) as u64,
                total_tokens: usage.total_tokens as u64,
            })
            .unwrap_or_default();

        Ok(completion::CompletionResponse {
            choice,
            usage,
            raw_response: response,
        })
    }
}

/// One completion choice within a [`CompletionResponse`].
#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct Choice {
    pub index: usize,
    pub message: Message,
    pub logprobs: Option<serde_json::Value>,
    pub finish_reason: String,
}

/// A tool invocation requested by the model.
#[derive(Debug, Deserialize)]
pub struct ToolCall {
    pub id: String,
    pub r#type: String,
    pub function: Function,
}

/// OpenAI-style tool definition wrapper sent in completion requests
/// (`{"type": "function", "function": {...}}`).
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ToolDefinition {
    pub r#type: String,
    pub function: completion::ToolDefinition,
}

impl From<completion::ToolDefinition> for ToolDefinition {
    fn from(tool: completion::ToolDefinition) -> Self {
        Self {
            r#type: "function".into(),
            function: tool,
        }
    }
}

/// The function name and raw JSON argument string of a [`ToolCall`].
#[derive(Debug, Deserialize)]
pub struct Function {
    pub name: String,
    // JSON-encoded arguments, passed through as-is.
    pub arguments: String,
}

/// Handle to one EternalAI completion model; cheap to clone.
#[derive(Clone)]
pub struct CompletionModel {
    client: Client,
    /// Name of the model (e.g.: NousResearch/Hermes-3-Llama-3.1-70B-FP8)
    pub model: String,
    // Chain id for on-chain execution; empty string when none was supplied.
    pub chain_id: String,
}

impl CompletionModel {
    /// Create a completion-model handle; a missing `chain_id` is stored as "".
    pub fn new(client: Client, model: impl Into<String>, chain_id: Option<&str>) -> Self {
        let chain_id = chain_id.unwrap_or_default().to_string();
        Self {
            client,
            model: model.into(),
            chain_id,
        }
    }

    /// Convenience constructor taking the model name as a `&str`.
    pub fn with_model(client: Client, model: &str, chain_id: Option<&str>) -> Self {
        Self::new(client, model, chain_id)
    }
}

impl completion::CompletionModel for CompletionModel {
    type Response = CompletionResponse;
    type StreamingResponse = CompletionResponse;

    type Client = Client;

    // rig's generic constructor; no chain id is available here, so it defaults to "".
    fn make(client: &Self::Client, model: impl Into<String>) -> Self {
        Self::new(client.clone(), model, None)
    }

    /// Execute one chat completion.
    ///
    /// Message order: preamble (system), optional on-chain system prompt,
    /// normalized documents, then chat history. The on-chain prompt is fetched
    /// only when `ETERNALAI_RPC_URL`, `ETERNALAI_AGENT_CONTRACT_ADDRESS` and
    /// `ETERNALAI_AGENT_ID` are all set and non-empty.
    ///
    /// # Errors
    /// - `ProviderError` when the on-chain prompt fetch fails, the API returns
    ///   an error payload, or the HTTP status is non-2xx.
    /// - `HttpError` on transport or body-decoding failures.
    async fn completion(
        &self,
        completion_request: CompletionRequest,
    ) -> Result<completion::CompletionResponse<CompletionResponse>, CompletionError> {
        // Build up the order of messages (context, chat_history)
        let mut partial_history = vec![];
        if let Some(docs) = completion_request.normalized_documents() {
            partial_history.push(docs);
        }
        partial_history.extend(completion_request.chat_history);

        // Initialize full history with preamble (or empty if non-existent)
        let mut full_history: Vec<Message> = completion_request
            .preamble
            .map_or_else(Vec::new, |preamble| vec![Message::system(&preamble)]);

        // Convert prompt to user message
        tracing::info!("Try to get on-chain system prompt");
        let eternal_ai_rpc = std::env::var("ETERNALAI_RPC_URL").unwrap_or_else(|_| "".to_string());
        let eternal_ai_contract =
            std::env::var("ETERNALAI_AGENT_CONTRACT_ADDRESS").unwrap_or_else(|_| "".to_string());
        let eternal_ai_agent_id =
            std::env::var("ETERNALAI_AGENT_ID").unwrap_or_else(|_| "".to_string());
        if !eternal_ai_rpc.is_empty()
            && !eternal_ai_contract.is_empty()
            && !eternal_ai_agent_id.is_empty()
        {
            tracing::info!(
                "get on-chain system prompt with {}, {}, {}",
                eternal_ai_rpc,
                eternal_ai_contract,
                eternal_ai_agent_id
            );
            // NOTE(review): a non-numeric ETERNALAI_AGENT_ID silently becomes
            // agent id 0 here — confirm that fallback is intended.
            let c_value: c_uint = eternal_ai_agent_id.parse::<u32>().unwrap_or(0);
            let prompt = match eternalai_system_prompt_manager_toolset::get_on_chain_system_prompt(
                &eternal_ai_rpc,
                &eternal_ai_contract,
                c_value,
            )
            .await
            {
                Ok(value) => value,
                Err(e) => return Err(CompletionError::ProviderError(e)),
            };
            match prompt {
                None => {
                    tracing::info!("on-chain system prompt is none")
                }
                Some(value) => {
                    // Appended after the preamble, as an additional system message.
                    full_history.push(Message::system(&value));
                }
            }
        }

        // Convert and extend the rest of the history
        full_history.extend(
            partial_history
                .into_iter()
                .map(message::Message::try_into)
                .collect::<Result<Vec<Vec<Message>>, _>>()?
                .into_iter()
                .flatten()
                .collect::<Vec<_>>(),
        );

        // Tools (and "tool_choice": "auto") are only included when present.
        let request = if completion_request.tools.is_empty() {
            json!({
                "model": self.model,
                "messages": full_history,
                "temperature": completion_request.temperature,
            })
        } else {
            json!({
                "model": self.model,
                "messages": full_history,
                "temperature": completion_request.temperature,
                "tools": completion_request.tools.into_iter().map(ToolDefinition::from).collect::<Vec<_>>(),
                "tool_choice": "auto",
            })
        };

        tracing::debug!(target: "rig", "Sending completion request: {}", request);

        // Any caller-supplied additional params are merged over the base body.
        let response = self
            .client
            .post("/chat/completions")
            .json(
                &if let Some(params) = completion_request.additional_params {
                    json_utils::merge(request, params)
                } else {
                    request
                },
            )
            .send()
            .await
            .map_err(|e| CompletionError::HttpError(http_client::Error::Instance(e.into())))?;

        if response.status().is_success() {
            match response
                .json::<ApiResponse<CompletionResponse>>()
                .await
                .map_err(|e| CompletionError::HttpError(http_client::Error::Instance(e.into())))?
            {
                ApiResponse::Ok(response) => {
                    tracing::info!(target: "rig",
                        "EternalAI completion token usage: {:?}",
                        response.usage.clone().map(|usage| format!("{usage}")).unwrap_or("N/A".to_string())
                    );
                    // EternalAI-specific: log any on-chain metadata verbatim.
                    match &response.onchain_data {
                        Some(data) => {
                            let onchain_data = serde_json::to_string_pretty(data)?;
                            tracing::info!("onchain_data: {}", onchain_data);
                        }
                        None => {
                            tracing::info!("onchain_data: None");
                        }
                    }
                    response.try_into()
                }
                ApiResponse::Err(err) => Err(CompletionError::ProviderError(err.message)),
            }
        } else {
            // Non-2xx: surface the raw body as a provider error.
            Err(CompletionError::ProviderError(
                response.text().await.map_err(|e| {
                    CompletionError::HttpError(http_client::Error::Instance(e.into()))
                })?,
            ))
        }
    }

    /// Pseudo-streaming: performs a full (non-streaming) completion, then
    /// replays its content as a stream of message/tool-call chunks followed by
    /// a final-response chunk.
    ///
    /// NOTE(review): unsupported content kinds (Image, Reasoning) panic here
    /// rather than yielding an error chunk — consider returning
    /// `CompletionError` instead.
    async fn stream(
        &self,
        request: CompletionRequest,
    ) -> Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
        let resp = self.completion(request).await?;

        let stream = Box::pin(stream! {
            for c in resp.choice {
                match &c {
                    AssistantContent::Text(text) => {
                        yield Ok(RawStreamingChoice::Message(text.text.clone()))
                    }
                    AssistantContent::ToolCall(tc) => {
                        yield Ok(RawStreamingChoice::ToolCall {
                            id: tc.id.clone(),
                            call_id: None,
                            name: tc.function.name.clone(),
                            arguments: tc.function.arguments.clone(),
                        })
                    }
                    AssistantContent::Image(_) => {
                        panic!("Image content is currently unimplemented on Eternal AI. If you need this, please open a ticket!")
                    }
                    AssistantContent::Reasoning(_) => {
                        panic!("Reasoning is currently unimplemented on Eternal AI. If you need this, please open a ticket!")
                    }
                }
            }

            yield Ok(RawStreamingChoice::FinalResponse(resp.raw_response.clone()));
        });

        Ok(StreamingCompletionResponse::stream(stream))
    }
}
