use super::ChatLLMConfig;
use llm_sdk::chat::ChatMessage;
use llm_sdk::chat::AssistantMessage;
use llm_sdk::chat::ChatRequestBuilder;
use llm_sdk::LlmSdk;
use anyhow::{Context, Result};
use llm_sdk::chat::ChatRequest;
use llm_sdk::chat::ToolDefinition;
use std::path::Path;

/// Chat-oriented LLM client: bundles the TOML-loaded configuration
/// (endpoint, API key, model and sampling parameters) with an [`LlmSdk`]
/// handle that performs the actual chat calls.
pub struct ChatLLM {
    // Parsed configuration; crate-visible so siblings (e.g. tests) can inspect it.
    pub(crate) config: ChatLLMConfig,
    // Underlying SDK handle used to send chat requests.
    llm_sdk: LlmSdk,
}

impl ChatLLM {
    pub fn load_config<P>(config_path: P) -> Result<Self> 
        where P: AsRef<Path>
    {
        let path = config_path.as_ref();
        let config_content = std::fs::read_to_string(path).context(format!("Read ChatLLM config '{:?}' failed", path))?;
        let llm_config: ChatLLMConfig = toml::from_str(&config_content).context("Load ChatLLM")?;
        Ok(Self::new(llm_config))
    }

    pub fn new(config: ChatLLMConfig) -> Self {
        Self { config, llm_sdk: LlmSdk::new(), }
    }

    pub async fn ask_messages(&self, messages: impl Into<Vec<ChatMessage>>) -> Result<AssistantMessage> {
        let request = self.request_builder()
            .messages(messages)
            .build().context("Build request")?;
        self.ask(&request).await
    }

    pub async fn ask_with_tools(
        &self, 
        messages: impl Into<Vec<ChatMessage>>,
        tools: impl Into<Vec<ToolDefinition>>,
    ) -> Result<AssistantMessage> 
    {
        let request = self.request_builder()
            .messages(messages)
            .tools(tools)
            .build().context("Build request")?;
        self.ask(&request).await
    }

    pub async fn ask(&self, request: &ChatRequest) -> Result<AssistantMessage> {
        Ok(self.llm_sdk.chat(
            &self.config.base_url, 
            &self.config.api_key, 
            &request
        ).await?.choices.pop().context("No choice in reponse")?.message)
    }

    fn request_builder(&self) -> ChatRequestBuilder {
        let mut builder = ChatRequestBuilder::default();
        
        builder.model(&self.config.model);

        if let Some(max_tokens) = self.config.max_tokens {
            builder.max_tokens(max_tokens);
        }
        if let Some(stream) = self.config.stream {
            builder.stream(stream);
        }
        if let Some(temperature) = self.config.temperature {
            builder.temperature(temperature);
        }

        builder
    }
}

#[cfg(test)]
mod test {
    use super::*;

    /// Smoke test: loads the on-disk config, performs a real chat round-trip
    /// and prints the assistant's reply. Requires `./config/config.toml` and
    /// network access to the configured endpoint.
    #[tokio::test]
    async fn test_ask() {
        let chat_llm = ChatLLM::load_config("./config/config.toml").unwrap();

        let conversation = vec![
            ChatMessage::system("I can answer any question you ask me."),
            ChatMessage::user("你好！"),
        ];

        let reply = chat_llm.ask_messages(conversation).await.unwrap();
        println!("{:#?}", reply.content.unwrap())
    }
}