
use super::ChatLLM;

use anyhow::{Context, Result};
use llm_sdk::chat::{AssistantMessage, ChatMessage, ChatRequest, ChatRequestBuilder, ToolCall, ToolDefinition};
use std::path::Path;

/// A wrapper around [`ChatLLM`] that keeps a rolling conversation history,
/// so successive `ask` calls share context. Once the history grows beyond
/// `max_messages`, the oldest messages are evicted.
pub struct ChatMemoryLLM {
    // Underlying chat client that actually performs the request.
    llm: ChatLLM,
    // Upper bound on the number of messages retained in `request.messages`;
    // older entries are dropped first when the limit is exceeded.
    max_messages: usize,
    // Accumulated request state: model name, registered tool definitions,
    // and the conversation history sent on every `ask`.
    request: ChatRequest,
}

impl ChatMemoryLLM {
    pub fn load_config<P>(config_path: P, max_messages: usize) -> Result<Self> 
        where P: AsRef<Path>
    {
        let llm = ChatLLM::load_config(config_path).context("Load LLM")?;
        let request = ChatRequestBuilder::default()
            .model(&llm.config.model)
            .messages(Vec::new())
            .build().context("Build init ChatRequest")?;

        Ok(Self { llm, request, max_messages })
    }

    /// Post request and save response into memory!
    pub async fn ask(&mut self) -> Result<&AssistantMessage> {
        let message = self.llm.ask(&self.request).await.context("Ask LLM")?;
        let message = ChatMessage::Assistant(message);
        self.add_message(message);
        if let ChatMessage::Assistant(message) = &self.messages().last().unwrap() {
            Ok(message)
        } else {
            panic!("")
        }
    }

    pub fn with_tools(&mut self, tools: Vec<ToolDefinition>) {
        self.request.tools = tools;
    }

    pub fn add_tool(&mut self, tool: ToolDefinition) {
        self.request.tools.push(tool);
    }

    pub fn add_message(&mut self, message: ChatMessage) {
        self.request.messages.push(message);
        self.limit_messages();
    }

    pub fn add_messages<I: Iterator<Item = ChatMessage>>(&mut self, iter: I) {
        self.request.messages.extend(iter);
        self.limit_messages();
    }

    pub fn add_system_message(&mut self, content: impl Into<String>) {
        self.add_message(ChatMessage::system(content));
    }

    pub fn add_user_message(&mut self, content: impl Into<String>) {
        self.add_message(ChatMessage::user(content));
    }

    pub fn add_assistant_message(&mut self, content: impl Into<String>, tool_calls: impl Into<Vec<ToolCall>>) {
        self.add_message(ChatMessage::assistant(Some(content), tool_calls));
    }

    pub fn add_tool_message(&mut self, content: impl Into<String>, tool_call_id: impl Into<String>) {
        self.add_message(ChatMessage::tool(content, tool_call_id));
    }

    pub fn clear(&mut self) {
        self.request.messages.clear();
    }

    pub fn messages(&self) -> impl Iterator<Item = &ChatMessage> {
        self.request.messages.iter()
    }

    pub fn last_message(&self) -> Option<&ChatMessage> {
        self.request.messages.last()
    }

    fn limit_messages(&mut self) {
        if self.request.messages.len() > self.max_messages {
            let excess = self.request.messages.len() - self.max_messages;
            self.request.messages.drain(0..excess);
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    /// End-to-end smoke test: because the full history is resent on every
    /// turn, the model should recall the user's name in the second reply.
    #[tokio::test]
    async fn test() {
        let mut llm = ChatMemoryLLM::load_config("./config/config.toml", 100).unwrap();
        llm.add_system_message("I can answer any question you ask me.");

        // Turn 1: introduce ourselves.
        llm.add_user_message("你好，我是 MoleSir！");
        let first_reply = llm.ask().await.unwrap().content.clone().unwrap();
        println!("{:#?}\n\n", first_reply);

        // Turn 2: ask the model to recall the name from the history.
        llm.add_user_message("我叫什么？");
        let second_reply = llm.ask().await.unwrap().content.clone().unwrap();
        println!("{:#?}\n\n", second_reply);

        assert!(second_reply.contains("MoleSir"));
    }
}