use std::path::PathBuf;
use anyhow::{Context, Result};
use derive_builder::Builder;
use llm_sdk::chat::ToolCall;
use serde_json::Value;
use crate::llm::chat::ChatMemoryLLM;
use crate::tool::CommandExecutor;
use crate::tool::EmailSender;
use crate::tool::FileCreator;
use crate::tool::FileReader;
use crate::tool::FileWritor;
use crate::tool::ToolCollection;
use crate::tool::UrlReader;
use crate::tool::WebSearch;
use crate::tool::{Terminator, Calculator, Tool};
use log::info;


/// Static configuration for an [`Agent`].
///
/// Built either via [`Default`] or through the derived [`AgentConfigBuilder`].
#[derive(Debug, Builder)]
#[builder(name = "AgentConfigBuilder")]
pub struct AgentConfig {
    /// Display name, used in log output.
    pub name: &'static str,
    /// Optional human-readable description of the agent.
    pub description: Option<&'static str>,
    /// System message installed into the LLM at load time.
    pub system_prompt: String,
    /// Prompt re-sent to the LLM before every step to steer tool usage.
    pub next_step_prompt: &'static str, 
    /// Path to the TOML config consumed by `ChatMemoryLLM::load_config`.
    pub llm_config_path: PathBuf,
    /// Maximum number of messages kept in the LLM's chat memory.
    pub max_messages: usize,
    /// Hard cap on think/act cycles per `run` invocation.
    pub max_steps: usize,
}

/// Execution state of an [`Agent`].
///
/// `Idle` means the agent is ready to accept a new request; `Run` means a
/// `run` invocation is in progress (the `terminate` tool flips it back to
/// `Idle`).
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum AgentState {
    Idle,
    Run,
}

/// A tool-using LLM agent: repeatedly asks the LLM what to do next and
/// executes the tool calls it requests, until the model terminates or the
/// step budget is exhausted.
pub struct Agent {
    config: AgentConfig,
    // Chat-backed LLM with bounded message memory.
    llm: ChatMemoryLLM,
    // Registry of tools the LLM is allowed to call by name.
    tool_collection: ToolCollection,
    // Number of steps taken in the current run (compared to max_steps).
    current_step: usize,
    state: AgentState,
}

/// Default per-step prompt: nudges the model to pick tools proactively and
/// reminds it that the `terminate` tool ends the interaction.
pub const NEXT_STEP_PROMPT: &str = 
r#"Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps.
If you want to stop the interaction at any point, use the `terminate` tool/function call."#;

impl Default for AgentConfig {
    fn default() -> Self {
        Self {
            name: "EDAgent",
            description: Some("A versatile agent that can solve various tasks using multiple tools"),
            system_prompt: format!(
                "You are an all-capable AI assistant. The workspace directory is: {}", 
                std::env::current_dir().unwrap().join("workspace").display()
            ),
            next_step_prompt: NEXT_STEP_PROMPT,
            llm_config_path: "./config/config.toml".into(),
            max_messages: 100,
            max_steps: 10,
        }
    }
}

impl Agent {
    /// Build an agent from `config`: register the built-in tool set, load the
    /// LLM from the configured path, then install the system prompt and the
    /// tool function definitions.
    ///
    /// # Errors
    /// Fails when the LLM cannot be constructed from `config.llm_config_path`.
    pub fn load_config(config: AgentConfig) -> Result<Self> {
        // Every tool available to the agent, boxed behind the `Tool` trait.
        let toolbox: Vec<Box<dyn Tool>> = vec![
            Box::new(Calculator::new()),
            Box::new(Terminator::new()),
            Box::new(CommandExecutor::new()),
            Box::new(WebSearch::new()),
            Box::new(UrlReader::new()),
            Box::new(FileCreator::new()),
            Box::new(FileWritor::new()),
            Box::new(FileReader::new()),
            Box::new(EmailSender::new()),
        ];
        let tool_collection = ToolCollection::new(toolbox.into_iter());

        let mut llm = ChatMemoryLLM::load_config(&config.llm_config_path, config.max_messages)
            .context("Build LLM")?;
        llm.add_system_message(config.system_prompt.clone());
        llm.with_tools(tool_collection.to_function_defines());

        Ok(Agent {
            config,
            llm,
            tool_collection,
            current_step: 0,
            state: AgentState::Idle,
        })
    }

    pub async fn run(&mut self, request: String) -> Result<String> {
        assert_eq!(self.state, AgentState::Idle);
        self.state = AgentState::Run;

        self.llm.add_user_message(request);

        let mut results = Vec::<String>::new();

        while self.state != AgentState::Idle && self.current_step < self.config.max_steps {
            self.current_step += 1;
            info!("🔢 Executing step {}/{}", self.current_step, self.config.max_steps);
            let step_result = self.step().await.context(format!("Step {}", self.current_step))?;
            results.push(format!("Step {}: {}", self.current_step, step_result));
        }

        Ok(results.join("\n"))
    }

    /// Execute a single think/act cycle: ask the LLM for its next move, then
    /// run every tool call it requested. Returns the tool observations joined
    /// with spaces.
    ///
    /// # Errors
    /// Fails when the LLM call or any tool execution fails.
    pub async fn step(&mut self) -> Result<String> {
        // Remind the model of the tool-usage protocol on every step.
        self.llm.add_user_message(self.config.next_step_prompt.to_string());

        // 1. Think!
        // Clone so the borrow of `self.llm` ends before we borrow `self`
        // mutably again while executing tools below.
        let message = self.llm.ask().await.context("Chat with LLM")?.clone();

        // Bug fix: `content` is None when the model responds with tool calls
        // only — the previous unwrap() panicked in that case.
        if let Some(thoughts) = &message.content {
            info!("🧠 {}'s thoughts: {}", self.config.name, thoughts);
        } else {
            info!("🧠 {} responded with tool calls only", self.config.name);
        }
        for tool_call in message.tool_calls.iter() {
            // (typo fix: "execite" -> "execute")
            info!("🧰 EDAgent execute tool: {}({})", tool_call.function.name, tool_call.function.arguments);
        }

        // 2. Act!
        let mut results = Vec::new();
        for tool_call in message.tool_calls.iter() {
            let res = self
                .execute_tool(tool_call)
                .await
                // Lazy context: only build the format! string on failure.
                .with_context(|| format!("Execute '{:?}'", tool_call))?;
            results.push(res);
        }
        Ok(results.join(" "))
    }

    /// Execute one tool call and record its outcome in the chat history as a
    /// tool message, so the LLM sees the result on the next turn. Returns a
    /// human-readable observation string.
    ///
    /// Calling the special `terminate` tool flips the agent back to `Idle`,
    /// which ends the run loop.
    ///
    /// # Errors
    /// Fails when the tool-call arguments are not valid JSON. Tool execution
    /// failures are NOT propagated — they are reported back to the LLM so the
    /// model can recover (e.g. by trying a different tool).
    pub async fn execute_tool(&mut self, tool_call: &ToolCall) -> Result<String> {
        let function_name = &tool_call.function.name;
        // (typo fix in context message: "Dserde" -> "Deserialize")
        let args: Value = serde_json::from_str(&tool_call.function.arguments)
            .context("Deserialize function arguments")?;

        let observation = match self.tool_collection.execute(function_name, args).await {
            Ok(result) => {
                let observation = match &result {
                    Some(result) => format!("Observed output of cmd `{function_name}` executed:\n{result}"),
                    None => format!("Cmd `{function_name}` completed with no output"),
                };

                // Feed the raw tool output (or empty string) back to the LLM.
                // unwrap_or_default avoids the eager `"".to_string()` allocation.
                self.llm.add_tool_message(result.unwrap_or_default(), tool_call.id.to_string());

                observation
            }
            Err(err) => {
                // Report the failure to the model instead of aborting the run.
                self.llm.add_tool_message(
                    format!("Error calling function '{}' for '{:?}'", function_name, err),
                    tool_call.id.to_string(),
                );
                "Execute function failed!".to_string()
            }
        };

        // The `terminate` tool signals the end of the interaction.
        if function_name == "terminate" {
            self.state = AgentState::Idle;
        }

        Ok(observation)
    }
}