use crate::config::Config;
use crate::event::Event;
use crate::prompts;

use std::collections::HashMap;
use std::sync::atomic::AtomicBool;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use strum_macros::{Display, EnumIter};
use tokio::sync::mpsc::UnboundedSender;

pub mod chatglm;
pub mod chatgpt;
pub mod ollama;

use self::chatglm::ChatGLM;
use self::chatgpt::ChatGPT;
use self::ollama::Ollama;

use std::sync::Arc;
use std::fmt::Debug;

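/// Common interface implemented by every chat backend (ChatGPT, Ollama, ChatGLM).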
#[async_trait]
pub trait LLM: Send + Sync + Debug {
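    /// Streams an answer for the current conversation, sending incremental
    /// results through `sender`; setting `terminate_response_signal` lets the
    /// caller stop the response early.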
    async fn ask(
        &self,
        sender: UnboundedSender<Event>,
        terminate_response_signal: Arc<AtomicBool>,
    ) -> Result<(), Box<dyn std::error::Error>>;

    /// Single-shot completion used by the Agent: sends `messages` and returns
    /// the full response text.
    async fn call(
        &self,
        messages: Vec<Message>,
    ) -> Result<String, Box<dyn std::error::Error + Send>>;

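    /// Appends `msg` with the given `role` to the conversation history.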
    fn append_chat_msg(&mut self, msg: String, role: LLMRole);
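    /// Clears the accumulated conversation history.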
    fn clear(&mut self);
}

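/// Chunks of a streamed answer: a start marker, incremental text, and an end marker.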
#[derive(Clone, Debug)]
pub enum LLMAnswer {
    StartAnswer,
    Answer(String),
    EndAnswer,
}

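/// Chat roles; `Display` renders them in lowercase (`assistant`, `system`, `user`).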
#[derive(EnumIter, Display, Debug)]
#[strum(serialize_all = "lowercase")]
pub enum LLMRole {
    ASSISTANT,
    SYSTEM,
    USER,
}

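/// Backend selected in the configuration, deserialized from `chatgpt`, `ollama` or `chatglm`.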
#[derive(Deserialize, PartialEq, Debug)]
#[serde(rename_all = "lowercase")]
pub enum LLMBackend {
    ChatGPT,
    Ollama,
    ChatGLM,
}

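/// Factory for constructing the configured [`LLM`] backend.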
pub struct LLMModel;

impl LLMModel {
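    /// Builds the boxed [`LLM`] implementation for `model` from its section of
    /// the configuration. Panics if `LLMBackend::Ollama` is selected without an
    /// `ollama` config section.
    ///
    /// A minimal sketch of how it is meant to be called (assumes an async
    /// context and an already-loaded `Config`):
    ///
    /// ```ignore
    /// let config = Arc::new(load_config()); // `load_config` is hypothetical
    /// let llm = LLMModel::init(&LLMBackend::ChatGPT, config).await;
    /// ```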
    pub async fn init(model: &LLMBackend, config: Arc<Config>) -> Box<dyn LLM> {
        match model {
            LLMBackend::ChatGPT => Box::new(ChatGPT::new(config.chatgpt.clone())),
            LLMBackend::Ollama => Box::new(Ollama::new(
                config.ollama.clone().expect("missing `ollama` config section"),
            )),
            LLMBackend::ChatGLM => Box::new(ChatGLM::new(config.chatglm.clone())),
        }
    }
}

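/// Loads the default prompts from `prompts.toml` in the user's config
/// directory (`<config dir>/riir/prompts.toml`), panicking if it is missing.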
pub fn read_default_prompts() -> Vec<HashMap<String, String>> {
    let prompts_path = dirs::config_dir()
        .expect("could not determine the user config directory")
        .join("riir")
        .join("prompts.toml");

    if !prompts_path.exists() {
        panic!(
            "Error: 'prompts.toml' not found at {}",
            prompts_path.display()
        );
    }

    prompts::read_messages_from_toml(prompts_path.to_str().unwrap())
}

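/// A single chat message (role plus content) as sent in API request bodies.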
#[derive(Debug, Serialize, Clone)]
pub struct Message {
    pub role: String,
    pub content: String,
}

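/// Request body for a chat completion call: model name, message history, and sampling temperature.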
#[derive(Debug, Serialize, Clone)]
pub struct ChatCompletion {
    pub model: String,
    pub messages: Vec<Message>,
    pub temperature: f32,
}

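/// Message payload inside a completion response.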
#[derive(Debug, Deserialize)]
pub struct APIMessage {
    pub content: String,
}

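/// One choice returned by the completion API.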
#[derive(Debug, Deserialize)]
pub struct APIChoice {
    pub message: APIMessage,
}

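/// Top-level completion response; the generated text lives in `choices[..].message.content`.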
#[derive(Debug, Deserialize)]
pub struct APIResponse {
    pub choices: Vec<APIChoice>,
}
