use std::env;

use anyhow::Context;
use async_openai::{
    config::OpenAIConfig, error::OpenAIError, types::CreateCompletionRequestArgs, Client,
};

/// Thin wrapper around an `async_openai` [`Client`] configured with a
/// specific API base URL and key (see `GptClient::init`).
pub struct GptClient {
    // Underlying async-openai HTTP client; constructed once in `init` and
    // reused for all requests.
    client: Client<OpenAIConfig>,
}

/// Error type returned by all fallible `GptClient` operations.
#[derive(Debug, thiserror::Error)]
pub enum GptError {
    /// Error surfaced by the `async_openai` crate (transport, API, or
    /// request-builder failures), forwarded transparently.
    #[error(transparent)]
    OpenAIError(#[from] OpenAIError),
    /// Any other failure, carried as an `anyhow` error with added context.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

impl GptClient {
    /// Build a client from an explicit API base URL and key.
    pub fn init(openai_base_url: &str, openai_key: &str) -> Self {
        let config = OpenAIConfig::new()
            .with_api_base(openai_base_url)
            .with_api_key(openai_key);

        GptClient {
            client: Client::with_config(config),
        }
    }

    /// Build a client from the `OPENAI_BASE_URL` and `OPENAI_KEY`
    /// environment variables.
    ///
    /// # Panics
    /// Panics if either variable is unset.
    pub fn init_from_env() -> Self {
        let openai_base_url = env::var("OPENAI_BASE_URL").expect("OPENAI_BASE_URL must be set");
        let openai_key = env::var("OPENAI_KEY").expect("OPENAI_KEY must be set");
        Self::init(&openai_base_url, &openai_key)
    }

    /// Fetch the models available on the configured endpoint and return them
    /// as a human-readable listing.
    ///
    /// # Errors
    /// Returns `GptError::OpenAIError` if the API call fails.
    pub async fn model_list(&self) -> Result<String, GptError> {
        let model_list = self.client.models().list().await?;
        // Previously this printed the list and returned an empty string;
        // return the formatted listing so callers can actually use it.
        Ok(format!("{:#?}", model_list.data))
    }

    /// Send `prompt` to the legacy completions endpoint and return the text
    /// of the first choice.
    ///
    /// # Errors
    /// Fails if the request cannot be built, the API call errors, or the
    /// response contains no choices.
    pub async fn request(&self, prompt: &str) -> Result<String, GptError> {
        // The /v1/completions endpoint only accepts completion models; chat
        // models such as "gpt-3.5-turbo-0125" are rejected there, so use the
        // completions-compatible "gpt-3.5-turbo-instruct".
        let request = CreateCompletionRequestArgs::default()
            .model("gpt-3.5-turbo-instruct")
            .prompt(prompt)
            .max_tokens(512u32)
            .build()
            .with_context(|| format!("创建GPT请求失败: {}", prompt))?;

        // Call the completions API group on the shared client.
        let response = self
            .client
            .completions()
            .create(request)
            .await
            .with_context(|| "GPT响应异常")?;

        // Take the text of the first choice; an empty choice list is an error.
        let text = response
            .choices
            .first()
            .with_context(|| "GPT无响应")?
            .text
            .clone();
        Ok(text)
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::io::{stdout, Write};

    use super::*;
    use async_openai::types::{
        ChatCompletionMessageToolCall, ChatCompletionRequestAssistantMessageArgs,
        ChatCompletionRequestMessage, ChatCompletionRequestToolMessageArgs,
        ChatCompletionRequestUserMessageArgs, ChatCompletionToolArgs, ChatCompletionToolType,
        CreateChatCompletionRequestArgs, FunctionObjectArgs,
    };
    use dotenvy::dotenv;
    use futures::StreamExt;
    use rand::{seq::SliceRandom, thread_rng, Rng};
    use serde_json::{json, Value};

    /// Load variables from a `.env` file if one exists; a missing file is fine.
    fn setup() {
        dotenv().ok();
    }

    /// Smoke test: build a client from the environment and run one completion.
    /// Requires OPENAI_BASE_URL / OPENAI_KEY and network access.
    #[tokio::test]
    async fn test_new_client() -> Result<(), GptError> {
        setup();
        let client = GptClient::init_from_env();

        let resp = client.request("请告诉我什么是爱").await?;
        println!("GPT RESP: {}", resp);

        Ok(())
    }

    /// End-to-end tool-calling flow (renamed from the typo `tesss`).
    #[tokio::test]
    async fn test_tool_calls() {
        run_tool_call_flow().await.unwrap();
    }

    /// Runs a complete tool-calling round trip:
    /// 1. ask the model a question while advertising `get_current_weather`;
    /// 2. execute every tool call it returns, concurrently;
    /// 3. send the tool results back and stream the final answer to stdout.
    async fn run_tool_call_flow() -> Result<(), GptError> {
        setup();
        // Reuse the production constructor instead of duplicating env/config
        // handling; the private `client` field is visible to this child module.
        let client = GptClient::init_from_env().client;

        let model = "gpt-4-turbo";
        let user_prompt = "What's the weather like in Boston and Atlanta?";

        let request = CreateChatCompletionRequestArgs::default()
            .max_tokens(512u32)
            .model(model)
            .messages([ChatCompletionRequestUserMessageArgs::default()
                .content(user_prompt)
                .build()?
                .into()])
            .tools(vec![ChatCompletionToolArgs::default()
                .r#type(ChatCompletionToolType::Function)
                .function(
                    FunctionObjectArgs::default()
                        .name("get_current_weather")
                        .description("Get the current weather in a given location")
                        .parameters(json!({
                            "type": "object",
                            "properties": {
                                "location": {
                                    "type": "string",
                                    "description": "The city and state, e.g. San Francisco, CA",
                                },
                                "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] },
                            },
                            "required": ["location"],
                        }))
                        .build()?,
                )
                .build()?])
            .build()?;

        let response_message = client
            .chat()
            .create(request)
            .await?
            .choices
            .first()
            .with_context(|| "chat response contained no choices")?
            .message
            .clone();

        if let Some(tool_calls) = response_message.tool_calls {
            // Execute every requested tool call concurrently. Only `name` and
            // `arguments` need to move into the task; the tool call itself is
            // kept so its id can be echoed back to the API later.
            let mut handles = Vec::new();
            for tool_call in tool_calls {
                let name = tool_call.function.name.clone();
                let args = tool_call.function.arguments.clone();
                let handle =
                    tokio::spawn(async move { call_fn(&name, &args).await.unwrap_or_default() });
                handles.push((handle, tool_call));
            }

            // Collect results; a panicked task is silently skipped, matching
            // the original best-effort behavior.
            let mut function_responses = Vec::new();
            for (handle, tool_call) in handles {
                if let Ok(response_content) = handle.await {
                    function_responses.push((tool_call, response_content));
                }
            }

            // Rebuild the conversation: user prompt, the assistant's tool
            // calls, then one tool message per result.
            let mut messages: Vec<ChatCompletionRequestMessage> =
                vec![ChatCompletionRequestUserMessageArgs::default()
                    .content(user_prompt)
                    .build()?
                    .into()];

            let tool_calls: Vec<ChatCompletionMessageToolCall> = function_responses
                .iter()
                .map(|(tool_call, _)| tool_call.clone())
                .collect();

            let assistant_message: ChatCompletionRequestMessage =
                ChatCompletionRequestAssistantMessageArgs::default()
                    .tool_calls(tool_calls)
                    .build()?
                    .into();
            messages.push(assistant_message);

            for (tool_call, response_content) in &function_responses {
                messages.push(
                    ChatCompletionRequestToolMessageArgs::default()
                        .content(response_content.to_string())
                        .tool_call_id(tool_call.id.clone())
                        .build()?
                        .into(),
                );
            }

            let subsequent_request = CreateChatCompletionRequestArgs::default()
                .max_tokens(512u32)
                .model(model)
                .messages(messages)
                .build()
                .with_context(|| "subsequent_request build error")?;

            let mut stream = client.chat().create_stream(subsequent_request).await?;

            // Stream the final answer, echoing each delta to stdout as it
            // arrives; print the full accumulated text once at the end
            // (the original re-printed the whole accumulator every chunk).
            let mut response_content = String::new();
            let mut lock = stdout().lock();
            while let Some(result) = stream.next().await {
                match result {
                    Ok(response) => {
                        for chat_choice in response.choices.iter() {
                            if let Some(ref content) = chat_choice.delta.content {
                                write!(lock, "{}", content).unwrap();
                                response_content.push_str(content);
                            }
                        }
                    }
                    Err(err) => return Err(GptError::OpenAIError(err)),
                }
            }
            writeln!(lock, "\n{}", response_content).unwrap();
        }

        Ok(())
    }

    /// Dispatch a tool call by name, parsing `args` as a JSON object.
    async fn call_fn(name: &str, args: &str) -> Result<Value, Box<dyn std::error::Error>> {
        let mut available_functions: HashMap<&str, fn(&str, &str) -> Value> = HashMap::new();
        available_functions.insert("get_current_weather", get_current_weather);

        let function_args: Value = args.parse()?;

        let location = function_args["location"]
            .as_str()
            .ok_or("missing required argument `location`")?;
        // The schema marks `unit` as optional; default to fahrenheit.
        let unit = function_args["unit"].as_str().unwrap_or("fahrenheit");
        let function = available_functions
            .get(name)
            .ok_or_else(|| format!("unknown function: {}", name))?;
        Ok(function(location, unit))
    }

    /// Mock `get_current_weather` implementation: returns a random
    /// temperature and forecast for the requested location/unit.
    fn get_current_weather(location: &str, unit: &str) -> Value {
        let mut rng = thread_rng();
        let temperature: i32 = rng.gen_range(20..=55);
        let forecasts = [
            "sunny", "cloudy", "overcast", "rainy", "windy", "foggy", "snowy",
        ];
        let forecast = forecasts.choose(&mut rng).unwrap_or(&"sunny");

        json!({
            "location": location,
            "temperature": temperature.to_string(),
            "unit": unit,
            "forecast": forecast
        })
    }
}
