use futures::{future::BoxFuture, FutureExt};
use tiktoken_rs;
use ulid::Ulid;
use std::sync::OnceLock;

/// Generates a fresh ULID and returns its canonical 26-character
/// Crockford-base32 string representation.
pub fn gen_ulid() -> String {
    Ulid::new().to_string()
}

/// Shared default cl100k_base encoder, built lazily on first use.
///
/// Stored as an `Option` so a failed initialization is cached as `None`
/// instead of panicking — this makes `count_tokens` actually honor its
/// documented "returns None on encoder failure" contract (previously it
/// called `.unwrap()` inside `get_or_init` and could never return `None`).
static CL100K_BASE: OnceLock<Option<tiktoken_rs::CoreBPE>> = OnceLock::new();

/// Counts the number of tokens in a string.
///
/// Uses OpenAI's cl100k_base encoder, which is appropriate for models
/// such as gpt-4 and gpt-3.5-turbo.
///
/// # Arguments
///
/// * `text` - the string whose tokens should be counted
///
/// # Returns
///
/// The token count, or `None` if the encoder failed to initialize.
///
/// # Examples
///
/// ```
/// use common::utils::count_tokens;
///
/// let text = "Hello, world!";
/// let token_count = count_tokens(text);
/// println!("Token count: {:?}", token_count);
/// ```
pub fn count_tokens(text: &str) -> Option<usize> {
    // `.ok()` turns an initialization error into `None`; `get_or_init`
    // caches whichever outcome occurs, so a failure is never retried but
    // also never panics.
    CL100K_BASE
        .get_or_init(|| tiktoken_rs::cl100k_base().ok())
        .as_ref()
        .map(|encoder| encoder.encode_with_special_tokens(text).len())
}

/// Counts the tokens of a string using the encoder for a specific model.
///
/// Supported model names include "gpt-4", "gpt-3.5-turbo",
/// "text-embedding-ada-002", and others known to tiktoken.
///
/// # Arguments
///
/// * `text` - the string whose tokens should be counted
/// * `model` - a model name such as "gpt-4" or "gpt-3.5-turbo"
///
/// # Returns
///
/// The token count. If no encoder is known for `model`, this falls back
/// to the default cl100k_base encoder via [`count_tokens`], so `None` is
/// only returned when that fallback also fails.
///
/// # Examples
///
/// ```
/// use common::utils::count_tokens_for_model;
///
/// let text = "Hello, world!";
/// let token_count = count_tokens_for_model(text, "gpt-4");
/// println!("Token count for GPT-4: {:?}", token_count);
/// ```
pub fn count_tokens_for_model(text: &str, model: &str) -> Option<usize> {
    tiktoken_rs::get_bpe_from_model(model)
        .ok()
        .map(|encoder| encoder.encode_with_special_tokens(text).len())
        // Unknown model: degrade gracefully to the default encoder.
        .or_else(|| count_tokens(text))
}

/// Composes two async transformations into a single callable: the
/// returned closure runs `f` on its argument, then feeds `f`'s output
/// into `g`.
///
/// # Arguments
///
/// * `f` - the first async stage
/// * `g` - the second async stage, applied to `f`'s output
///
/// # Returns
///
/// A closure that, given `arg`, yields a boxed future resolving to
/// `g(f(arg))`.
pub fn compose_async<F, G, T>(f: F, g: G) -> impl Fn(T) -> BoxFuture<'static, T>
where
    F: Fn(T) -> BoxFuture<'static, T> + Clone + Send + 'static,
    G: Fn(T) -> BoxFuture<'static, T> + Clone + Send + 'static,
    // NOTE: the `T: Clone` bound was dropped. The intermediate result is
    // moved straight into `g`, so the previous `res1.clone()` was a
    // needless deep copy; relaxing the bound is backward compatible.
    T: Send + 'static,
{
    move |arg: T| {
        // Clone the stages per invocation so the returned closure remains
        // `Fn` while each produced future owns its own copies.
        let f = f.clone();
        let g = g.clone();
        async move { g(f(arg).await).await }.boxed()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gen_ulid() {
        // A canonical ULID string is always 26 Crockford-base32 characters.
        let ulid = gen_ulid();
        assert_eq!(ulid.len(), 26);
    }

    #[test]
    fn test_count_tokens() {
        let text = "Hello, world!";
        let token_count = count_tokens(text);
        assert!(token_count.is_some());
        println!("Token count for '{}': {:?}", text, token_count);

        // Chinese text should tokenize successfully as well.
        let chinese_text = "你好，世界！";
        let chinese_token_count = count_tokens(chinese_text);
        assert!(chinese_token_count.is_some());
        println!("Token count for '{}': {:?}", chinese_text, chinese_token_count);

        // Model-specific token counting.
        let gpt4_token_count = count_tokens_for_model(text, "gpt-4");
        assert!(gpt4_token_count.is_some());
        println!("Token count for '{}' with GPT-4: {:?}", text, gpt4_token_count);
    }

    #[tokio::test]
    async fn test_compose_async() {
        // Two simple async stages whose application order is observable
        // in the output string.
        async fn fn_a(arg: String) -> String {
            format!("{}: A", arg)
        }

        async fn fn_b(arg: String) -> String {
            format!("{}: B", arg)
        }

        let async_a = |s: String| fn_a(s).boxed();
        let async_b = |s: String| fn_b(s).boxed();
        let composed = compose_async(async_a, async_b);
        let result = composed("test".to_string()).await;
        // `f` runs first, then `g`. Previously this test only printed the
        // result and asserted nothing, so a composition regression would
        // have passed silently.
        assert_eq!(result, "test: A: B");
    }
}
