use std::io;

use thiserror::Error;


#[derive(Debug, Error)]
enum TestError {
    #[error("env error: {0}")]
    VarError(String),

    #[error("HTML to Markdown conversion error: {0}")]
    IoError(#[from] io::Error),

    #[error("Prompt error: {0}")]
    PromptError(#[from] rig::completion::PromptError),
}



#[cfg(test)]
mod tests {
    use std::env;
    use std::fs;
    use std::path::Path;

    use base64::{prelude::BASE64_STANDARD, Engine};
    use rig::{agent::AgentBuilder, completion::Prompt, message::{ContentFormat, Image, ImageDetail, ImageMediaType, Message, Text, UserContent}, providers::openai, OneOrMany};

    use super::*;

    /// Reads every PNG/JPG file in `dir_path` and converts each one into a
    /// base64-encoded [`Image`] suitable for a multimodal prompt.
    ///
    /// Files are returned in the order `fs::read_dir` yields them (platform
    /// dependent, typically directory order — not guaranteed sorted).
    ///
    /// # Errors
    ///
    /// Returns [`TestError::IoError`] when the directory does not exist, a
    /// file cannot be read, or no matching images are found.
    fn read_images_from_directory(dir_path: &str) -> Result<Vec<Image>, TestError> {
        let dir = Path::new(dir_path);

        if !dir.exists() || !dir.is_dir() {
            return Err(TestError::IoError(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                format!("Directory not found: {}", dir_path)
            )));
        }

        let mut images = Vec::new();
        for entry in fs::read_dir(dir)? {
            let path = entry?.path();
            if !path.is_file() {
                continue;
            }

            // Only PNG and JPG images are processed; everything else is
            // skipped. `eq_ignore_ascii_case` also accepts upper-case
            // extensions such as `.PNG` / `.JPG`.
            let is_jpg = path
                .extension()
                .is_some_and(|ext| ext.eq_ignore_ascii_case("jpg"));
            let is_png = path
                .extension()
                .is_some_and(|ext| ext.eq_ignore_ascii_case("png"));
            if !(is_jpg || is_png) {
                continue;
            }

            println!("读取图片: {}", path.display());

            let image_bytes = fs::read(&path)?;
            let encoded = BASE64_STANDARD.encode(&image_bytes);

            // Derive the media type and the matching MIME subtype from the
            // extension exactly once (previously this logic was duplicated).
            let (media_type, mime) = if is_jpg {
                (ImageMediaType::JPEG, "jpeg")
            } else {
                (ImageMediaType::PNG, "png")
            };

            // NOTE(review): the payload is a full `data:` URI even though the
            // format is declared as `ContentFormat::Base64`; confirm against
            // the rig/OpenAI provider whether raw base64 (without the URI
            // prefix) is expected here instead.
            images.push(Image {
                data: format!("data:image/{};base64,{}", mime, encoded),
                media_type: Some(media_type),
                format: Some(ContentFormat::Base64),
                detail: Some(ImageDetail::Auto),
            });
        }

        if images.is_empty() {
            return Err(TestError::IoError(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                format!("No images found in directory: {}", dir_path)
            )));
        }

        println!("共读取 {} 张图片", images.len());
        Ok(images)
    }

    /// End-to-end test: sends split screenshots (plus a text instruction) to
    /// an OpenAI-compatible model and saves the generated Markdown analysis.
    ///
    /// Requires `OPENAI_API_KEY` and `OPENAI_BASE_URL` (optionally loaded
    /// from `../../.env`) and local fixtures under `resources/`.
    #[tokio::test]
    async fn test_split_images() -> Result<(), TestError> {
        // Best-effort load of env vars; a missing .env file is not an error.
        dotenvy::from_path("../../.env").ok();

        let target_file_path = "resources/use.md";
        // `content` is currently unused because the `.context(&content)` call
        // below is commented out; the read is kept so the fixture's presence
        // is still validated, and the name is kept so the call can simply be
        // uncommented.
        let content = std::fs::read_to_string(target_file_path)?;

        // Read all images from the split_images directory.
        let images = read_images_from_directory("resources/split_images")?;

        let api_key = env::var("OPENAI_API_KEY").map_err(|e| TestError::VarError(e.to_string()))?;
        let base_url = env::var("OPENAI_BASE_URL").map_err(|e| TestError::VarError(e.to_string()))?;
        let client = openai::Client::from_url(&api_key, &base_url);
        let model = client.completion_model("gpt-4.1-mini");
        let system = r#"
Please act as a **highly detail-oriented** content analysis and format conversion expert. Your task is to combine image recognition with the given textual context information to generate a **comprehensive, detail-rich** structured Markdown document.

Please follow these steps, and **strive for the most thorough exploration and description in each step**:

1.  **Visual Content Extraction**: Use your visual recognition capabilities to **meticulously analyze** the images I provide. **Not only** accurately and completely extract all visible text, key elements, and overall layout structure from the images, **but also describe in as much detail as possible** the characteristics of these elements, their interrelationships, and your overall impression. **Do not omit any visual details**.
2.  **Contextual Information Integration**: **Deeply** read the context (Context) text I provide. Identify and extract **all** supplementary information related to the image content, especially information that the image itself cannot directly display, such as:
    *   Specific URLs of images or media resources
    *   Related hyperlink addresses
    *   **Detailed** textual annotations or background explanations
    *   **All** details mentioned in the context but not clearly shown in the images
    *   **Any information that might help understand the background or deeper meaning of the images**
3.  **Markdown Document Generation**: Integrate the above two parts of information and generate a Markdown document according to the following requirements:
    *   **Main Title**: Use an H1 level title (`# Title`) as the highest level title of the document.
    *   **Content Presentation**: Use lists (ordered or unordered), quotes, code blocks, **and necessary descriptive paragraphs** to appropriately present the extracted and integrated information. **For key information points, provide detailed explanations, avoiding being overly brief**.
    *   **Accurate Links**: Ensure that all URLs and hyperlinks supplemented from the context are accurately embedded in the Markdown document.
    *   **Comprehensive and Thorough**: Ensure that the final document not only accurately reflects the image content and includes all relevant supplementary information provided by the context, **but is also as detailed and in-depth as possible in its description**. **The goal is to generate an information-rich, content-full document, with length sufficient to cover all identified details**.

Please ensure that the final output Markdown document is accurate in content, clear in structure, standardized in format, and **meets the highest standards in terms of detail**.
"#;
        let agent = AgentBuilder::new(model)
            .preamble(system)
            .temperature(0.4)
            //.context(&content)
            .build();
        // Suppress the unused-variable warning while `.context(&content)`
        // stays commented out above.
        let _ = &content;

        // Cap how many image parts are sent to the model. The original
        // comment claimed "first 3" (and the output filename still says
        // `first3`), but the code only ever sent one image; this constant
        // makes the actual limit explicit without changing behavior.
        const MAX_IMAGES: usize = 1;
        let mut user_contents: Vec<UserContent> = Vec::new();
        for (i, image) in images.into_iter().take(MAX_IMAGES).enumerate() {
            println!("使用图片 #{}", i + 1);
            user_contents.push(UserContent::Image(image));
        }

        // Append the text instruction after the image parts.
        user_contents.push(UserContent::Text(Text {
            text: "Please analyze these screenshots, which are different parts of the same long screenshot, arranged in sequence. Extract all important information and generate a detailed Markdown document.".to_string()
        }));

        // `OneOrMany::many` fails only on an empty vector, which cannot
        // happen: the text prompt above is always pushed.
        let message = Message::User {
            content: OneOrMany::many(user_contents)
                .expect("user_contents always contains at least the text prompt"),
        };

        let response = agent.prompt(message).await?;

        let output_path = "resources/split_images_analysis_first3.md";
        std::fs::write(output_path, response)?;
        println!("Analysis result has been saved to: {}", output_path);
        Ok(())
    }
}