use crate::{
    error::ServerError,
    abstractions::{data::Data},
    protocol::type_schema::TypeSchema
};
use image::{DynamicImage, ImageBuffer, Rgba, codecs::png::PngEncoder, GenericImageView};
use serde_json::json;
use std::sync::{Arc, Mutex};
use std::path::Path;
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
use web_view::*;
use std::process::Command;
use std::fs;
use std::io::Cursor;

/// State shared between the frame-picker UI and the frame analysis pass.
struct StimulusExtractionState {
    // Pixel coordinate (x, y) the user selected on the first frame.
    point: (u32, u32),
    // First video frame as a "data:image/png;base64,..." URL for the UI.
    image_data: String,
    // (width, height) of the extracted first frame, in pixels.
    image_size: (u32, u32),
    // Path of the video being analyzed.
    video_path: String,
    // Grayscale brightness cutoff; a frame counts as a stimulus frame when
    // the brightest pixel in the sampling window exceeds this value.
    threshold: u8,
}

impl StimulusExtractionState {
    fn new(video_path: &str, threshold: u8) -> Result<Self, ServerError> {
        // 创建临时文件路径
        let temp_dir = std::env::temp_dir();
        let temp_image_path = temp_dir.join("first_frame.png");
        
        // 使用ffmpeg提取第一帧
        let output = Command::new("ffmpeg")
            .args([
                "-i", video_path,
                "-vframes", "1",
                "-q:v", "2",
                temp_image_path.to_str().unwrap()
            ])
            .output()
            .map_err(|e| ServerError::BuiltinTaskError(format!("无法执行ffmpeg命令: {}", e)))?;

        if !output.status.success() {
            let error = String::from_utf8_lossy(&output.stderr);
            return Err(ServerError::BuiltinTaskError(format!("ffmpeg执行失败: {}", error)));
        }

        // 读取生成的图像
        let image = image::open(&temp_image_path)
            .map_err(|e| ServerError::BuiltinTaskError(format!("无法打开提取的图像: {}", e)))?;
        
        let (width, height) = image.dimensions();

        // 转换为base64
        let mut buffer = Vec::new();
        let cursor = Cursor::new(&mut buffer);
        let encoder = PngEncoder::new(cursor);
        image.write_with_encoder(encoder)
            .map_err(|e| ServerError::BuiltinTaskError(format!("无法编码图像: {}", e)))?;
        let base64 = BASE64.encode(buffer);

        // 清理临时文件
        let _ = fs::remove_file(temp_image_path);

        Ok(Self {
            point: (0, 0),
            image_data: format!("data:image/png;base64,{}", base64),
            image_size: (width, height),
            video_path: video_path.to_string(),
            threshold,
        })
    }

    fn extract_stimulus_frames(&self) -> Result<(Vec<usize>, Vec<bool>), ServerError> {
        // 获取视频的总帧数
        println!("开始获取视频总帧数...");
        let output = Command::new("ffprobe")
            .args([
                "-v", "error",
                "-select_streams", "v:0",
                "-count_packets",
                "-show_entries", "stream=nb_read_packets",
                "-of", "csv=p=0",
                &self.video_path
            ])
            .output()
            .map_err(|e| ServerError::BuiltinTaskError(format!("无法获取视频帧数: {}", e)))?;

        let frame_count_str = String::from_utf8_lossy(&output.stdout);
        let frame_count: usize = frame_count_str.trim().parse()
            .map_err(|e| ServerError::BuiltinTaskError(format!("无法解析视频帧数: {}", e)))?;
            
        println!("视频总帧数: {}", frame_count);

        let (x, y) = (self.point.0, self.point.1);
        
        // 定义判断区域 (7x7)
        let domain_top = if y >= 3 { y - 3 } else { 0 };
        let domain_bottom = std::cmp::min(y + 3, self.image_size.1 - 1);
        let domain_left = if x >= 3 { x - 3 } else { 0 };
        let domain_right = std::cmp::min(x + 3, self.image_size.0 - 1);
        
        let mut stimulus_frames = Vec::new();
        let mut is_stimulus = vec![false; frame_count];

        // 创建临时目录用于帧处理
        let temp_dir = std::env::temp_dir();
        let temp_frame_path = temp_dir.join("current_frame.png");
        
        println!("开始分析视频帧...");

        // 设置处理批次大小，以加快处理速度
        let batch_size = 10;
        let total_batches = (frame_count + batch_size - 1) / batch_size;
        
        for batch in 0..total_batches {
            let start_frame = batch * batch_size;
            let end_frame = std::cmp::min((batch + 1) * batch_size, frame_count);
            
            println!("处理批次 {}/{}: 帧 {} 到 {}", batch + 1, total_batches, start_frame, end_frame - 1);
            
            for frame_index in start_frame..end_frame {
                // 使用ffmpeg提取单帧
                let output = Command::new("ffmpeg")
                    .args([
                        "-i", &self.video_path,
                        "-vf", &format!("select=eq(n\\,{})", frame_index),
                        "-vframes", "1",
                        "-q:v", "2",
                        "-y", // 覆盖已存在的文件
                        temp_frame_path.to_str().unwrap()
                    ])
                    .output()
                    .map_err(|e| ServerError::BuiltinTaskError(format!("无法提取帧 {}: {}", frame_index, e)))?;

                if !output.status.success() {
                    // 跳过无法提取的帧
                    println!("警告: 跳过无法提取的帧 {}", frame_index);
                    continue;
                }

                // 读取当前帧图像
                let image = match image::open(&temp_frame_path) {
                    Ok(img) => img,
                    Err(e) => {
                        println!("无法打开帧 {}: {}", frame_index, e);
                        continue;
                    }
                };

                // 转换为灰度图并分析指定区域
                let gray_image = image.to_luma8();
                let mut max_value = 0;

                for y_offset in domain_top..=domain_bottom {
                    for x_offset in domain_left..=domain_right {
                        if let Some(pixel) = gray_image.get_pixel_checked(x_offset, y_offset) {
                            let brightness = pixel[0];
                            if brightness > max_value {
                                max_value = brightness;
                            }
                        }
                    }
                }

                // 判断是否为刺激帧
                if max_value > self.threshold {
                    stimulus_frames.push(frame_index);
                    is_stimulus[frame_index] = true;
                    println!("检测到刺激帧: {}", frame_index + 1);
                }
                
                // 更新进度
                if (frame_index + 1) % 10 == 0 || frame_index == start_frame || frame_index == end_frame - 1 {
                    println!("已处理 {}/{} 帧 ({}%)", 
                        frame_index + 1, 
                        frame_count, 
                        ((frame_index + 1) as f32 / frame_count as f32 * 100.0) as usize
                    );
                }
            }
        }

        // 清理临时文件
        let _ = fs::remove_file(temp_frame_path);

        println!("分析完成，共找到 {} 个刺激帧", stimulus_frames.len());
        
        Ok((stimulus_frames, is_stimulus))
    }
}

/// builtin.stimulus_extraction
/// itype: StimulusRequest ::= Json
/// otype: StimulusResult ::= Json
///
/// Opens an interactive webview showing the video's first frame, lets the
/// user click the stimulus location, then scans every frame and returns a
/// JSON object with `stimulus_frames` (indices) and `is_stimulus` (flags).
///
/// Input: `Data::DataTuple([DataStr(video_path), DataInt(threshold)])`.
/// Errors when the input is malformed, ffmpeg/ffprobe fail, the webview
/// cannot be created, or the user closes the window without selecting.
pub fn extract_stimulus_func(input: Data) -> Result<Data, ServerError> {
    let (video_path, threshold) = match input {
        Data::DataTuple(mut vec) => {
            if vec.len() != 2 {
                return Err(ServerError::BuiltinTaskErrorStr("输入必须是包含视频路径和阈值的元组"));
            }
            // Clamp before the u8 cast so out-of-range thresholds saturate
            // instead of silently wrapping (e.g. 300 -> 255, not 44).
            (
                vec.remove(0).try_as_string()?,
                vec.remove(0).try_as_int().unwrap_or(150).clamp(0, 255) as u8,
            )
        }
        _ => return Err(ServerError::BuiltinTaskErrorStr("输入必须是包含视频路径和阈值的元组")),
    };

    let state = Arc::new(Mutex::new(StimulusExtractionState::new(&video_path, threshold)?));
    let state_clone = state.clone();

    // Shared slot for the analysis result produced inside the UI callback.
    let result = Arc::new(Mutex::new(None));
    let result_clone = result.clone();

    // HTML template is compiled in; the first frame is injected as a data URL.
    let html_template = include_str!("stimulus_extraction.html");
    let html = html_template.replace("{IMAGE_DATA}", &state.lock().unwrap().image_data);

    let webview = web_view::builder()
        .title("刺激提取工具")
        .content(Content::Html(html))
        .size(800, 600)
        .resizable(true)
        .debug(true)
        .user_data(())
        .invoke_handler(|webview, arg| {
            // Parse defensively: a malformed message from the JS side must
            // not panic the whole process (the original unwrap() did).
            let point: Vec<f32> = match serde_json::from_str(arg) {
                Ok(p) => p,
                Err(_) => {
                    let _ = webview.eval("alert('请选择一个点')");
                    return Ok(());
                }
            };

            if point.len() != 2 {
                let _ = webview.eval("alert('请选择一个点')");
                return Ok(());
            }

            println!("已选择点: ({}, {})", point[0], point[1]);

            let mut state = state_clone.lock().unwrap();
            state.point = (point[0] as u32, point[1] as u32);

            // Tell the UI we are processing (best-effort; a failed eval
            // should not abort the analysis).
            let _ = webview.eval("showProcessing()");

            match state.extract_stimulus_frames() {
                Ok((stimulus_frames, is_stimulus)) => {
                    let result_json = json!({
                        "stimulus_frames": stimulus_frames,
                        "is_stimulus": is_stimulus,
                        "description": "刺激帧索引和每帧状态布尔值"
                    });

                    *result_clone.lock().unwrap() = Some(result_json);

                    // Signal completion and close the window.
                    let _ = webview.eval("processingComplete()");
                    webview.exit();
                },
                Err(e) => {
                    let _ = webview.eval(&format!("alert('处理失败: {:?}')", e));
                }
            }

            Ok(())
        })
        .build()
        .map_err(|e| ServerError::BuiltinTaskError(format!("无法创建webview: {}", e)))?;

    webview.run()
        .map_err(|e| ServerError::BuiltinTaskError(format!("webview运行错误: {}", e)))?;

    // No stored result means the window closed without a completed selection.
    if let Some(result) = result.lock().unwrap().take() {
        Ok(Data::DataJson(result))
    } else {
        Err(ServerError::BuiltinTaskError("用户取消操作".to_string()))
    }
}

/// Manual smoke test: runs the full extraction flow against a local sample
/// video with a brightness threshold of 150 and prints the outcome.
pub fn test_extract_stimulus_frames() {
    let video = r"E:\Dev\files.GitHub\Cockroach-video-parse\src\DSC_2059.MOV";
    let request = Data::DataTuple(vec![
        Data::DataStr(video.to_string()),
        Data::DataInt(150),
    ]);
    let outcome = extract_stimulus_func(request);
    println!("{:?}", outcome);
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::abstractions::data::Data;

    /// Interactive end-to-end check against a local sample video; prints
    /// the extraction result for manual inspection.
    #[test]
    fn test_extract_stimulus_func() {
        let sample = r"E:\Dev\files.GitHub\Cockroach-video-parse\src\DSC_2059.MOV";
        let request = Data::DataTuple(vec![
            Data::DataStr(sample.to_string()),
            Data::DataInt(150),
        ]);
        println!("{:?}", extract_stimulus_func(request));
    }
}
