use std::{
    path::PathBuf,
    sync::{Arc, Mutex},
};

use burn::{
    data::{
        dataloader::batcher::Batcher,
        dataset::vision::{Annotation, ImageDatasetItem},
    },
    prelude::*,
};
use image::{ColorType, DynamicImage, EncodableLayout, GrayImage, imageops::FilterType};

use crate::data::utils::otsu_threshold;

/// Batcher that converts `ImageDatasetItem`s into `HWDBBatch` tensors,
/// optionally collecting dataset statistics along the way.
#[derive(Clone, Default)]
pub struct HWDBBatcher {
    /// Shared running statistics, laid out as
    /// `(sum, sum_of_squares, pixel_count, max_height, max_width)`
    /// (field order matches the destructuring in `batch`).
    pub statistics: Arc<Mutex<(f32, f32, f32, i32, i32)>>,
    /// When true, `batch` updates `statistics` from the raw (un-resized) pixels.
    pub enable_statistics: bool,
    /// Target image height after resizing.
    pub height: usize,
    /// Target image width after resizing.
    pub width: usize,
    /// Optional normalization parameters `(mean, std, _, _)`; when present,
    /// each pixel is mapped to `(x - mean) / std`. The two trailing ints are
    /// not read anywhere in this file — presumably max height/width; confirm
    /// against the caller that supplies them.
    pub features: Option<(f32, f32, i32, i32)>,
}

impl HWDBBatcher {
    pub fn new(
        enable_statistics: bool,
        height: usize,
        width: usize,
        norm_vals: Option<(f32, f32, i32, i32)>,
    ) -> Self {
        Self {
            statistics: Arc::new(Mutex::new((0.0, 0.0, 0.0, 0, 0))),
            enable_statistics,
            height,
            width,
            features: norm_vals,
        }
    }
}

/// One training batch produced by `HWDBBatcher`.
#[derive(Clone, Debug)]
pub struct HWDBBatch<B: Backend> {
    /// Stacked image tensor of shape `[batch, height, width]`.
    pub images: Tensor<B, 3>,
    /// One integer class label per image.
    pub labels: Tensor<B, 1, Int>,
}

impl<B: Backend> Batcher<B, ImageDatasetItem, HWDBBatch<B>> for HWDBBatcher {
    /// Converts a batch of grayscale dataset items into a normalized image
    /// tensor of shape `[batch, height, width]` plus an `Int` label tensor,
    /// optionally accumulating dataset statistics on the way.
    ///
    /// # Panics
    /// Panics if an item's pixels are not 8-bit gray, if an image buffer has
    /// inconsistent dimensions, or if an annotation is not `Annotation::Label`.
    fn batch(&self, items: Vec<ImageDatasetItem>, device: &<B as Backend>::Device) -> HWDBBatch<B> {
        // For each item keep two normalized pixel buffers:
        // - raw (un-resized) pixels, used only for statistics;
        // - pixels after resizing to (height, width), used for the tensors.
        let images_raw: Vec<(Vec<f32>, Vec<f32>)> = items
            .iter()
            .map(|item| {
                // Decode the item's pixel buffer once (it was previously
                // decoded twice per item).
                let pixels: Vec<u8> = item
                    .image
                    .iter()
                    .map(|pixel_depth| {
                        u8::try_from(pixel_depth.clone()).expect("not gray image")
                    })
                    .collect();
                let image = DynamicImage::ImageLuma8(
                    GrayImage::from_vec(
                        item.image_width as u32,
                        item.image_height as u32,
                        pixels.clone(),
                    )
                    .expect("can't transfer to gray image"),
                );
                (pixels, image)
            })
            .map(|(pixels, image)| {
                (
                    pixels,
                    image.resize_exact(self.width as u32, self.height as u32, FilterType::Gaussian),
                )
            })
            // resize_exact preserves the color type, so Luma8 inputs always
            // pass; this guards against non-gray images slipping through.
            .filter(|(_, image)| image.color() == ColorType::L8)
            .map(|(pixels, image)| (pixels, image.to_luma8()))
            .map(|(pixels, image)| {
                // Scale both buffers to [0, 1].
                (
                    pixels
                        .iter()
                        .map(|&pixel| pixel as f32 / 255.0)
                        .collect::<Vec<f32>>(),
                    image
                        .iter()
                        .map(|&pixel| pixel as f32 / 255.0)
                        .collect::<Vec<f32>>(),
                )
            })
            .collect();

        if self.enable_statistics {
            // Per-batch moments over the *raw* (un-resized) pixels.
            let local_n: f32 = images_raw.iter().map(|(image, _)| image.len() as f32).sum();
            let local_sum: f32 = images_raw
                .iter()
                .map(|(image, _)| image.iter().sum::<f32>())
                .sum();
            let local_sq: f32 = images_raw
                .iter()
                .map(|(image, _)| image.iter().map(|&x| x * x).sum::<f32>())
                .sum();
            // unwrap_or(0) keeps an empty batch from panicking.
            let local_max_height = items
                .iter()
                .map(|item| item.image_height as i32)
                .max()
                .unwrap_or(0);
            let local_max_width = items
                .iter()
                .map(|item| item.image_width as i32)
                .max()
                .unwrap_or(0);

            let mut g = self.statistics.lock().unwrap();
            let (sum, sq, n, max_height, max_width) = &mut *g;
            // Accumulate across batches. Previously sum/sq/n were overwritten
            // each call (only the last batch survived) while the max fields
            // were accumulated — cross-batch mean/std were therefore wrong.
            *n += local_n;
            *sum += local_sum;
            *sq += local_sq;
            *max_height = (*max_height).max(local_max_height);
            *max_width = (*max_width).max(local_max_width);
        }

        let images_vec: Vec<Tensor<B, 2>> = images_raw
            .iter()
            .map(|(_, image)| TensorData::from(image.as_slice()).convert::<B::FloatElem>())
            .map(|data| Tensor::<B, 1>::from_data(data, device))
            .map(|image| {
                // Optional (x - mean) / std normalization from precomputed stats.
                if let Some((mean, std, _, _)) = self.features {
                    (image - mean) / std
                } else {
                    image
                }
            })
            .map(|image| image.reshape([self.height as i32, self.width as i32]))
            .collect();

        let targets: Vec<Tensor<B, 1, Int>> = items
            .iter()
            .map(|item| {
                if let Annotation::Label(label) = item.annotation {
                    TensorData::from([label]).convert::<B::IntElem>()
                } else {
                    panic!("Annotation can't parse")
                }
            })
            .map(|data| Tensor::<B, 1, Int>::from_data(data, device))
            .collect();

        let images = Tensor::stack(images_vec, 0);
        let targets = Tensor::cat(targets, 0);

        HWDBBatch {
            images,
            labels: targets,
        }
    }
}

impl HWDBBatcher {
    pub fn transfer_batch<B: Backend>(
        &self,
        items: Vec<String>,
        device: &<B as Backend>::Device,
        save_images: Option<PathBuf>,
    ) -> Tensor<B, 3> {
        let images: Vec<DynamicImage> = items
            .iter()
            .map(|item| image::open(item).expect("Failed to open image"))
            .map(|image| {
                image.resize_exact(self.width as u32, self.height as u32, FilterType::Gaussian)
            })
            .collect();
        let images: Vec<GrayImage> = images.iter().map(|image| image.to_luma8()).collect();
        // 使用image库和手动实现的Otsu二值化
        let images: Vec<GrayImage> = images
            .iter()
            .map(|image| {
                // 手动计算直方图
                let mut hist = [0u32; 256];
                for pixel in image.pixels() {
                    hist[pixel[0] as usize] += 1;
                }

                // 使用Otsu算法计算最佳阈值
                let total_pixels = (image.width() * image.height()) as u32;
                let threshold = otsu_threshold(&hist, total_pixels);

                // 应用阈值进行二值化
                let mut binary_image = image.clone();
                for pixel in binary_image.pixels_mut() {
                    *pixel = if pixel[0] as u32 > threshold {
                        image::Luma([255u8])
                    } else {
                        image::Luma([0u8])
                    };
                }
                binary_image
            })
            .collect();
        // 如果指定保存中间图片的话，则保存指定路径
        if let Some(save_images) = save_images {
            images.iter().enumerate().for_each(|(idx, image)| {
                image::save_buffer(
                    save_images.join(&format!("save_{}.png", idx)),
                    image.as_bytes(),
                    self.width as u32,
                    self.height as u32,
                    image::ColorType::L8,
                )
                .expect("Failed to save image")
            });
        }
        let images: Vec<Vec<f32>> = images
            .iter()
            .map(|image| {
                image
                    .iter()
                    .map(|pixel| (*pixel as f32) / 255_f32)
                    .collect()
            })
            .collect();
        let images: Vec<Tensor<B, 2>> = images
            .iter()
            .map(|image| TensorData::from(image.as_slice()).convert::<B::FloatElem>())
            .map(|image| Tensor::<B, 1>::from_data(image, device))
            .map(|image| {
                if let Some((mean, std, _, _)) = self.features {
                    (image - mean) / std
                } else {
                    image
                }
            })
            .map(|image| image.reshape([self.height as i32, self.width as i32]))
            .collect();
        Tensor::stack(images, 0)
    }
}
