use burn::{
    config::Config,
    module::Module,
    nn::{
        conv::{Conv2d, Conv2dConfig}, pool::{MaxPool2d, MaxPool2dConfig}, LeakyRelu, Linear, LinearConfig, PaddingConfig2d
    },
    tensor::{backend::Backend, Tensor},
};

/// YOLOv1 detection network: 24 convolutional layers (each a conv +
/// LeakyReLU pair) interleaved with max-pooling, followed by two fully
/// connected layers that produce the flattened grid predictions.
///
/// Layer hyperparameters (kernel sizes, strides, channel counts) are set
/// in [ModelConfig::init].
#[derive(Module, Debug)]
pub struct YoloV1Model<B: Backend> {
    // Stage 1: 7x7 stride-2 stem conv, then 2x2 stride-2 pool.
    conv1_1: Conv2dActivation<B>,
    pool1_1: MaxPool2d,

    // Stage 2: single 3x3 conv, then pool.
    conv2_1: Conv2dActivation<B>,
    pool2_1: MaxPool2d,

    // Stage 3: alternating 1x1 bottleneck / 3x3 convs, then pool.
    conv3_1: Conv2dActivation<B>,
    conv3_2: Conv2dActivation<B>,
    conv3_3: Conv2dActivation<B>,
    conv3_4: Conv2dActivation<B>,
    pool3_1: MaxPool2d,

    // Stage 4: four repeated (1x1, 3x3) pairs plus a final pair, then pool.
    conv4_1: Conv2dActivation<B>,
    conv4_2: Conv2dActivation<B>,
    conv4_3: Conv2dActivation<B>,
    conv4_4: Conv2dActivation<B>,
    conv4_5: Conv2dActivation<B>,
    conv4_6: Conv2dActivation<B>,
    conv4_7: Conv2dActivation<B>,
    conv4_8: Conv2dActivation<B>,
    conv4_9: Conv2dActivation<B>,
    conv4_10: Conv2dActivation<B>,
    pool4_1: MaxPool2d,

    // Stage 5: two (1x1, 3x3) pairs plus two 3x3 convs; conv5_6 has
    // stride 2 and performs the final downsampling.
    conv5_1: Conv2dActivation<B>,
    conv5_2: Conv2dActivation<B>,
    conv5_3: Conv2dActivation<B>,
    conv5_4: Conv2dActivation<B>,
    conv5_5: Conv2dActivation<B>,
    conv5_6: Conv2dActivation<B>,

    // Stage 6: two 3x3 convs at the final resolution.
    conv6_1: Conv2dActivation<B>,
    conv6_2: Conv2dActivation<B>,

    // Fully connected head producing the flattened grid predictions.
    fc1: Linear<B>,
    fc2: Linear<B>,

}

impl<B: Backend> YoloV1Model<B> {
    /// Runs the detection network on a batch of images.
    ///
    /// Takes a `[batch, channels, height, width]` image tensor and returns
    /// the flattened predictions,
    /// `[batch_size, grid_size * grid_size * (num_classes + num_boxes * 5)]`.
    pub fn forward(&self, x: Tensor<B, 4>) -> Tensor<B, 2> {
        // Stages 1-2: single conv + pool each.
        let x = self.pool1_1.forward(self.conv1_1.forward(x));
        let x = self.pool2_1.forward(self.conv2_1.forward(x));

        // Stage 3.
        let x = self.conv3_1.forward(x);
        let x = self.conv3_2.forward(x);
        let x = self.conv3_3.forward(x);
        let x = self.conv3_4.forward(x);
        let x = self.pool3_1.forward(x);

        // Stage 4.
        let x = self.conv4_1.forward(x);
        let x = self.conv4_2.forward(x);
        let x = self.conv4_3.forward(x);
        let x = self.conv4_4.forward(x);
        let x = self.conv4_5.forward(x);
        let x = self.conv4_6.forward(x);
        let x = self.conv4_7.forward(x);
        let x = self.conv4_8.forward(x);
        let x = self.conv4_9.forward(x);
        let x = self.conv4_10.forward(x);
        let x = self.pool4_1.forward(x);

        // Stage 5 (conv5_6 is the stride-2 downsampling conv).
        let x = self.conv5_1.forward(x);
        let x = self.conv5_2.forward(x);
        let x = self.conv5_3.forward(x);
        let x = self.conv5_4.forward(x);
        let x = self.conv5_5.forward(x);
        let x = self.conv5_6.forward(x);

        // Stage 6.
        let x = self.conv6_1.forward(x);
        let x = self.conv6_2.forward(x);

        // Flatten the feature map and run the fully connected head.
        let [batch_size, channels, height, width] = x.dims();
        let flat = x.reshape([batch_size, channels * height * width]);
        let hidden = self.fc1.forward(flat);
        self.fc2.forward(hidden)
    }
}

/// Hyperparameters for building a [YoloV1Model].
#[derive(Config)]
pub struct ModelConfig {
    // Number of object classes predicted per grid cell.
    pub num_classes: usize,
    // Input image resolution as [height, width]; the canonical YOLOv1
    // setting is 448x448.
    pub image_size: [usize; 2],
    // Side length of the prediction grid (SxS cells); 7 in the paper.
    #[config(default = 7)]
    pub grid_size: usize,
    // Bounding boxes predicted per grid cell; 2 in the paper.
    #[config(default = 2)]
    pub num_boxes: usize,
    // Loss weight for box-coordinate error terms (not used by `init`;
    // intended for the YOLO loss).
    #[config(default = 5.0)]
    pub lambda_coord: f32,
    // Loss weight for confidence in no-object cells (not used by `init`;
    // intended for the YOLO loss).
    #[config(default = 0.5)]
    pub lambda_noobj: f32,
}

impl ModelConfig {
    /// Builds a [YoloV1Model] on `device`, following the 24-conv-layer
    /// architecture of the YOLOv1 paper (Redmon et al., 2016).
    ///
    /// The conv stack downsamples the input by a factor of 64 overall
    /// (stride-2 stem, four 2x2/2 pools, stride-2 conv5_6), so the head is
    /// sized from `image_size / 64`; for the canonical 448x448 input this
    /// is the paper's 7x7x1024 feature map. `image_size` must therefore be
    /// a multiple of 64 per dimension.
    pub fn init<B: Backend>(&self, device: &B::Device) -> YoloV1Model<B> {
        // Stage 1: 7x7 stride-2 stem conv + 2x2/2 pool (downsample x4).
        let conv1_1 = Conv2dActivationConfig::new(3, 64)
            .with_kernel_size(7)
            .with_stride(2)
            .with_padding(Some(3))
            .init(device);

        let pool1_1 = MaxPool2dConfig::new([2,2])
            .with_strides([2,2])
            .init();

        // Stage 2: single 3x3 conv + pool.
        let conv2_1 = Conv2dActivationConfig::new(64, 192)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);

        let pool2_1 = MaxPool2dConfig::new([2,2])
            .with_strides([2,2])
            .init();

        // Stage 3: 1x1 bottlenecks alternating with 3x3 convs, then pool.
        let conv3_1 = Conv2dActivationConfig::new(192, 128)
            .with_kernel_size(1)
            .init(device);
        let conv3_2 = Conv2dActivationConfig::new(128, 256)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv3_3 = Conv2dActivationConfig::new(256, 256)
            .with_kernel_size(1)
            .init(device);
        let conv3_4 = Conv2dActivationConfig::new(256, 512)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);

        let pool3_1 = MaxPool2dConfig::new([2,2])
            .with_strides([2,2])
            .init();

        // Stage 4: four repeated (1x1 -> 256, 3x3 -> 512) pairs, then a
        // (1x1 -> 512, 3x3 -> 1024) pair, then pool.
        let conv4_1 = Conv2dActivationConfig::new(512, 256)
            .with_kernel_size(1)
            .init(device);
        let conv4_2 = Conv2dActivationConfig::new(256, 512)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv4_3 = Conv2dActivationConfig::new(512, 256)
            .with_kernel_size(1)
            .init(device);
        let conv4_4 = Conv2dActivationConfig::new(256, 512)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv4_5 = Conv2dActivationConfig::new(512, 256)
            .with_kernel_size(1)
            .init(device);
        let conv4_6 = Conv2dActivationConfig::new(256, 512)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv4_7 = Conv2dActivationConfig::new(512, 256)
            .with_kernel_size(1)
            .init(device);
        let conv4_8 = Conv2dActivationConfig::new(256, 512)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv4_9 = Conv2dActivationConfig::new(512, 512)
            .with_kernel_size(1)
            .init(device);
        let conv4_10 = Conv2dActivationConfig::new(512, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);

        let pool4_1 = MaxPool2dConfig::new([2,2])
            .with_strides([2,2])
            .init();

        // Stage 5: two (1x1 -> 512, 3x3 -> 1024) pairs, then two 3x3
        // convs; conv5_6 has stride 2 and performs the final downsampling.
        let conv5_1 = Conv2dActivationConfig::new(1024, 512)
            .with_kernel_size(1)
            .init(device);
        let conv5_2 = Conv2dActivationConfig::new(512, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv5_3 = Conv2dActivationConfig::new(1024, 512)
            .with_kernel_size(1)
            .init(device);
        let conv5_4 = Conv2dActivationConfig::new(512, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv5_5 = Conv2dActivationConfig::new(1024, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv5_6 = Conv2dActivationConfig::new(1024, 1024)
            .with_kernel_size(3)
            .with_stride(2)
            .with_padding(Some(1))
            .init(device);

        // Stage 6: final 3x3 convolutions before the fully connected head.
        let conv6_1 = Conv2dActivationConfig::new(1024, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);
        let conv6_2 = Conv2dActivationConfig::new(1024, 1024)
            .with_kernel_size(3)
            .with_padding(Some(1))
            .init(device);

        // Fully connected head. The fc1 input size is derived from the
        // configured image size rather than hardcoded to 7x7, so inputs
        // other than 448x448 are supported as long as they are a multiple
        // of 64 (for [448, 448] this reproduces 1024 * 7 * 7 exactly).
        debug_assert!(
            self.image_size[0] % 64 == 0 && self.image_size[1] % 64 == 0,
            "image_size must be a multiple of 64 per dimension"
        );
        let feat_h = self.image_size[0] / 64;
        let feat_w = self.image_size[1] / 64;
        let fc1 = LinearConfig::new(1024 * feat_h * feat_w, 4096).init(device);
        // Output: [batch, S * S * (C + B * 5)] flattened grid predictions.
        let fc2 = LinearConfig::new(4096, self.grid_size * self.grid_size * (self.num_classes + self.num_boxes * 5))
            .init(device);

        YoloV1Model {
            conv1_1, pool1_1,
            conv2_1, pool2_1,
            conv3_1, conv3_2, conv3_3, conv3_4, pool3_1,
            conv4_1, conv4_2, conv4_3, conv4_4, conv4_5, conv4_6, conv4_7, conv4_8, conv4_9, conv4_10, pool4_1,
            conv5_1, conv5_2, conv5_3, conv5_4, conv5_5, conv5_6,
            conv6_1, conv6_2,
            fc1,
            fc2,
        }
    }
}

/////////////////////////////////////////////////////
/// A 2d convolution immediately followed by a LeakyReLU activation.
#[derive(Module, Debug)]
pub struct Conv2dActivation<B: Backend> {
    conv: Conv2d<B>,
    activation: LeakyRelu,
}

impl<B: Backend> Conv2dActivation<B> {
    /// Applies the convolution, then the LeakyReLU activation.
    pub fn forward(&self, input: Tensor<B, 4>) -> Tensor<B, 4> {
        self.activation.forward(self.conv.forward(input))
    }
}

/// [Conv2dActivation] configuration (conv hyperparameters; the activation
/// is always LeakyReLU with slope 0.1 — see `init`).
#[derive(Debug)]
pub struct Conv2dActivationConfig {
    pub in_channels: usize,
    pub out_channels: usize,
    // Square kernel side length.
    pub kernel_size: usize,
    // Square stride.
    pub stride: usize,
    // Explicit symmetric padding; `None` selects "same"-style padding
    // computed from kernel_size and dilation in `init`.
    pub padding: Option<usize>,
    pub groups: usize,
    pub dilation: usize,
    // Whether the convolution adds a learnable bias (defaults to false).
    pub bias: bool,
}

impl Conv2dActivationConfig {
    /// Creates a config with the given channel counts and defaults:
    /// 3x3 kernel, stride 1, auto ("same"-style) padding, no grouping,
    /// no dilation, no bias.
    pub fn new(in_channels: usize, out_channels: usize) -> Self {
        Self {
            in_channels,
            out_channels,
            kernel_size: 3,
            stride: 1,
            padding: None,
            groups: 1,
            dilation: 1,
            bias: false,
        }
    }

    /// Sets the (square) kernel side length.
    pub fn with_kernel_size(self, kernel_size: usize) -> Self {
        Self { kernel_size, ..self }
    }

    /// Sets the (square) stride.
    pub fn with_stride(self, stride: usize) -> Self {
        Self { stride, ..self }
    }

    /// Sets explicit symmetric padding; `None` restores auto padding.
    pub fn with_padding(self, padding: Option<usize>) -> Self {
        Self { padding, ..self }
    }

    /// Sets the number of convolution groups.
    pub fn with_groups(self, groups: usize) -> Self {
        Self { groups, ..self }
    }

    /// Sets the (square) dilation.
    pub fn with_dilation(self, dilation: usize) -> Self {
        Self { dilation, ..self }
    }

    /// Enables or disables the convolution bias.
    pub fn with_bias(self, bias: bool) -> Self {
        Self { bias, ..self }
    }

    /// Builds the [Conv2dActivation] layer on `device`.
    pub fn init<B: Backend>(&self, device: &B::Device) -> Conv2dActivation<B> {
        // Auto padding mirrors torchvision's Conv2dNormActivation default:
        // (kernel_size - 1) / 2 * dilation.
        let padding = self
            .padding
            .unwrap_or((self.kernel_size - 1) / 2 * self.dilation);

        let conv = Conv2dConfig::new(
            [self.in_channels, self.out_channels],
            [self.kernel_size, self.kernel_size],
        )
        .with_stride([self.stride, self.stride])
        .with_padding(PaddingConfig2d::Explicit(padding, padding))
        .with_dilation([self.dilation, self.dilation])
        .with_groups(self.groups)
        .with_bias(self.bias)
        .init(device);

        Conv2dActivation {
            conv,
            activation: LeakyRelu { negative_slope: 0.1 },
        }
    }
}
