use burn::{
    nn::{
        BatchNorm, BatchNormConfig, Dropout, DropoutConfig, Linear, LinearConfig, Relu,
        conv::{Conv2d, Conv2dConfig},
        loss::CrossEntropyLossConfig,
        pool::{AdaptiveAvgPool2d, MaxPool2d, MaxPool2dConfig},
    },
    prelude::*,
    tensor::backend::AutodiffBackend,
    train::{ClassificationOutput, TrainOutput, TrainStep, ValidStep},
};

use crate::data::batcher::HWDBBatch;

/// AlexNet-style CNN for handwritten-character classification.
///
/// Five convolutional stages followed by three fully-connected layers; the
/// exact channel/feature sizes are chosen in [`BasicModelConfig::init`].
/// Expects single-channel input images (a channel axis is inserted in
/// `forward`).
#[derive(Module, Debug)]
pub struct BasicModel<B: Backend> {
    // Convolution stack; the first two stages are followed by max-pooling
    // and normalization in `forward`, the last three are conv+relu only.
    conv1: Conv2d<B>,
    conv2: Conv2d<B>,
    conv3: Conv2d<B>,
    conv4: Conv2d<B>,
    conv5: Conv2d<B>,
    // Classifier head: two hidden layers plus the final logits layer.
    fc1: Linear<B>,
    fc2: Linear<B>,
    fc3: Linear<B>,
    // Shared (stateless) pooling/activation/regularization modules.
    max_pool: MaxPool2d,
    avg_pool: AdaptiveAvgPool2d,
    activation: Relu,
    dropout: Dropout,
    // NOTE(review): named "lrn" (local response norm, as in AlexNet) but
    // these are BatchNorm layers — consider renaming to batch_norm1/2.
    lrn1: BatchNorm<B, 2>,
    lrn2: BatchNorm<B, 2>,
}

impl<B: Backend> BasicModel<B> {
    /// Runs the network on a batch of single-channel images.
    ///
    /// Input is `[batch, height, width]`; a channel axis is inserted before
    /// the convolutional stack. Returns raw class logits of shape
    /// `[batch, num_classes]` (no softmax — the loss applies it).
    pub fn forward(&self, images: Tensor<B, 3>) -> Tensor<B, 2> {
        let [batch_size, height, width] = images.dims();

        // Insert the channel dimension expected by Conv2d: [N, 1, H, W].
        let mut x = images.reshape([batch_size, 1, height, width]);

        // Stage 1: conv -> relu -> max-pool -> norm.
        x = self.lrn1.forward(
            self.max_pool
                .forward(self.activation.forward(self.conv1.forward(x))),
        );

        // Stage 2: conv -> relu -> max-pool -> norm.
        x = self.lrn2.forward(
            self.max_pool
                .forward(self.activation.forward(self.conv2.forward(x))),
        );

        // Stages 3-5: conv -> relu, no pooling.
        x = self.activation.forward(self.conv3.forward(x));
        x = self.activation.forward(self.conv4.forward(x));
        x = self.activation.forward(self.conv5.forward(x));

        // Force a fixed spatial size, then flatten for the classifier head.
        let pooled = self.avg_pool.forward(x);
        let [_, channels, ph, pw] = pooled.dims();
        let mut features = self
            .dropout
            .forward(pooled.reshape([batch_size, channels * ph * pw]));

        // Two hidden fully-connected layers with dropout, then the logits.
        features = self
            .dropout
            .forward(self.activation.forward(self.fc1.forward(features)));
        features = self
            .dropout
            .forward(self.activation.forward(self.fc2.forward(features)));
        self.fc3.forward(features)
    }

    /// Forward pass plus cross-entropy loss against integer class targets,
    /// packaged for Burn's train/valid steps.
    pub fn forward_classification(
        &self,
        images: Tensor<B, 3>,
        targets: Tensor<B, 1, Int>,
    ) -> ClassificationOutput<B> {
        let logits = self.forward(images);
        let loss = CrossEntropyLossConfig::new()
            .init(&logits.device())
            .forward(logits.clone(), targets.clone());
        ClassificationOutput::new(loss, logits, targets)
    }
}

impl<B: AutodiffBackend> TrainStep<HWDBBatch<B>, ClassificationOutput<B>> for BasicModel<B> {
    fn step(&self, item: HWDBBatch<B>) -> burn::train::TrainOutput<ClassificationOutput<B>> {
        let output = self.forward_classification(item.images, item.labels);
        TrainOutput::new(self, output.loss.backward(), output)
    }
}

impl<B: Backend> ValidStep<HWDBBatch<B>, ClassificationOutput<B>> for BasicModel<B> {
    fn step(&self, item: HWDBBatch<B>) -> ClassificationOutput<B> {
        self.forward_classification(item.images, item.labels)
    }
}

/// Hyper-parameters for building a [`BasicModel`].
#[derive(Config, Debug)]
pub struct BasicModelConfig {
    /// Number of input image channels (default 1 = grayscale).
    #[config(default = 1)]
    pub in_channels: usize,
    /// Channels produced by the final conv layer (conv5).
    #[config(default = 96)]
    pub out_channels: usize,
    /// Spatial side length forced by the adaptive average pool; also sets
    /// the flattened feature size fed to the FC head.
    #[config(default = 6)]
    pub feature_hid_size: usize,
    /// Number of output classes; no default, must be supplied.
    pub num_classes: usize,
    /// Dropout probability applied after pooling and each hidden FC layer.
    #[config(default = "0.5")]
    pub dropout_prob: f64,
}

impl BasicModelConfig {
    /// Builds a [`BasicModel`] on the given device from this configuration.
    pub fn init<B: Backend>(&self, device: &B::Device) -> BasicModel<B> {
        // Flattened feature size after the adaptive average pool:
        // out_channels * feature_hid_size^2.
        let hid = self.feature_hid_size;
        let fc_size = self.out_channels * hid * hid;

        // Small builders for the repeated layer constructions.
        let conv = |channels: [usize; 2], k: usize| {
            Conv2dConfig::new(channels, [k, k]).init(device)
        };
        let linear = |d_in: usize, d_out: usize| LinearConfig::new(d_in, d_out).init(device);

        BasicModel {
            // Square kernels shrinking from 11x11 down to 3x3.
            conv1: conv([self.in_channels, 32], 11),
            conv2: conv([32, 96], 5),
            conv3: conv([96, 128], 3),
            conv4: conv([128, 128], 3),
            conv5: conv([128, self.out_channels], 3),
            // Normalization after stage 1 (32 ch) and stage 2 (96 ch).
            lrn1: BatchNormConfig::new(32).init(device),
            lrn2: BatchNormConfig::new(96).init(device),
            // 2x2 max-pooling with stride 2 halves the spatial dims.
            max_pool: MaxPool2dConfig::new([2, 2]).with_strides([2, 2]).init(),
            // Fixes the spatial output so fc_size is input-size independent.
            avg_pool: AdaptiveAvgPool2d {
                output_size: [hid, hid],
            },
            // Classifier head: two same-width hidden layers, then logits.
            fc1: linear(fc_size, fc_size),
            fc2: linear(fc_size, fc_size),
            fc3: linear(fc_size, self.num_classes),
            activation: Relu::new(),
            dropout: DropoutConfig::new(self.dropout_prob).init(),
        }
    }
}
