import numpy as np


class Layer:
    """Abstract interface for a trainable network layer.

    Concrete subclasses must provide all four methods; every stub here
    raises ``NotImplementedError``.  Note that ``__init__`` raises too,
    so subclasses must not call ``super().__init__()``.
    """

    def __init__(self, input_size, output_size, activation):
        """Initialize layer parameters (weights/biases/activation)."""
        raise NotImplementedError()

    def forward(self, inputs):
        """Compute and return the layer output for ``inputs``."""
        raise NotImplementedError()

    def backward(self, output_gradient):
        """Backpropagate ``output_gradient``; return the input gradient."""
        raise NotImplementedError()

    def update_weights(self, learning_rate):
        """Apply the gradients accumulated by ``backward``."""
        raise NotImplementedError()


class DenseLayer(Layer):
    """Fully connected layer: ``a = activation(x @ W + b)``.

    Accepts either a 2-D batch of row vectors ``(batch, input_size)``
    or a single sample of any shape (flattened to one row).  The
    previous implementation flattened *every* input that was not
    exactly ``(1, n)`` with ``reshape(1, -1)``, turning a ``(B, n)``
    batch into ``(1, B*n)`` and breaking the matmul against the
    ``(n, out)`` weight matrix; 2-D inputs are now passed through
    unchanged, so batched training works.  Single-sample behavior is
    identical to before.
    """

    def __init__(self, input_size, output_size, activation):
        # Small random weights break symmetry; biases start at zero.
        self.weights = np.random.randn(input_size, output_size) * 0.1
        self.biases = np.zeros((1, output_size))
        self.activation = activation

    def forward(self, inputs):
        """Run the forward pass, caching intermediates for backward().

        Parameters
        ----------
        inputs : ndarray
            A 2-D batch ``(batch, input_size)``, or anything that
            flattens to a single ``(1, input_size)`` row.

        Returns
        -------
        ndarray of shape ``(batch, output_size)``.
        """
        # Keep 2-D batches as-is; flatten anything else to one row.
        self.inputs = inputs if inputs.ndim == 2 else inputs.reshape(1, -1)
        self.z = np.dot(self.inputs, self.weights) + self.biases
        self.a = self.activation.activate(self.z)
        return self.a

    def backward(self, output_gradient):
        """Backpropagate ``output_gradient``; cache parameter gradients.

        Returns the gradient with respect to the layer inputs, with the
        same shape as ``self.inputs``.
        """
        if output_gradient.ndim != 2:
            output_gradient = output_gradient.reshape(1, -1)
        # Chain rule through the activation: dL/dz = f'(z) * dL/da.
        activation_gradient = self.activation.derivative(self.z) * output_gradient
        self.weights_gradient = np.dot(self.inputs.T, activation_gradient)
        # Sum over the batch axis so each unit gets one bias gradient.
        self.biases_gradient = np.sum(activation_gradient, axis=0, keepdims=True)
        return np.dot(activation_gradient, self.weights.T)

    def update_weights(self, learning_rate):
        """Plain SGD step using the gradients cached by backward()."""
        self.weights -= learning_rate * self.weights_gradient
        self.biases -= learning_rate * self.biases_gradient


class ConvolutionalLayer(Layer):
    """Single-channel 2-D convolution with an optional activation.

    Filters are ``(num_filters, k, k)``; inputs are single-channel
    ``(H, W)`` maps or ``(batch, H, W)`` stacks.  ``forward`` flattens
    its output to ``(batch, num_filters * out_h * out_w)`` so a dense
    layer can consume it directly.
    """

    def __init__(self, num_filters, filter_size, input_shape, stride=1, padding=0, activation=None):
        self.num_filters = num_filters
        self.filter_size = filter_size
        self.input_shape = input_shape  # (height, width)
        self.stride = stride
        self.padding = padding
        self.activation = activation
        # Small random filters; one scalar bias per filter.
        self.filters = np.random.randn(num_filters, filter_size, filter_size) * 0.1
        self.biases = np.zeros(num_filters)

        # Standard conv arithmetic: floor((dim - k + 2p) / s) + 1.
        self.output_height = (input_shape[0] - filter_size + 2 * padding) // stride + 1
        self.output_width = (input_shape[1] - filter_size + 2 * padding) // stride + 1

    def _window(self, row, col):
        """Return (top, bottom, left, right) slice bounds of the
        receptive field for output position (row, col)."""
        top = row * self.stride
        left = col * self.stride
        return top, top + self.filter_size, left, left + self.filter_size

    def forward(self, inputs):
        """Convolve ``inputs`` with every filter and flatten the result.

        Caches the original inputs, the padded inputs, and the
        pre-activation output for use by ``backward``.
        """
        # Keep the un-promoted array so backward can restore its shape.
        self.inputs = inputs
        batch_size = inputs.shape[0] if inputs.ndim > 2 else 1

        # Promote a lone (H, W) sample to a batch of one.
        if inputs.ndim == 2:
            inputs = inputs[np.newaxis, ...]

        # Zero-pad the two spatial axes when requested.
        pad = self.padding
        padded = np.pad(inputs, ((0, 0), (pad, pad), (pad, pad)), 'constant') if pad > 0 else inputs
        self.padded_inputs = padded

        pre_activation = np.zeros(
            (batch_size, self.num_filters, self.output_height, self.output_width))

        # Slide each filter over each sample, one window at a time.
        for sample in range(batch_size):
            for f in range(self.num_filters):
                kernel = self.filters[f]
                bias = self.biases[f]
                for row in range(self.output_height):
                    for col in range(self.output_width):
                        top, bottom, left, right = self._window(row, col)
                        patch = padded[sample, top:bottom, left:right]
                        pre_activation[sample, f, row, col] = np.sum(patch * kernel) + bias

        self.output = pre_activation

        if self.activation:
            self.activated_output = np.array(
                [self.activation.activate(pre_activation[sample]) for sample in range(batch_size)])
            # Flatten per sample for a following dense layer.
            return self.activated_output.reshape(batch_size, -1)
        # No activation: flatten the raw convolution output.
        return pre_activation.reshape(batch_size, -1)

    def backward(self, output_gradient):
        """Compute filter/bias gradients and return the input gradient.

        ``output_gradient`` arrives flattened (as ``forward`` produced
        it) and is first reshaped back into the 4-D output volume.
        """
        batch_size = self.inputs.shape[0] if self.inputs.ndim > 2 else 1

        grad = output_gradient.reshape(
            batch_size, self.num_filters, self.output_height, self.output_width)
        if self.activation:
            # Chain rule through the activation, evaluated at the cached
            # pre-activation output.
            grad = np.array([self.activation.derivative(self.output[sample]) * grad[sample]
                             for sample in range(batch_size)])

        self.filters_gradient = np.zeros_like(self.filters)
        self.biases_gradient = np.zeros_like(self.biases)
        padded_grad = np.zeros_like(self.padded_inputs)

        for sample in range(batch_size):
            for f in range(self.num_filters):
                for row in range(self.output_height):
                    for col in range(self.output_width):
                        top, bottom, left, right = self._window(row, col)
                        g = grad[sample, f, row, col]
                        # Each window contributes its patch to the filter
                        # gradient, one scalar to the bias gradient, and
                        # the filter itself to the input gradient.
                        self.filters_gradient[f] += g * self.padded_inputs[sample, top:bottom, left:right]
                        self.biases_gradient[f] += g
                        padded_grad[sample, top:bottom, left:right] += g * self.filters[f]

        # Strip the padding back off the input gradient if it was added.
        pad = self.padding
        inputs_gradient = padded_grad[:, pad:-pad, pad:-pad] if pad > 0 else padded_grad
        return inputs_gradient.reshape(self.inputs.shape)

    def update_weights(self, learning_rate):
        """Plain SGD step on filters and biases."""
        self.filters -= learning_rate * self.filters_gradient
        self.biases -= learning_rate * self.biases_gradient
