import numpy as np
import struct
import os
import time

def show_matrix(mat, name):
    """Debug hook for inspecting a matrix; printing is currently disabled."""
    # Left here for quick re-enabling during debugging:
    # print(name + str(mat.shape) + ' mean %f, std %f' % (mat.mean(), mat.std()))
    pass

def show_time(time, name):
    """Debug hook for reporting an elapsed time; printing is currently disabled.

    NOTE(review): the parameter `time` shadows the module-level `import time`;
    harmless while the body is a no-op, but worth renaming if re-enabled.
    """
    # print(name + str(time))
    pass

class ConvolutionalLayer(object):
    """Naive 2-D convolution over NCHW inputs.

    Weight layout is [C_in, K, K, C_out]; forward and backward are plain
    quadruple loops — a reference implementation, not an optimized one.
    """

    def __init__(self, kernel_size, channel_in, channel_out, padding, stride):
        # Store the layer hyper-parameters.
        self.kernel_size = kernel_size
        self.channel_in = channel_in
        self.channel_out = channel_out
        self.padding = padding
        self.stride = stride

    def init_param(self, std=0.01):
        """Gaussian-initialize the weights (given std) and zero the biases."""
        shape = (self.channel_in, self.kernel_size, self.kernel_size, self.channel_out)
        self.weight = np.random.normal(loc=0.0, scale=std, size=shape)
        self.bias = np.zeros([self.channel_out])

    def forward(self, input):
        """Convolve `input` [N, C, H, W]; return [N, C_out, H_out, W_out]."""
        self.input = input
        pad, k, stride = self.padding, self.kernel_size, self.stride
        n_batch, n_chan, in_h, in_w = input.shape
        # Zero-pad both spatial dimensions symmetrically.
        self.input_pad = np.zeros([n_batch, n_chan, in_h + 2 * pad, in_w + 2 * pad])
        self.input_pad[:, :, pad:pad + in_h, pad:pad + in_w] = input
        out_h = (in_h + 2 * pad - k) // stride + 1
        out_w = (in_w + 2 * pad - k) // stride + 1
        self.output = np.zeros([n_batch, self.channel_out, out_h, out_w])
        for n in range(n_batch):
            for co in range(self.channel_out):
                kernel = self.weight[:, :, :, co]  # [C_in, K, K] slice for this output channel
                for oh in range(out_h):
                    rs = oh * stride
                    for ow in range(out_w):
                        cs = ow * stride
                        # Inner product of the receptive field with the kernel, plus bias.
                        patch = self.input_pad[n, :, rs:rs + k, cs:cs + k]
                        self.output[n, co, oh, ow] = (patch * kernel).sum() + self.bias[co]
        return self.output

    def backward(self, top_diff):
        """Accumulate d_weight / d_bias and return the gradient w.r.t. the input."""
        self.d_weight = np.zeros(self.weight.shape)
        self.d_bias = np.zeros(self.bias.shape)
        bottom_diff = np.zeros(self.input_pad.shape)
        k, stride, pad = self.kernel_size, self.stride, self.padding
        n_batch, n_out, out_h, out_w = top_diff.shape
        for n in range(n_batch):
            for co in range(n_out):
                for oh in range(out_h):
                    rs = oh * stride
                    for ow in range(out_w):
                        cs = ow * stride
                        g = top_diff[n, co, oh, ow]
                        # Weight gradient: receptive field scaled by upstream gradient.
                        self.d_weight[:, :, :, co] += self.input_pad[n, :, rs:rs + k, cs:cs + k] * g
                        # Bias gradient: plain sum of the upstream gradient.
                        self.d_bias[co] += g
                        # Input gradient: kernel scattered back into the padded buffer.
                        bottom_diff[n, :, rs:rs + k, cs:cs + k] += self.weight[:, :, :, co] * g
        # Strip the padding so the gradient matches the original input shape.
        in_h, in_w = self.input.shape[2], self.input.shape[3]
        return bottom_diff[:, :, pad:pad + in_h, pad:pad + in_w]

    def load_param(self, weight, bias):
        """Load externally trained parameters (weight [C_in, K, K, C_out], bias [C_out])."""
        self.weight = weight
        self.bias = bias

class MaxPoolingLayer(object):
    """Max-pooling layer over NCHW inputs (naive nested-loop implementation)."""

    def __init__(self, kernel_size, stride):
        # Pooling window size and stride.
        self.kernel_size = kernel_size
        self.stride = stride

    def forward(self, input):
        """Max-pool `input` [N, C, H, W]; return [N, C, H_out, W_out]."""
        self.input = input  # [N, C, H, W]
        # Placeholder mask kept for interface compatibility; not used by backward,
        # which recomputes the argmax from self.input instead.
        self.max_index = np.zeros(self.input.shape)
        # Output spatial size for a valid (no-padding) pooling window.
        height_out = (self.input.shape[2] - self.kernel_size) // self.stride + 1
        width_out = (self.input.shape[3] - self.kernel_size) // self.stride + 1
        self.output = np.zeros([self.input.shape[0], self.input.shape[1], height_out, width_out])
        for idxn in range(self.input.shape[0]):  # batch samples
            for idxc in range(self.input.shape[1]):  # channels
                for idxh in range(height_out):  # output rows
                    for idxw in range(width_out):  # output cols
                        h_start = idxh * self.stride
                        h_end = h_start + self.kernel_size
                        w_start = idxw * self.stride
                        w_end = w_start + self.kernel_size
                        # Maximum over the current pooling window.
                        window = self.input[idxn, idxc, h_start:h_end, w_start:w_end]
                        self.output[idxn, idxc, idxh, idxw] = np.max(window)
        return self.output

    def backward(self, top_diff):
        """Route each upstream gradient to the argmax position of its window.

        Uses `+=` (not `=`) so that when windows overlap (stride < kernel_size)
        gradients from multiple windows accumulate instead of overwriting each
        other. On ties, argmax picks the first (row-major) maximum.
        """
        bottom_diff = np.zeros(self.input.shape)
        for idxn in range(top_diff.shape[0]):
            for idxc in range(top_diff.shape[1]):
                for idxh in range(top_diff.shape[2]):
                    for idxw in range(top_diff.shape[3]):
                        h_start = idxh * self.stride
                        h_end = h_start + self.kernel_size
                        w_start = idxw * self.stride
                        w_end = w_start + self.kernel_size
                        # Recompute the forward-pass argmax for this window.
                        window = self.input[idxn, idxc, h_start:h_end, w_start:w_end]
                        max_index = np.unravel_index(np.argmax(window), window.shape)
                        bottom_diff[idxn, idxc, h_start + max_index[0], w_start + max_index[1]] += \
                            top_diff[idxn, idxc, idxh, idxw]
        return bottom_diff

class FlattenLayer(object):
    """Flatten [N, C, H, W] feature maps to [N, *output_shape] in NHWC order."""

    def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape    # expected input shape (kept for interface)
        self.output_shape = output_shape  # flattened target shape, e.g. [C*H*W]

    def forward(self, input):
        """Transpose NCHW -> NHWC, then reshape to [N] + output_shape."""
        # Channels-last ordering before flattening, so the flattened vector
        # enumerates H, then W, then C.
        self.input = input.transpose(0, 2, 3, 1)
        batch = self.input.shape[0]
        self.output = self.input.reshape([batch] + list(self.output_shape))
        return self.output
