'''
【人工神经网络】

20251117(qing): 输入如下内容创建新的模型并且训练

python nn.py --size 400 200 -l 0.001 -t 10 -n 14208,10000,1000,200,2 -o human_20251117_0_0.model --kernel-size=5 --conv-stride=2 --pool-size=2 --pool-stride=2 --teacher-data=exs   --gpu

'''

import random
import pickle
import numpy
import numpy as np
import scipy.special
import argparse
import time
import json
import os
import glob
from abc import ABC, abstractmethod 
from skimage import io, transform, exposure
from skimage.util import img_as_float
import msgpack

# 全局变量，它应该在main()中进行初始化
gpu_mode = False

class ActiveFunctions:
    '''激活函数类定义'''

    # 激活函数是函数
    # FIXME: 不知道为什么，似乎只有它能用
    @staticmethod
    def sigmoid(x):
        return scipy.special.expit(x)
    
    # 其他常见激活函数（注释状态）
    # Tanh函数 - 输出范围在[-1, 1]之间，中心在0
    @staticmethod
    def tanh(x):
        return numpy.tanh(x)
    
    # ReLU函数 - 修正线性单元，计算简单，缓解梯度消失
    @staticmethod
    def relu(x):
        return numpy.maximum(0, x)
    
    # Leaky ReLU函数 - 带小斜率的ReLU，解决神经元死亡问题
    @staticmethod
    def leaky_relu(x):
        return numpy.maximum(0.01 * x, x)
    
    # ELU函数 - 指数线性单元，有负值，有助于平均值接近0
    @staticmethod
    def elu(x):
        return numpy.where(x > 0, x, 0.1 * (numpy.exp(x) - 1))

    # 空激活函数 - 不进行任何变换
    @staticmethod
    def identity(x):
        return x
    
    # Softmax函数 - 通常用于多分类输出层，输出概率分布
    def softmax(x):
        exp_x = np.exp(x - np.max(x))
        return exp_x / np.sum(exp_x)

    @staticmethod
    def softmax1(scores, ):
        exp_scores = np.exp(scores)
        probabilities = exp_scores/np.sum(exp_scores)
        return np.round(probabilities,4)

#作者：上海最后深情
#链接：https://www.nowcoder.com/discuss/801943637457137664
#来源：牛客网

    def _activation_derivative(activation_function, x):
        if activation_function.__name__ == 'sigmoid':
            # 计算sigmoid函数值
            s = 1 / (1 + np.exp(-x))
            # 返回逐元素导数（对角元素）
            return s * (1 - s)

        elif activation_function.__name__ == 'relu':
            return np.where(x > 0, 1.0, 0.0)
        
        elif activation_function.__name__ == 'softmax':
            # 计算softmax输出
            y = activation_function(x)
            # 确保y是一维数组
            y = y.flatten()
            n = y.shape[0]
            jacobian = np.zeros((n, n))
            if gpu_mode:
                jacobian = cp.array(jacobian)
            for i in range(n):
                for j in range(n):
                    if i == j:
                        jacobian[i, j] = y[i] * (1 - y[j])
                    else:
                        jacobian[i, j] = -y[i] * y[j]
            return jacobian
        
        else:
            raise ValueError("不支持的激活函数")


class Layer(ABC):
    '''层类接口定义'''

    @abstractmethod
    def get_input_shape(self) -> tuple:
        '''返回该层的输入形状'''
        ...
    
    @abstractmethod
    def get_output_shape(self) -> tuple:
        '''返回该层的输出形状'''
        ...

    @abstractmethod
    def get_weights(self):
        '''将该两层之间的权重输出'''
        ...
    
    @abstractmethod
    def get_info(self) -> dict:
        '''返回该层的所有信息，包括权重以及偏执'''
        ...

    @abstractmethod
    def forward(self, inputs):
        '''正向传播方法，返回最终输出'''
        pass

    @abstractmethod
    def backward(self, errors):
        '''反向传播方法，更新权重并返回上一层的误差（方便堆叠）'''
        pass

class NeuralNetwork(Layer):
    '''人工神经网络类定义'''

    def get_type(self) -> str:
        '''返回该层的类型'''
        return 'NeuralNetwork'
    
    def get_input_shape(self) -> tuple:
        '''返回该层的输入形状'''
        return self.inodes, 1
    
    def get_output_shape(self) -> tuple:
        '''返回该层的输出形状'''
        return self.onodes, 1
    
    def get_info(self) -> dict:
        '''返回该层的所有信息，包括权重以及偏执'''
        return {
            'layer_type': self.get_type(),
            'inputs_shape': list(self.get_input_shape()),
            'outputs_shape': list(self.get_output_shape()),
            'weights': self.get_weights(),
            'bias': self.get_bias(),
            'activation_function': self.activation_function.__name__,   
            'learning_rate': self.lr
        }
    
    def get_weights(self):
        '''将该两层之间的权重输出'''
        return self.weights.tolist()
    
    def get_bias(self):
        '''获取偏置'''
        return self.bias.tolist()
    
    def set_bias(self, bias):
        '''设置偏置'''
        self.bias = bias

    def __init__(self, weights, bias, learning_rate: float, 
                 activation_function: ActiveFunctions = ActiveFunctions.sigmoid, 
                 gpu_mode=False):
        self.onodes, self.inodes = weights.shape
        self.weights = weights
        self.bias = bias
        self.lr = learning_rate
        self.activation_function = activation_function
        self.gpu_mode = gpu_mode
        self.np = cp if gpu_mode else np
        
        # 初始化存储变量
        self.pre_inputs = None
        self.pre_activation_inputs = None
        self.pre_outputs = None

    def forward(self, inputs):
        _inputs = self.weights @ inputs + self.bias.reshape(-1, 1)
        outputs = self.activation_function(_inputs)
        
        self.pre_inputs = inputs
        self.pre_activation_inputs = _inputs
        self.pre_outputs = outputs
        
        return outputs

    def backward(self, errors):
        batch_size = errors.shape[1]
        inputs = self.pre_inputs
        
        if self.activation_function.__name__ == 'softmax':
            return self._backward_softmax(errors, batch_size, inputs)
        else:
            return self._backward_standard(errors, batch_size, inputs)

    def _backward_standard(self, errors, batch_size, inputs):
        activation_derivative = ActiveFunctions._activation_derivative(
            self.activation_function, self.pre_activation_inputs
        )
        
        first = errors * activation_derivative
        self.weights_grad = (first @ inputs.T) / batch_size
        self.bias_grad = self.np.sum(first, axis=1) / batch_size
        uplevel_errors = self.weights.T @ first
        
        self.update()
        return uplevel_errors

    def _backward_softmax(self, errors, batch_size, inputs):
        weights_grad = self.np.zeros_like(self.weights)
        bias_grad = self.np.zeros_like(self.bias)
        uplevel_errors = self.np.zeros((self.inodes, batch_size))
        
        for i in range(batch_size):
            sample_input = inputs[:, i:i+1]
            sample_output = self.pre_outputs[:, i]
            sample_error = errors[:, i:i+1]
            
            jacobian = ActiveFunctions._activation_derivative(
                self.activation_function, sample_output
            )
            
            grad_contribution = jacobian @ sample_error
            weights_grad += grad_contribution @ sample_input.T
            bias_grad += grad_contribution.flatten()
            uplevel_errors[:, i:i+1] = self.weights.T @ grad_contribution
        
        self.weights_grad = weights_grad / batch_size
        self.bias_grad = bias_grad / batch_size
        
        self.update()
        return uplevel_errors

    def update(self):
        # 经验证，这里需要使用加号
        self.weights += self.lr * self.weights_grad
        self.bias -= self.lr * self.bias_grad

###############################################
#  CNN 卷积神经网络

class Conv(Layer):
    '''2D卷积层类定义'''

    def get_type(self) -> str:
        '''返回该层的类型'''
        return 'Conv'
    
    def get_input_shape(self) -> tuple:
        '''返回该层的输入形状'''
        if self._input_shape is not None:
            return self._input_shape
        # 如果没有缓存的输入形状，返回基于参数的形状估计
        return (1, self.in_channels, 0, 0)  # 高度和宽度未知
    
    def get_output_shape(self) -> tuple:
        '''返回该层的输出形状'''
        if self._output_shape is not None:
            return self._output_shape
        # 如果没有缓存的输出形状，返回基于参数的形状估计
        return (1, self.out_channels, 0, 0)  # 高度和宽度未知

    def get_weights(self):
        '''将该两层之间的权重输出'''
        return self.weights.tolist()

    def get_info(self) -> dict:
        '''返回该层的所有信息，包括权重以及偏执'''
        return {
            'layer_type': self.get_type(),
            'inputs_shape': list(self.get_input_shape()),
            'outputs_shape': list(self.get_output_shape()),
            'weights': self.get_weights(),
            'bias': self.get_bias(),
            'activation_function': self.activation_function.__name__,   
            'learning_rate': self.learning_rate,
            'kernel_size': self.kernel_size,
            'stride': self.stride,
            'padding': self.padding,
        }

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, learning_rate=0.01, activation_function=ActiveFunctions.relu):
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.learning_rate = learning_rate
        
        # 激活函数 - 默认为None（无激活函数）
        self.activation_function = activation_function
        
        # 权重和偏置 - 初始化为None，通过set_weights外部设置
        self.weights = None
        self.bias = None
        
        # 输入输出形状缓存
        self._input_shape = None
        self._output_shape = None
        
        self.input = None
        self.col = None
        self.pre_activation_output = None  # 存储激活前的输出，用于反向传播
    
    
    def get_bias(self):
        """获取偏置"""
        return self.bias.tolist()
    
    def set_weights(self, weights, bias):
        """设置权重和偏置"""
        #print('Conv weights mean: ', numpy.mean(weights))
        self.weights = weights
        self.bias = bias

    def im2col(self, x):
        """向量化的im2col实现"""
        batch_size, in_channels, in_height, in_width = x.shape

        # 计算输出尺寸
        padded_height = in_height + 2 * self.padding
        padded_width = in_width + 2 * self.padding
        
        out_height = (padded_height - self.kernel_size[0]) // self.stride + 1
        out_width = (padded_width - self.kernel_size[1]) // self.stride + 1

        # 添加padding
        if self.padding > 0:
            x_padded = np.pad(x, ((0, 0), (0, 0), 
                                (self.padding, self.padding), 
                                (self.padding, self.padding)), mode='constant')
        else:
            x_padded = x
        
        # 使用stride_tricks创建视图
        strides = (x_padded.strides[0], 
                x_padded.strides[1], 
                self.stride * x_padded.strides[2], 
                self.stride * x_padded.strides[3], 
                x_padded.strides[2], 
                x_padded.strides[3])
        
        shape = (batch_size, in_channels, out_height, out_width, 
                self.kernel_size[0], self.kernel_size[1])

        patches = np.lib.stride_tricks.as_strided(
            x_padded, shape=shape, strides=strides, writeable=False
        )
        # 重塑为列矩阵
        col = patches.reshape(batch_size, in_channels * self.kernel_size[0] * self.kernel_size[1], 
                            out_height * out_width)

        return col, out_height, out_width
    
    def col2im(self, col, input_shape):
        """将列矩阵转换回图像格式"""
        batch_size, in_channels, in_height, in_width = input_shape
        
        # 创建输出图像
        if self.padding > 0:
            dx_padded = np.zeros((batch_size, in_channels, 
                                in_height + 2 * self.padding, 
                                in_width + 2 * self.padding))
        else:
            dx_padded = np.zeros((batch_size, in_channels, in_height, in_width))
        
        out_height = (in_height + 2 * self.padding - self.kernel_size[0]) // self.stride + 1
        out_width = (in_width + 2 * self.padding - self.kernel_size[1]) // self.stride + 1
        
        for b in range(batch_size):
            col_idx = 0
            for i in range(0, in_height + 2 * self.padding - self.kernel_size[0] + 1, self.stride):
                for j in range(0, in_width + 2 * self.padding - self.kernel_size[1] + 1, self.stride):
                    patch = col[b, :, col_idx].reshape(in_channels, self.kernel_size[0], self.kernel_size[1])
                    dx_padded[b, :, i:i+self.kernel_size[0], j:j+self.kernel_size[1]] += patch
                    col_idx += 1
        
        # 移除padding
        if self.padding > 0:
            return dx_padded[:, :, self.padding:-self.padding, self.padding:-self.padding]
        return dx_padded

    def forward(self, x):
        """前向传播，使用权重进行卷积计算"""
        # 缓存输入
        self.input = x
        self._input_shape = x.shape
        batch_size = x.shape[0]
        
        # im2col转换
        self.col, out_height, out_width = self.im2col(x)

        # 缓存输出形状
        self._output_shape = (batch_size, self.out_channels, out_height, out_width)
        
        # 重塑权重为2D
        weights_2d = self.weights.reshape(self.out_channels, -1)
        
        # 计算输出
        output = np.zeros((batch_size, self.out_channels, out_height, out_width))
        
        for b in range(batch_size):
            # 矩阵乘法
            conv_result = weights_2d @ self.col[b]  # (out_channels, out_h * out_w)
            conv_result = conv_result.reshape(self.out_channels, out_height, out_width)
            conv_result += self.bias.reshape(-1, 1, 1)  # 添加偏置
            
            output[b] = conv_result

        # 缓存激活前的输出（用于反向传播）
        self.pre_activation_output = output.copy()
        
        # 应用激活函数
        if self.activation_function is not None:
            output = self.activation_function(output)
        
        return output
            
    def backward(self, dout):
        """改进的反向传播"""
        batch_size = dout.shape[0]

        # 计算激活函数的导数
        if self.activation_function is not None:
            activation_derivative = ActiveFunctions._activation_derivative(
                self.activation_function, self.pre_activation_output)
            dout = dout * activation_derivative

        # 初始化梯度
        self.weights_grad = np.zeros_like(self.weights)
        self.bias_grad = np.zeros_like(self.bias)
        
        # 重塑权重
        weights_2d = self.weights.reshape(self.out_channels, -1)

        # 计算权重梯度和偏置梯度（去掉不必要的归一化）
        for b in range(batch_size):
            dout_reshaped = dout[b].reshape(self.out_channels, -1)
            weights_grad_2d = dout_reshaped @ self.col[b].T  # 去掉空间归一化
            self.weights_grad += weights_grad_2d.reshape(self.weights.shape)
            self.bias_grad += np.sum(dout[b], axis=(1, 2))   # 去掉空间归一化

        # 只进行批量归一化
        self.weights_grad /= batch_size
        self.bias_grad /= batch_size
        
        # 计算输入梯度
        dx = np.zeros_like(self.input)
        for b in range(batch_size):
            dout_reshaped = dout[b].reshape(self.out_channels, -1)
            dx_col = weights_2d.T @ dout_reshaped
            dx_col_reshaped = dx_col.reshape(1, dx_col.shape[0], dx_col.shape[1])
            dx[b] = self.col2im(dx_col_reshaped, self.input[b:b+1].shape)[0]
        
        self.update()
        return dx
              
    def update(self):
        """权重和偏置更新"""

        #print(f'Conv weights_grad mean: {np.mean(self.weights_grad)}')
        #print(f'Conv bias_grad mean: {np.mean(self.bias_grad)}')

        if hasattr(self, 'weights_grad') and self.weights_grad is not None:
            self.weights += self.learning_rate * self.weights_grad
            
        if hasattr(self, 'bias_grad') and self.bias_grad is not None:
            self.bias -= self.learning_rate * self.bias_grad
            
class MaxPool(Layer):

    def get_type(self) -> str:
        '''返回该层的类型'''
        return 'MaxPool'

    def get_input_shape(self) -> tuple:
        '''返回该层的输入形状'''
        return self._input_shape
    
    def get_output_shape(self) -> tuple:
        '''返回该层的输出形状'''
        return self._output_shape
    
    def get_weights(self):
        '''该层无权重'''
        return None

    def get_info(self) -> dict:
        '''返回该层的所有信息'''
        return {
            'layer_type': self.get_type(),
            'inputs_shape': list(self.get_input_shape()) if self.get_input_shape() is not None else None,
            'outputs_shape': list(self.get_output_shape()) if self.get_output_shape() is not None else None,
            'pool_size': self.pool_size,
            'stride': self.stride,
            'padding': self.padding,
        }

    def __init__(self, pool_size=2, stride=2, padding=0):
        self.pool_size = pool_size
        self.stride = stride
        self.padding = padding
        self.input = None
        self._input_shape = None
        self._output_shape = None

    def forward(self, x):
        """向量化的最大池化前向传播（性能更好）"""
        self.input = x
        batch_size, channels, in_height, in_width = x.shape
        self._input_shape = x.shape
        
        # 计算输出尺寸
        padded_height = in_height + 2 * self.padding
        padded_width = in_width + 2 * self.padding
        
        out_height = (padded_height - self.pool_size) // self.stride + 1
        out_width = (padded_width - self.pool_size) // self.stride + 1
        self._output_shape = (batch_size, channels, out_height, out_width)
        
        # 添加padding
        if self.padding > 0:
            x_padded = np.pad(x, ((0, 0), (0, 0), 
                                (self.padding, self.padding), 
                                (self.padding, self.padding)), mode='constant')
        else:
            x_padded = x
        
        # 使用stride_tricks创建窗口视图
        shape = (batch_size, channels, out_height, out_width, self.pool_size, self.pool_size)
        strides = (x_padded.strides[0], x_padded.strides[1], 
                self.stride * x_padded.strides[2], self.stride * x_padded.strides[3],
                x_padded.strides[2], x_padded.strides[3])
        
        windows = np.lib.stride_tricks.as_strided(
            x_padded, shape=shape, strides=strides, writeable=False
        )
        # 计算最大值和位置
        output = np.max(windows, axis=(4, 5))
        # 找到最大值位置（相对位置）
        max_indices_rel = np.argmax(windows.reshape(batch_size, channels, out_height, out_width, -1), axis=4)
        max_pos_rel = np.unravel_index(max_indices_rel, (self.pool_size, self.pool_size))
        
        # 转换为绝对位置
        h_coords = np.arange(out_height)[:, None] * self.stride + max_pos_rel[0]
        w_coords = np.arange(out_width)[None, :] * self.stride + max_pos_rel[1]
        
        max_indices = np.stack([h_coords, w_coords], axis=-1)
        max_indices = np.broadcast_to(max_indices, (batch_size, channels, out_height, out_width, 2))

        
        
        self.cache = (x_padded, max_indices)
        return output

    def backward(self, dout):
        """
        2D最大池化的反向传播
        """
        x_padded, max_indices = self.cache
        batch_size, channels, out_height, out_width = dout.shape
        
        # 初始化输入梯度（使用填充后的形状）
        dx_padded = np.zeros_like(x_padded)
        
        for b in range(batch_size):
            for c in range(channels):
                for oh in range(out_height):
                    for ow in range(out_width):
                        # 获取最大值在填充后图像中的位置
                        max_h, max_w = max_indices[b, c, oh, ow]
                        dx_padded[b, c, max_h, max_w] += dout[b, c, oh, ow]
        
        # 移除padding（如果存在）
        if self.padding > 0:
            dx = dx_padded[:, :, self.padding:-self.padding, self.padding:-self.padding]
        else:
            dx = dx_padded
        
        return dx

class Flatten(Layer):
    '''展平层'''
    
    def get_type(self) -> str:
        '''返回该层的类型'''
        return 'Flatten'
    
    def get_input_shape(self) -> tuple:
        '''返回该层的输入形状'''
        return None
    
    def get_output_shape(self) -> tuple:
        '''返回该层的输出形状'''
        return None
    
    def get_weights(self):
        '''该层无权重'''
        return None
    
    def get_info(self) -> dict:
        '''返回该层的信息'''
        return {
            'layer_type': self.get_type(),
        }

    def forward(self, x):
        '''前向传播'''
        batch_size = x.shape[0]
        self.cache = x.shape
        output = x.reshape(batch_size, -1).T
        return output

    def backward(self, dout):
        '''反向传播'''
        return dout.reshape(self.cache)

###############################################
#  可堆叠神经网络

class StackingNN:
    '''堆叠的神经网络'''
    
    def insert(self, layer: Layer):
        '''在神经网络中插入一个层'''
        self.vec.append(layer)


    #def __init__(self,
    #    in_channels=3, out_channels=8, kernel_size=3, convStride=1, padding=0,
    #    pool_size=2, poolStride=2,
    #    weights: list=None, learning_rate: float = None, dropout_rate=0.0):
    def __init__(self, dropout_rate=0.0):
        '''构造堆叠的神经网络'''
            
        # 这个向量主要用于存储神经层
        self.vec = []

        # 记录 dropout 率（这个参数的意思是在训练时，每个神经元以 1-dropout_rate 的概率被“dropout”，即被设置为 0）
        self.dropout_rate = dropout_rate



    def train(self, inputs, targets: list):
        '''训练一次神经网络组'''

        # 转换输入列表到二维数组
        inputs_now = inputs

        # 计算每一层的输出
        for n in self.vec:
            if gpu_mode and n.get_type() == 'Flatten':
                inputs_now = cp.asarray(inputs_now)
            inputs_now = n.forward(inputs_now)
        
        # 最后的输出
        final_outs = inputs_now
        # FIXME(20251116): -3层之前的输出有问题出现大量0值，
        #   输出总是在0.5徘徊，我怀疑输入数据有问题
        #   输入没问题 卷积层的输入有许多0
        # 似乎是权重设置区间的问题

        # 堆叠式反向传播
        err = self.calc_err(targets, final_outs)

        err_now = err
        for i in range(len(self.vec)).__reversed__():
            if gpu_mode and self.vec[i].get_type() == 'Flatten':
                err_now = err_now.get()
            n = self.vec[i]
            err_now = n.backward(err_now)

        return final_outs



    def query(self, inputs):
        '''堆叠式前向反馈'''
        # 
        inputs_now = inputs

        for n in self.vec:
            # 接力检索
            inputs_now =  n.forward(inputs_now)

        return inputs_now
    
    def get_info(self) -> dict:
        '''返回这个神经网络模型的所有信息'''
        info = {
            'type': 'Stacking Neural Network',
            'dropout_rate': self.dropout_rate,
            'layers': [n.get_info() for n in self.vec]
        }
        return info

    
    def get_interf_num(self):
        '''获取输入数量'''

        if len(self.vec) < 1:
            return -1
        
        input_num, _ = self.vec[5].get_input_shape()

        output_num, _ = self.vec[-1].get_output_shape()

        return  input_num, output_num



    def calc_err(self, targets, outputs):
        '''计算误差方法，返回误差矩阵'''

        # 误差是 (target - actual)
        return targets - outputs
            
    
    class NNExp(Exception):
        '''非法的接口，接口不匹配'''
        def __init__(self, msg: str):
            super().__init__(msg)
        def __str__(self):
            return super().__str__()

    
    class InvalidConfigure(NNExp):
        '''非法的配置，配置不匹配'''
        def __init__(self, msg: str):
            super().__init__(msg)
        def __str__(self):
            return super().__str__()

    
    class InvalidInterface(NNExp):
        '''非法的接口，接口不匹配'''
        def __init__(self, msg: str):
            super().__init__(msg)
        def __str__(self):
            return super().__str__()

    class InvalidLayerType(NNExp):
        '''非法的层类型，层类型不匹配'''
        def __init__(self, msg: str):
            super().__init__(msg)
        def __str__(self):
            return super().__str__()

###############################################
# 归一化器

class ImageNormalizer:
    """
    图像归一化工具类，提供多种图像归一化方法
    使用scikit-image库进行图像处理
    """
    
    def __init__(self, target_size=None):
        """
        初始化图像归一化器
        
        参数:
            target_size: tuple, 可选，目标图像尺寸 (width, height)
        """
        self.target_size = target_size
    
    def load_image(self, image_path):
        """
        加载图像并转换为浮点型
        
        参数:
            image_path: str, 图像文件路径
            
        返回:
            image: numpy.ndarray, 加载并转换后的图像 (H, W, C) 或 (H, W)
        """
        image = io.imread(image_path)
        # 转换为浮点型，范围[0, 1]
        image = img_as_float(image)
        #print(f"加载后图像形状: {image.shape}")
        
        # 如果指定了目标尺寸，进行图像缩放
        if self.target_size is not None:
            #print(f"原始目标尺寸参数 (宽度, 高度): {self.target_size}")
            # skimage.transform.resize的参数是 (高度, 宽度)
            resize_size = (self.target_size[1], self.target_size[0])
            #print(f"转换后resize尺寸 (高度, 宽度): {resize_size}")
            image = transform.resize(image, resize_size, anti_aliasing=True)
            #print(f"缩放后图像形状: {image.shape}")
        
        return image
    
    def min_max_normalization(self, image, min_val=0.0, max_val=1.0):
        """
        最小-最大归一化
        将图像像素值缩放到指定范围[min_val, max_val]
        
        参数:
            image: numpy.ndarray, 输入图像
            min_val: float, 归一化后的最小值
            max_val: float, 归一化后的最大值
            
        返回:
            normalized_image: numpy.ndarray, 归一化后的图像
        """
        img_min = image.min()
        img_max = image.max()
        
        # 避免除零错误
        if img_max == img_min:
            return np.full_like(image, min_val)
        
        normalized_image = (image - img_min) / (img_max - img_min) * (max_val - min_val) + min_val
        return normalized_image
    
    def mean_std_normalization(self, image, mean=None, std=None):
        """
        均值-标准差归一化
        将图像像素值转换为均值为0，标准差为1的分布
        
        参数:
            image: numpy.ndarray, 输入图像
            mean: float or None, 自定义均值，None则使用图像均值
            std: float or None, 自定义标准差，None则使用图像标准差
            
        返回:
            normalized_image: numpy.ndarray, 归一化后的图像
        """
        if mean is None:
            mean = image.mean()
        if std is None:
            std = image.std()
        
        # 避免除零错误
        if std == 0:
            return np.full_like(image, 0.0)
        
        normalized_image = (image - mean) / std
        return normalized_image
    
    def percentile_normalization(self, image, lower=1, upper=99):
        """
        百分位归一化
        使用百分位裁剪来处理异常值，然后进行最小-最大归一化
        
        参数:
            image: numpy.ndarray, 输入图像
            lower: float, 较低百分位 (0-100)
            upper: float, 较高百分位 (0-100)
            
        返回:
            normalized_image: numpy.ndarray, 归一化后的图像
        """
        # 获取百分位值
        low_val, high_val = np.percentile(image, [lower, upper])
        
        # 裁剪图像
        clipped_image = np.clip(image, low_val, high_val)
        
        # 最小-最大归一化到[0, 1]
        normalized_image = self.min_max_normalization(clipped_image, 0.0, 1.0)
        return normalized_image
    
    def histogram_equalization(self, image):
        """
        直方图均衡化
        增强图像对比度
        
        参数:
            image: numpy.ndarray, 输入图像
            
        返回:
            equalized_image: numpy.ndarray, 均衡化后的图像
        """
        if image.ndim == 3:
            # 彩色图像，对每个通道分别进行均衡化
            equalized_channels = []
            for channel in range(image.shape[2]):
                equalized_channel = exposure.equalize_hist(image[:, :, channel])
                equalized_channels.append(equalized_channel)
            equalized_image = np.stack(equalized_channels, axis=2)
        else:
            # 灰度图像
            equalized_image = exposure.equalize_hist(image)
        
        return equalized_image
    
    def adaptive_histogram_equalization(self, image, clip_limit=0.03, kernel_size=None):
        """
        自适应直方图均衡化
        局部对比度增强
        
        参数:
            image: numpy.ndarray, 输入图像
            clip_limit: float, 裁剪限制，控制对比度增强程度
            kernel_size: tuple or None, 局部区域大小
            
        返回:
            equalized_image: numpy.ndarray, 均衡化后的图像
        """
        if image.ndim == 3:
            # 彩色图像，对每个通道分别进行均衡化
            equalized_channels = []
            for channel in range(image.shape[2]):
                equalized_channel = exposure.equalize_adapthist(
                    image[:, :, channel], clip_limit=clip_limit, kernel_size=kernel_size
                )
                equalized_channels.append(equalized_channel)
            equalized_image = np.stack(equalized_channels, axis=2)
        else:
            # 灰度图像
            equalized_image = exposure.equalize_adapthist(
                image, clip_limit=clip_limit, kernel_size=kernel_size
            )
        
        return equalized_image
    
###############################################
if __name__ == "__main__":
    # C:\Users\Administrator\AppData\Local\Programs\Python\Python310\python ./nn.py -l 0.3 -t 100 -n 1000,10000,10000,1000 --gpu
    parser = argparse.ArgumentParser('This is a neural network program.')
    #######################################
    # 归一化参数
    parser.add_argument('method', type=str, choices=['minmax', 'meanstd', 'percentile', 'hist', 'adaptive_hist', 'conv_pool', 'check'],
                        help='归一化方法: minmax(最小-最大), meanstd(均值-标准差), percentile(百分位), hist(直方图均衡化), adaptive_hist(自适应直方图均衡化), conv_pool(卷积池化处理), check(检查图像属性)')
    
    parser.add_argument('--size', type=int, nargs=2, metavar=('WIDTH', 'HEIGHT'),
                        help='目标图像尺寸 (宽度 高度)', required=True)
    
    # 方法特定参数
    parser.add_argument('--min', type=float, default=0.0, help='最小-最大归一化的最小值 (默认: 0.0)')
    parser.add_argument('--max', type=float, default=1.0, help='最小-最大归一化的最大值 (默认: 1.0)')
    parser.add_argument('--lower', type=float, default=1.0, help='百分位归一化的低百分位 (默认: 1.0)')
    parser.add_argument('--upper', type=float, default=99.0, help='百分位归一化的高百分位 (默认: 99.0)')
    parser.add_argument('--clip-limit', type=float, default=0.03, help='自适应直方图均衡化的裁剪限制 (默认: 0.03)')
    #######################################
    # 神经网络参数
    parser.add_argument('-n', type=str, help='Input a list of integers splited by comma.')
    parser.add_argument('-i', type=str, help='Input a hex file as neural network model.')
    parser.add_argument('-o', type=str, help='Output the model to a file as hex.')
    parser.add_argument('-l', type=float, help='Set the learnning rate of the neural netword.', required=True)
    parser.add_argument('-t', type=int, help='How many times you want to run.', required=True)
    parser.add_argument('--gpu', action='store_true', help='Use gpu to simulate.') 
    parser.add_argument('--teacher-data', type=str, help='Teacher data input')
    parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate (0.0 to 1.0), default is 0.0 (no dropout)')
    parser.add_argument('--in-channels', type=int, default=3, help='In channels, default is 3')
    parser.add_argument('--out-channels', type=int, default=8, help='Out channels, default is 8')
    parser.add_argument('--kernel-size', type=int, default=3, help='Kernel size, default is 3')
    parser.add_argument('--padding', type=int, default=1, help='Padding, default is 0')
    parser.add_argument('--conv-stride', type=int, default=2, help='Conv stride, default is 1')
    parser.add_argument('--pool-size', type=int, default=2, help='Pool size, default is 4')
    parser.add_argument('--pool-stride', type=int, default=2, help='Pool stride, default is 3')
    parser.add_argument('-v', action='store_true', help='Verbose mode')
    args = parser.parse_args() # 解析参数
    
    # 学习率
    if args.l: learning_rate = args.l

    if args.gpu:
        gpu_mode = True
        import cupy as cp

    if args.n and args.i:   # 不能同时设置输入文件与新的模型
        print('You can not set -n and -i both onetime.')
        quit()
    if not args.n and not args.i:   # 必须输入模型
        print('You must set model.  Use -n or -i.')
        quit()

    if args.t != None:  # 学习次数
        t = args.t

    if args.i:  # 加载神经网络模型列表
        # with open(args.i, 'rb') as f:
        #     info = pickle.load(f)
        with open(args.i, 'rb') as f:
            info = msgpack.unpack(f)
            # 检查模型的形状……
            #
            if 'type' not in info.keys() or info['type'] != 'Stacking Neural Network':
                print('Invalid model file.')
                quit()

            s = StackingNN(info['dropout_rate'])
            for i in info['layers']:    # FIXME: 输出模型的信息
                if i['layer_type'] == 'Conv': 
                    #print(i.keys())
                    sample_num, in_channels, _, _ = i['inputs_shape']
                    _, out_channels, _, _ = i['outputs_shape']
                    weights = numpy.array(i['weights'])
                    bias = numpy.array(i['bias'])
                    #if gpu_mode:
                    #    weights = cp.asarray(weights)
                    #    bias = cp.asarray(bias)
                    # 形状检查
                    expected_shape = (out_channels, in_channels, *i['kernel_size'])
                    if not expected_shape == weights.shape:
                        raise StackingNN.InvalidInterface(f'Invalid interface: {i["layer_type"]} weights shape: {weights.shape}. Expected: {expected_shape}')
                    if not (out_channels,) == bias.shape:
                        raise StackingNN.InvalidInterface(f'Invalid interface: {i["layer_type"]} bias shape: {bias.shape}.')
                    # 创建卷积层，支持外部权重初始化
                    conv = Conv(in_channels, out_channels,
                     i['kernel_size'], i['stride'], i['padding'],
                     learning_rate, getattr(ActiveFunctions, i['activation_function']))
                    conv.set_weights(weights, bias)
                    s.insert(conv)
                elif i['layer_type'] == 'MaxPool':        
                    s.insert(MaxPool(i['pool_size'], i['stride']))
                elif i['layer_type'] == 'Flatten':
                    s.insert(Flatten())
                elif i['layer_type'] == 'NeuralNetwork':
                    inputs_num, _ = i['inputs_shape']
                    outputs_num,_ = i['outputs_shape']
                    weights = numpy.array(i['weights'])
                    bias = numpy.array(i['bias'])
                    if gpu_mode:
                        weights = cp.asarray(weights)
                        bias = cp.asarray(bias)
                    # 形状检查
                    if not (outputs_num,inputs_num) == weights.shape:
                        raise StackingNN.InvalidInterface('Invalid interface.')
                    if not (outputs_num,) == bias.shape:
                        raise StackingNN.InvalidInterface('Invalid interface.')
                    nn = NeuralNetwork(weights, bias, learning_rate, getattr(ActiveFunctions, i['activation_function']), args.gpu)
                    s.insert(nn)
                else:
                    raise StackingNN.InvalidLayerType('Invalid layer type.')

    if args.n:  # 创建新模型时使用的神经网络规模
        lyrs = [int(i) for i in str(args.n).split(',')]

        # 创建可堆叠神经元组
        s = StackingNN(args.dropout)
        
        # 当场计算卷积层权重 - 使用Kaiming初始化（适合ReLU）
        # FIXME: 建议封装初始化器
        #
        # 当场生成卷积层权重，不再从外部权重列表提取
        
        # 正态分布随机生成
        #conv1_weights_shape = (args.out_channels, args.in_channels, args.kernel_size, args.kernel_size)
        #conv1_weights = numpy.random.uniform(-0.5, 0.5, conv1_weights_shape)
        #conv1_bias = numpy.zeros(args.out_channels)

        # 第二个卷积层权重同样使用-0.5到0.5的均匀分布
        #conv2_weights_shape = (args.out_channels * 2, args.out_channels, args.kernel_size, args.kernel_size)
        #conv2_weights = numpy.random.uniform(-0.5, 0.5, conv2_weights_shape)
        #conv2_bias = numpy.zeros(args.out_channels * 2)


        # 计算第一个卷积层的权重形状：(out_channels, in_channels, kernel_height, kernel_width)
        conv1_weights_shape = (args.out_channels, args.in_channels, args.kernel_size, args.kernel_size)
        fan_in = args.in_channels * args.kernel_size * args.kernel_size
        std = np.sqrt(2.0 / fan_in)  # Kaiming初始化标准差
        conv1_weights = numpy.random.normal(0.0, std, conv1_weights_shape)
        conv1_bias = numpy.zeros(args.out_channels)

        # 使用Kaiming初始化第二个卷积层权重（同样适合ReLU）
        conv2_weights_shape = (args.out_channels * 2, args.out_channels, args.kernel_size, args.kernel_size)
        fan_in2 = args.out_channels * args.kernel_size * args.kernel_size
        std2 = np.sqrt(2.0 / fan_in2)
        conv2_weights = numpy.random.normal(0.0, std2, conv2_weights_shape)
        conv2_bias = numpy.zeros(args.out_channels * 2)
        
        
        print('conv1_weights mean: ', numpy.mean(conv1_weights))
        # 这两层用gpu速度很慢
        #if gpu_mode:
        #    conv1_weights = cp.asarray(conv1_weights)
        #    conv1_bias = cp.asarray(conv1_bias)
        #    conv2_weights = cp.asarray(conv2_weights)
        #    conv2_bias = cp.asarray(conv2_bias)
        
        # 创建卷积层并设置权重
        conv1 = Conv(args.in_channels, args.out_channels, args.kernel_size, args.conv_stride, args.padding, learning_rate, ActiveFunctions.relu)
        conv1.set_weights(conv1_weights, conv1_bias) 
        s.insert(conv1)
        
        s.insert(MaxPool(args.pool_size, args.pool_stride))
        
        conv2 = Conv(args.out_channels, args.out_channels*2, args.kernel_size, args.conv_stride, args.padding, learning_rate, ActiveFunctions.relu)
        conv2.set_weights(conv2_weights, conv2_bias)
        s.insert(conv2)
        
        s.insert(MaxPool(args.pool_size, args.pool_stride))
        s.insert(Flatten())
        
        # 生成全连接层权重和偏置 - 使用纯随机初始化
        weights = []
        biases = []
        for i in range(len(lyrs)-1):
            # 随机权重可能会造成训练结果的不平衡
            # 还是得考虑生态分布的方式

            if i == len(lyrs)-2:
                # 似乎输出层初始化范围不能太大，否则会出现很小的结果
                #layer_weights = numpy.random.rand(lyrs[i+1], lyrs[i]).T * 0.0625 - 0.03125

                # Xavier/Glorot 初始化：根据输入输出维度自动调整权重范围，保持方差一致
                # FIXME: 我的神经网络似乎 与它不配套
                #
                # 似乎可以用在新的softmax层(20251118)
                scale = numpy.sqrt(6.0 / (lyrs[i] + lyrs[i+1]))
                # 限制scale不超过1.0，确保范围不超出[-1.0, 1.0]
                scale = min(scale, 1.0)
                layer_weights = numpy.random.uniform(-scale, scale, (lyrs[i], lyrs[i+1]))


            else:
                #layer_weights = numpy.random.rand(lyrs[i+1], lyrs[i]).T * 0.2 - 0.1  # 减小权重初始化范围
                
                scale = numpy.sqrt(6.0 / (lyrs[i] + lyrs[i+1]))
                # 限制scale不超过1.0，确保范围不超出[-1.0, 1.0]
                scale = min(scale, 1.0)
                layer_weights = numpy.random.uniform(-scale, scale, (lyrs[i], lyrs[i+1]))

            print(layer_weights)
            weights.append(layer_weights)
            
            # 生成偏置 - 初始化为零
            layer_bias = numpy.zeros(lyrs[i+1])
            biases.append(layer_bias)

        if gpu_mode:
            weights = [cp.asarray(w) for w in weights]
            biases = [cp.asarray(b) for b in biases]

        # 构建网络
        if weights and learning_rate:
            for i in range(len(weights)):
                if lyrs[-1] > 1 and i == len(weights)-1:    # 多分类时最后一层使用softmax   (还不会用)
                    active_function = ActiveFunctions.softmax
                else:
                    active_function = ActiveFunctions.relu
                n = NeuralNetwork(weights[i].T, biases[i], learning_rate, active_function, args.gpu) 
                s.insert(n)


    
    # 获取模型的接口
    # 这个不能删除，因为后来要用到
    input_num, output_num = s.get_interf_num()
    ######################################################################
    
    # 记录教师数据路径，而不是立即加载
    teacher_data_paths = []  # 存储数据路径和分类信息
    TEACH_DATA_LENGTH = 0
    
    if args.teacher_data:
        # 检查是否是文件夹路径
        if os.path.isdir(args.teacher_data):
            # 遍历文件夹中的所有.hex文件
            image_files = glob.glob(os.path.join(args.teacher_data, '**', '*.jpg'), recursive=True) + \
                          glob.glob(os.path.join(args.teacher_data, '**', '*.JPG'), recursive=True) + \
                          glob.glob(os.path.join(args.teacher_data, '**', '*.png'), recursive=True)
            
            # 根据文件夹结构获取分类信息
            for image_file in image_files:
                # 获取文件所在的子文件夹名称作为分类
                folder_name = os.path.basename(os.path.dirname(image_file))
                
                try:
                    # 尝试将文件夹名称转换为整数分类标签
                    category = int(folder_name)
                    
                    # 记录文件路径和分类，而不是立即加载数据
                    teacher_data_paths.append({
                        'file_path': image_file,
                        'category': category,
                        'output_num': output_num  # 保存输出数量用于后续处理
                    })
                    print(f"记录文件路径：{image_file}，分类：{category}")
                    
                except ValueError:
                    print(f"警告：文件夹名称 {folder_name} 不是有效的分类标签，跳过文件 {image_file}")
                except Exception as e:
                    print(f"处理文件 {image_file} 时出错：{e}，跳过此文件") 
            
            TEACH_DATA_LENGTH = len(teacher_data_paths)
            print(f'从文件夹 {args.teacher_data} 记录了 {TEACH_DATA_LENGTH} 个教师数据路径')
            
            if TEACH_DATA_LENGTH == 0:
                print('错误：没有找到有效的教师数据文件')
                quit()
        else:
            print(f'错误：{args.teacher_data} 不是有效的文件夹路径')
            quit()

    start = time.time() 

    # 创建归一化器实例
    normalizer = ImageNormalizer(target_size=args.size)

    # 进行训练
    for i in range(t):

        group = teacher_data_paths.copy()
        teach_data_length = len(group)

        err_max_count = 0
        err_mean_count = 0

        for j in range(TEACH_DATA_LENGTH).__reversed__():

            #ridx = j # 按顺序给数据

            # FIXME: 仅在某个类别中进行随机抽取效果可能会更好（）
            ridx = random.randint(0, len(group)-1)

            path_info = group[ridx]
            group.remove(path_info)
            
            image_path = path_info['file_path']
            category = path_info['category']
            output_num = path_info['output_num']
                
            # 加载图像（其他方法需要）
            image = normalizer.load_image(image_path)
            
            # 传统归一化方法
            if args.method == 'minmax':
                normalized_image = normalizer.min_max_normalization(image, args.min, args.max)
            elif args.method == 'meanstd':
                normalized_image = normalizer.mean_std_normalization(image)
            elif args.method == 'percentile':
                normalized_image = normalizer.percentile_normalization(image, args.lower, args.upper)
            elif args.method == 'hist':
                normalized_image = normalizer.histogram_equalization(image)
            elif args.method == 'adaptive_hist':
                normalized_image = normalizer.adaptive_histogram_equalization(image, clip_limit=args.clip_limit)
            
            try:
                    
                # 创建目标输出数组（one-hot编码）
                targets = [0.0 for _ in range(output_num)]
                # 确保分类索引不超出范围
                if category < output_num:
                    targets[category] = 0.99
                
                # 
                targets = np.array(targets, ndmin=2).T
                if gpu_mode:
                    targets = cp.asarray(targets)
                
                # 将(400, 300, 3)转换为(1, 3, 300, 400)
                # 1. 增加batch维度 (400, 300, 3) -> (1, 400, 300, 3)
                # 2. 转换通道顺序 (1, 400, 300, 3) -> (1, 3, 300, 400)
                x = np.expand_dims(normalized_image, axis=0)  # 增加batch维度
                x = np.transpose(x, (0, 3, 2, 1))   # 转换为(batch, channels, height, width)格式


                
            except Exception as e:
                print(f"加载文件 {hex_file} 时出错：{e}，跳过此文件")

            outs = s.train(x, targets)
            # 计算误差的绝对值数组求平均数
            # FIXME: 有时所有结果都是空的，但是却显示误差也接近0
            abs_errors = numpy.abs(targets - outs)
            err = numpy.mean(abs_errors)
            err_max = numpy.max(abs_errors)

            if args.v:    # 在verbose模式下打印详细信息
                print('教师数据', targets, '原始输出：\n', outs)
                # 保留4位小数
                print(f'第({i}, {TEACH_DATA_LENGTH-j})次训练误差：{err:.4f}，最大误差：{err_max:.4f}')
            
            # 防止数据集过大造成变量溢出
            err_max_count += err_max / TEACH_DATA_LENGTH
            err_mean_count += err / TEACH_DATA_LENGTH
        
        # 保留4位小数
        print(f'第({i})轮训练误差：{err_mean_count:.4f}，最大误差：{err_max_count:.4f}')


    end = time.time()   # 计时
    print(f'训练时间：{end - start}s')
    ######################################################################

    # if args.o:  # 储存训练过后的网络
    #     info = s.get_info()
    #     with open(args.o, 'wb') as f:
    #         pickle.dump(info, f)

    if args.o:  # 使用MessagePack储存训练后的网络
        info = s.get_info()
        with open(args.o, 'wb') as f:
            msgpack.pack(info, f)
