'''

【人工神经网络】
20251117(qing): 输入如下内容创建新的模型并且训练


python nn.py -h    查看帮助
python nn.py --method minmax --size 400 200 -l 0.01 -t 2 -n c3,c8,c16,c32,c64,64,128,128,64,32,16,8,4,sft2 -o 20251126_0_1000.model --teacher-data-image=dataSet\human_training --plot=./400x200_20251127_0_2.png -v

NOTE: 名为“其他所有”的样品数量应该多于有明确名字的样品数量。（样品不对称，拟合过程不对称）
NOTE: ———：不知道它成不成立。



'''

from math import fabs
import random
import numpy
import numpy as np
import scipy.special
import argparse
import time
import os
import glob
import msgpack
from abc import ABC, abstractmethod 
from td import get_file_list
from td import load_data
from bot import CamTh, I2cTh
# 全局变量，它应该在main()中进行初始化
gpu_mode = False


class ActiveFunctions:
    '''Collection of stateless activation functions and their derivatives.

    Each activation maps a numpy array to an array of the same shape
    (softmax returns a probability distribution over all elements).
    '''

    # FIXME: unclear why, but sigmoid seems to be the only one that trains
    # reliably in practice -- investigate.
    @staticmethod
    def sigmoid(x):
        '''Logistic sigmoid 1/(1+exp(-x)); expit is numerically stable.'''
        return scipy.special.expit(x)

    @staticmethod
    def tanh(x):
        '''Hyperbolic tangent; output in [-1, 1], centered at 0.'''
        return numpy.tanh(x)

    @staticmethod
    def relu(x):
        '''Rectified linear unit max(0, x); cheap, mitigates vanishing gradients.'''
        return numpy.maximum(0, x)

    @staticmethod
    def leaky_relu(x):
        '''ReLU with a small negative slope (0.01) to avoid dead neurons.'''
        return numpy.maximum(0.01 * x, x)

    @staticmethod
    def elu(x):
        '''Exponential linear unit (alpha=0.1); negative outputs pull the mean toward 0.'''
        return numpy.where(x > 0, x, 0.1 * (numpy.exp(x) - 1))

    @staticmethod
    def identity(x):
        '''Identity activation: no transformation.'''
        return x

    @staticmethod
    def softmax(x):
        '''Softmax over all elements, shifted by max(x) for numerical stability.'''
        exp_x = np.exp(x - np.max(x))
        return exp_x / np.sum(exp_x)

    @staticmethod
    def softmax1(scores):
        '''Softmax variant without the max-shift, rounded to 4 decimals.'''
        exp_scores = np.exp(scores)
        probabilities = exp_scores / np.sum(exp_scores)
        return np.round(probabilities, 4)

    # Softmax Jacobian adapted from:
    # https://www.nowcoder.com/discuss/801943637457137664 (牛客网, 上海最后深情)
    @staticmethod
    def _activation_derivative(activation_function, x):
        '''Return the derivative of *activation_function* at pre-activation x.

        Element-wise activations yield an array shaped like x; softmax yields
        the full (n, n) Jacobian matrix of the flattened input.

        Raises:
            ValueError: if the activation function is not supported.
        '''
        name = activation_function.__name__
        if name == 'sigmoid':
            # d/dx sigmoid = s * (1 - s)
            s = 1 / (1 + np.exp(-x))
            return s * (1 - s)

        elif name == 'tanh':
            # d/dx tanh = 1 - tanh(x)^2
            t = np.tanh(x)
            return 1 - t * t

        elif name == 'relu':
            return np.where(x > 0, 1.0, 0.0)

        elif name == 'leaky_relu':
            return np.where(x > 0, 1.0, 0.01)

        elif name == 'elu':
            # d/dx elu = 1 for x > 0, else alpha * exp(x) with alpha = 0.1
            return np.where(x > 0, 1.0, 0.1 * np.exp(x))

        elif name == 'identity':
            return np.ones_like(x)

        elif name == 'softmax':
            # Full Jacobian: J[i,j] = y_i*(1-y_j) if i==j else -y_i*y_j.
            y = activation_function(x)
            y = y.flatten()
            n = y.shape[0]
            jacobian = np.zeros((n, n))
            if gpu_mode:
                # NOTE(review): `cp` (cupy) is assumed to be importable when
                # gpu_mode is enabled elsewhere -- confirm before enabling.
                jacobian = cp.array(jacobian)
            for i in range(n):
                for j in range(n):
                    if i == j:
                        jacobian[i, j] = y[i] * (1 - y[j])
                    else:
                        jacobian[i, j] = -y[i] * y[j]
            return jacobian

        else:
            raise ValueError("不支持的激活函数")

# FIXME: keep only the essential method get_info()
class Layer(ABC):
    '''Abstract interface implemented by every stackable network layer.'''

    @abstractmethod
    def get_info(self) -> dict:
        '''Return a serializable description of this layer (weights, bias, ...).'''
        ...

    @abstractmethod
    def forward(self, inputs):
        '''Run the forward pass and return this layer's output.'''
        ...

    @abstractmethod
    def backward(self, errors):
        '''Run the backward pass: update parameters and return the previous
        layer's errors (so layers can be stacked).'''
        ...

class NeuralNetwork(Layer):
    '''Fully connected layer: activation(W @ x + b).

    Error convention throughout this file is (target - output), so parameter
    updates are applied with a PLUS sign (see update()).
    '''

    def get_info(self) -> dict:
        '''Return a serializable description of the layer (weights, bias, ...).'''
        return {
            'layer_type': 'NeuralNetwork',
            'inputs_shape': [self.inodes, 1],
            'outputs_shape': [self.onodes, 1],
            'weights': self.weights.tolist(),
            'bias': self.bias.tolist(),
            'activation_function': self.activation_function.__name__,
            'learning_rate': self.lr
        }

    def get_output_shape(self) -> tuple:
        '''Return the output shape as (output_nodes, 1).'''
        return (self.onodes, 1)

    def get_input_shape(self) -> tuple:
        '''Return the input shape as (input_nodes, 1).'''
        return (self.inodes, 1)

    def set_bias(self, bias):
        '''Replace the bias vector.'''
        self.bias = bias

    def __init__(self, weights, bias, learning_rate: float,
                 activation_function=ActiveFunctions.sigmoid,
                 gpu_mode=False):
        '''
        Args:
            weights: (output_nodes, input_nodes) weight matrix.
            bias: (output_nodes,) bias vector.
            learning_rate: step size used by update().
            activation_function: one of the ActiveFunctions static methods.
            gpu_mode: when True, use cupy (`cp`) instead of numpy.
                NOTE(review): `cp` must be importable in that case -- confirm.
        '''
        self.onodes, self.inodes = weights.shape
        self.weights = weights
        self.bias = bias
        self.lr = learning_rate
        self.activation_function = activation_function
        self.gpu_mode = gpu_mode
        self.np = cp if gpu_mode else np

        # Caches filled by forward() and consumed by backward().
        self.pre_inputs = None
        self.pre_activation_inputs = None
        self.pre_outputs = None

    def forward(self, inputs):
        '''Forward pass. inputs is (input_nodes, batch); returns (output_nodes, batch).'''
        _inputs = self.weights @ inputs + self.bias.reshape(-1, 1)
        outputs = self.activation_function(_inputs)

        self.pre_inputs = inputs
        self.pre_activation_inputs = _inputs
        self.pre_outputs = outputs

        return outputs

    def backward(self, errors):
        '''Backward pass: update parameters, return errors for the previous layer.'''
        batch_size = errors.shape[1]
        inputs = self.pre_inputs

        # Softmax needs the full Jacobian; everything else is element-wise.
        if self.activation_function.__name__ == 'softmax':
            return self._backward_softmax(errors, batch_size, inputs)
        else:
            return self._backward_standard(errors, batch_size, inputs)

    def _backward_standard(self, errors, batch_size, inputs):
        '''Backward pass for element-wise activations (sigmoid, relu, ...).'''
        activation_derivative = ActiveFunctions._activation_derivative(
            self.activation_function, self.pre_activation_inputs
        )

        first = errors * activation_derivative
        self.weights_grad = (first @ inputs.T) / batch_size
        self.bias_grad = self.np.sum(first, axis=1) / batch_size
        uplevel_errors = self.weights.T @ first

        self.update()
        return uplevel_errors

    def _backward_softmax(self, errors, batch_size, inputs):
        '''Backward pass for softmax using one (n, n) Jacobian per sample.'''
        weights_grad = self.np.zeros_like(self.weights)
        bias_grad = self.np.zeros_like(self.bias)
        uplevel_errors = self.np.zeros((self.inodes, batch_size))

        for i in range(batch_size):
            sample_input = inputs[:, i:i+1]
            sample_error = errors[:, i:i+1]

            # BUGFIX: _activation_derivative applies the activation itself, so
            # it must receive the PRE-activation logits. The previous code
            # passed the post-activation outputs, computing softmax twice.
            jacobian = ActiveFunctions._activation_derivative(
                self.activation_function, self.pre_activation_inputs[:, i]
            )

            grad_contribution = jacobian @ sample_error
            weights_grad += grad_contribution @ sample_input.T
            bias_grad += grad_contribution.flatten()
            uplevel_errors[:, i:i+1] = self.weights.T @ grad_contribution

        self.weights_grad = weights_grad / batch_size
        self.bias_grad = bias_grad / batch_size

        self.update()
        return uplevel_errors

    def update(self):
        '''Apply the cached gradients.

        With error = (target - output), the update direction for BOTH weights
        and bias is '+' (the original author verified the '+' for weights).
        '''
        self.weights += self.lr * self.weights_grad
        # BUGFIX: was '-='; the bias must move in the same direction as the
        # weights under the (target - output) error convention.
        self.bias += self.lr * self.bias_grad

###############################################
#  CNN 卷积神经网络

class Conv(Layer):
    '''2D convolution layer implemented via im2col + matrix multiplication.'''

    def get_info(self) -> dict:
        '''Return a serializable description of the layer (weights, bias, ...).'''
        return {
            'layer_type': 'Conv',
            'in_channels': self.in_channels,
            'out_channels': self.out_channels,
            'weights': self.weights.tolist(),
            'bias': self.bias.tolist(),
            'kernel_size': self.kernel_size,
            'stride': self.stride,
            'padding': self.padding,
            'activation_function': self.activation_function.__name__,
            'learning_rate': self.learning_rate,
        }

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, learning_rate=0.01, activation_function=ActiveFunctions.relu):
        '''
        Args:
            in_channels: number of input feature maps.
            out_channels: number of filters.
            kernel_size: int or (kh, kw) tuple.
            stride: convolution stride.
            padding: zero padding applied to both spatial dimensions.
            learning_rate: step size used by update().
            activation_function: applied element-wise after the convolution
                (may be None for no activation).
        '''
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.learning_rate = learning_rate

        self.activation_function = activation_function

        # Weights and bias are injected from outside via set_weights().
        self.weights = None
        self.bias = None

        # Cached shapes and intermediates for backward().
        self._input_shape = None
        self._output_shape = None

        self.input = None
        self.col = None
        self.pre_activation_output = None  # pre-activation output, used in backward()

    def set_weights(self, weights, bias):
        """Set filter weights (out_c, in_c, kh, kw) and bias (out_c,)."""
        self.weights = weights
        self.bias = bias

    def im2col(self, x):
        """Vectorized im2col.

        Returns (col, out_height, out_width) where col has shape
        (batch, in_c * kh * kw, out_h * out_w) and each COLUMN of col[b] is
        one receptive field in (channel, kh, kw) row-major order -- the same
        layout assumed by col2im() and by weights.reshape(out_c, -1).
        """
        batch_size, in_channels, in_height, in_width = x.shape

        # Output spatial size.
        padded_height = in_height + 2 * self.padding
        padded_width = in_width + 2 * self.padding

        out_height = (padded_height - self.kernel_size[0]) // self.stride + 1
        out_width = (padded_width - self.kernel_size[1]) // self.stride + 1

        # Apply zero padding.
        if self.padding > 0:
            x_padded = np.pad(x, ((0, 0), (0, 0),
                                (self.padding, self.padding),
                                (self.padding, self.padding)), mode='constant')
        else:
            x_padded = x

        # Zero-copy sliding-window view via stride tricks.
        strides = (x_padded.strides[0],
                x_padded.strides[1],
                self.stride * x_padded.strides[2],
                self.stride * x_padded.strides[3],
                x_padded.strides[2],
                x_padded.strides[3])

        shape = (batch_size, in_channels, out_height, out_width,
                self.kernel_size[0], self.kernel_size[1])

        patches = np.lib.stride_tricks.as_strided(
            x_padded, shape=shape, strides=strides, writeable=False
        )
        # BUGFIX: reorder axes to (b, c, kh, kw, out_h, out_w) BEFORE reshaping.
        # A direct reshape of (b, c, out_h, out_w, kh, kw) does not put one
        # receptive field per column (it only coincides in degenerate cases)
        # and disagreed with the (c, kh, kw) layout col2im() expects.
        patches = patches.transpose(0, 1, 4, 5, 2, 3)
        col = patches.reshape(batch_size, in_channels * self.kernel_size[0] * self.kernel_size[1],
                            out_height * out_width)

        return col, out_height, out_width

    def col2im(self, col, input_shape):
        """Scatter-add column gradients back into image layout (inverse of im2col)."""
        batch_size, in_channels, in_height, in_width = input_shape

        # Accumulate into the padded image, then crop.
        if self.padding > 0:
            dx_padded = np.zeros((batch_size, in_channels,
                                in_height + 2 * self.padding,
                                in_width + 2 * self.padding))
        else:
            dx_padded = np.zeros((batch_size, in_channels, in_height, in_width))

        out_height = (in_height + 2 * self.padding - self.kernel_size[0]) // self.stride + 1
        out_width = (in_width + 2 * self.padding - self.kernel_size[1]) // self.stride + 1

        for b in range(batch_size):
            col_idx = 0
            for i in range(0, in_height + 2 * self.padding - self.kernel_size[0] + 1, self.stride):
                for j in range(0, in_width + 2 * self.padding - self.kernel_size[1] + 1, self.stride):
                    patch = col[b, :, col_idx].reshape(in_channels, self.kernel_size[0], self.kernel_size[1])
                    dx_padded[b, :, i:i+self.kernel_size[0], j:j+self.kernel_size[1]] += patch
                    col_idx += 1

        # Remove padding.
        if self.padding > 0:
            return dx_padded[:, :, self.padding:-self.padding, self.padding:-self.padding]
        return dx_padded

    def forward(self, x):
        """Forward pass: convolution + bias, then the activation function."""
        # Cache the input for backward().
        self.input = x
        self._input_shape = x.shape
        batch_size = x.shape[0]

        # im2col transform.
        self.col, out_height, out_width = self.im2col(x)

        self._output_shape = (batch_size, self.out_channels, out_height, out_width)

        # Each filter becomes one row: (out_c, in_c*kh*kw).
        weights_2d = self.weights.reshape(self.out_channels, -1)

        output = np.zeros((batch_size, self.out_channels, out_height, out_width))

        for b in range(batch_size):
            # Convolution as a single matrix product per sample.
            conv_result = weights_2d @ self.col[b]  # (out_channels, out_h * out_w)
            conv_result = conv_result.reshape(self.out_channels, out_height, out_width)
            conv_result += self.bias.reshape(-1, 1, 1)  # add bias

            output[b] = conv_result

        # Keep pre-activation values for the backward pass.
        self.pre_activation_output = output.copy()

        # Apply the activation function.
        if self.activation_function is not None:
            output = self.activation_function(output)

        return output

    def backward(self, dout):
        """Backward pass: accumulate gradients, update parameters, return dx."""
        batch_size = dout.shape[0]

        # Chain through the activation derivative first.
        if self.activation_function is not None:
            activation_derivative = ActiveFunctions._activation_derivative(
                self.activation_function, self.pre_activation_output)
            dout = dout * activation_derivative

        # Initialize gradients.
        self.weights_grad = np.zeros_like(self.weights)
        self.bias_grad = np.zeros_like(self.bias)

        weights_2d = self.weights.reshape(self.out_channels, -1)

        # Per-sample gradient accumulation (no spatial normalization,
        # normalized by batch size only -- see below).
        for b in range(batch_size):
            dout_reshaped = dout[b].reshape(self.out_channels, -1)
            weights_grad_2d = dout_reshaped @ self.col[b].T
            self.weights_grad += weights_grad_2d.reshape(self.weights.shape)
            self.bias_grad += np.sum(dout[b], axis=(1, 2))

        # Normalize by batch size only.
        self.weights_grad /= batch_size
        self.bias_grad /= batch_size

        # Gradient w.r.t. the input, via transposed weights and col2im.
        dx = np.zeros_like(self.input)
        for b in range(batch_size):
            dout_reshaped = dout[b].reshape(self.out_channels, -1)
            dx_col = weights_2d.T @ dout_reshaped
            dx_col_reshaped = dx_col.reshape(1, dx_col.shape[0], dx_col.shape[1])
            dx[b] = self.col2im(dx_col_reshaped, self.input[b:b+1].shape)[0]

        self.update()
        return dx

    def update(self):
        """Apply cached gradients.

        Error convention is (target - output), so both parameters move with a
        '+' sign, matching NeuralNetwork.update().
        """
        if hasattr(self, 'weights_grad') and self.weights_grad is not None:
            self.weights += self.learning_rate * self.weights_grad

        if hasattr(self, 'bias_grad') and self.bias_grad is not None:
            # BUGFIX: was '-='; must match the '+' direction used for weights.
            self.bias += self.learning_rate * self.bias_grad
            
class MaxPool(Layer):
    '''2D max-pooling layer (has no learnable parameters).'''

    def get_info(self) -> dict:
        '''Return a serializable description of the layer.'''
        return {
            'layer_type': 'MaxPool',
            'pool_size': self.pool_size,
            'stride': self.stride,
            'padding': self.padding,
        }

    def __init__(self, pool_size=2, stride=2, padding=0):
        self.pool_size = pool_size
        self.stride = stride
        self.padding = padding
        self.input = None
        self._input_shape = None
        self._output_shape = None

    def forward(self, x):
        '''Take the maximum of every pooling window; cache argmax positions.'''
        self.input = x
        batch, chans, height, width = x.shape
        self._input_shape = x.shape

        # Output spatial size.
        out_h = (height + 2 * self.padding - self.pool_size) // self.stride + 1
        out_w = (width + 2 * self.padding - self.pool_size) // self.stride + 1
        self._output_shape = (batch, chans, out_h, out_w)

        # Zero-pad the spatial dimensions if requested.
        if self.padding > 0:
            pad_spec = ((0, 0), (0, 0),
                        (self.padding, self.padding),
                        (self.padding, self.padding))
            padded = np.pad(x, pad_spec, mode='constant')
        else:
            padded = x

        output = np.zeros((batch, chans, out_h, out_w), dtype=padded.dtype)
        # Absolute (row, col) of each window's maximum inside the padded input;
        # ties resolve to the first element in row-major order, like argmax.
        max_indices = np.zeros((batch, chans, out_h, out_w, 2), dtype=int)

        for b in range(batch):
            for c in range(chans):
                for oh in range(out_h):
                    for ow in range(out_w):
                        r0 = oh * self.stride
                        c0 = ow * self.stride
                        window = padded[b, c, r0:r0 + self.pool_size,
                                        c0:c0 + self.pool_size]
                        dr, dc = divmod(int(np.argmax(window)), self.pool_size)
                        output[b, c, oh, ow] = window[dr, dc]
                        max_indices[b, c, oh, ow] = (r0 + dr, c0 + dc)

        self.cache = (padded, max_indices)
        return output

    def backward(self, dout):
        '''Route each upstream gradient back to the position that produced the max.'''
        padded, max_indices = self.cache
        batch, chans, out_h, out_w = dout.shape

        # Gradient w.r.t. the padded input.
        grad_padded = np.zeros_like(padded)

        for b in range(batch):
            for c in range(chans):
                for oh in range(out_h):
                    for ow in range(out_w):
                        row, col = max_indices[b, c, oh, ow]
                        grad_padded[b, c, row, col] += dout[b, c, oh, ow]

        # Crop away the padding, if any.
        if self.padding > 0:
            p = self.padding
            return grad_padded[:, :, p:-p, p:-p]
        return grad_padded

class Flatten(Layer):
    '''Flattens (batch, C, H, W) feature maps into (features, batch) columns.'''

    def get_info(self) -> dict:
        '''Return a serializable description of the layer.'''
        return {
            'layer_type': 'Flatten',
        }

    def forward(self, x):
        '''Flatten each sample, then transpose to (features, batch).'''
        batch_size = x.shape[0]
        self.cache = x.shape
        output = x.reshape(batch_size, -1).T
        return output

    def backward(self, dout):
        '''Restore the cached input shape.

        dout arrives as (features, batch), mirroring forward()'s output.
        '''
        # BUGFIX: undo the transpose before reshaping; a plain reshape of the
        # (features, batch) array was only coincidentally correct for
        # batch_size == 1 and scrambled samples for larger batches.
        return dout.T.reshape(self.cache)

###############################################
#  可堆叠神经网络

class StackingNN:
    '''A neural network built by stacking Layer instances in order.'''

    def insert(self, layer: Layer):
        '''Append a layer to the network.'''
        self.vec.append(layer)

    def __init__(self, dropout_rate=0.0):
        '''Construct an empty stacked network.

        Args:
            dropout_rate: probability of zeroing a neuron during training.
                NOTE(review): only recorded/serialized here; nothing in this
                class applies it -- confirm whether dropout is implemented
                elsewhere.
        '''
        # Ordered list of layers.
        self.vec = []
        self.dropout_rate = dropout_rate

    def train(self, inputs, targets: list):
        '''Run one forward + backward pass; return the final outputs.'''
        inputs_now = inputs

        # Forward through every layer; when gpu_mode is on, data is moved to
        # the GPU at the Flatten boundary (conv layers run on CPU numpy).
        for n in self.vec:
            if gpu_mode and n.__class__.__name__ == 'Flatten':
                inputs_now = cp.asarray(inputs_now)
            inputs_now = n.forward(inputs_now)

        final_outs = inputs_now

        # Backward pass, last layer first; each layer updates itself and
        # returns the error for its predecessor.
        err_now = self.calc_err(targets, final_outs)
        for i in reversed(range(len(self.vec))):
            if gpu_mode and self.vec[i].__class__.__name__ == 'Flatten':
                err_now = err_now.get()
            err_now = self.vec[i].backward(err_now)

        return final_outs

    def query(self, inputs):
        '''Forward pass only (inference); returns a numpy array.'''
        inputs_now = inputs

        for n in self.vec:
            # BUGFIX: layers define no get_type() method; test the class name
            # instead, consistent with train(). The old code raised
            # AttributeError whenever gpu_mode was enabled.
            if gpu_mode and n.__class__.__name__ == 'Flatten':
                inputs_now = cp.asarray(inputs_now)
            inputs_now = n.forward(inputs_now)

        if gpu_mode:
            inputs_now = inputs_now.get()

        return inputs_now

    def get_info(self) -> dict:
        '''Return a serializable description of the whole model.'''
        info = {
            'type': 'Stacking Neural Network',
            'dropout_rate': self.dropout_rate,
            'layers': [n.get_info() for n in self.vec]
        }
        return info

    def get_output_num(self):
        '''Return the output count of the LAST layer, or -1 if the net is empty.'''
        if len(self.vec) < 1:
            return -1

        output_num, _ = self.vec[-1].get_output_shape()
        return output_num

    def calc_err(self, targets, outputs):
        '''Return the error matrix, defined as (target - actual).'''
        return targets - outputs

    class NNExp(Exception):
        '''Base exception for stacked-network errors.'''
        def __init__(self, msg: str):
            super().__init__(msg)

    class InvalidConfigure(NNExp):
        '''Raised when a configuration is inconsistent.'''
        def __init__(self, msg: str):
            super().__init__(msg)

    class InvalidInterface(NNExp):
        '''Raised when adjacent layers' shapes do not match.'''
        def __init__(self, msg: str):
            super().__init__(msg)

    class InvalidLayerType(NNExp):
        '''Raised when an unknown layer type is encountered.'''
        def __init__(self, msg: str):
            super().__init__(msg)

    @staticmethod
    def crt_nn_by_info(info: dict):
        '''Rebuild a StackingNN from the dict produced by get_info().

        NOTE(review): relies on the module-level globals `learning_rate`,
        `args` and `gpu_mode` (and `cp` when gpu_mode is on) being set by
        main() before this is called -- confirm.
        '''
        # Basic sanity check on the model file.
        if 'type' not in info.keys() or info['type'] != 'Stacking Neural Network':
            print('Invalid model file.')
            quit()

        s = StackingNN(info['dropout_rate'])
        for i in info['layers']:
            if i['layer_type'] == 'Conv':
                in_channels = i['in_channels']
                out_channels = i['out_channels']
                weights = numpy.array(i['weights'])
                bias = numpy.array(i['bias'])
                # Shape checks before wiring the layer in.
                expected_shape = (out_channels, in_channels, *i['kernel_size'])
                if not expected_shape == weights.shape:
                    raise StackingNN.InvalidInterface(f'Invalid interface: {i["layer_type"]} weights shape: {weights.shape}. Expected: {expected_shape}')
                if not (out_channels,) == bias.shape:
                    raise StackingNN.InvalidInterface(f'Invalid interface: {i["layer_type"]} bias shape: {bias.shape}.')
                # Conv layers take externally initialized weights.
                conv = Conv(in_channels, out_channels,
                    i['kernel_size'], i['stride'], i['padding'],
                    learning_rate, getattr(ActiveFunctions, i['activation_function']))
                conv.set_weights(weights, bias)
                s.insert(conv)
            elif i['layer_type'] == 'MaxPool':
                s.insert(MaxPool(i['pool_size'], i['stride']))
            elif i['layer_type'] == 'Flatten':
                s.insert(Flatten())
            elif i['layer_type'] == 'NeuralNetwork':
                inputs_num, _ = i['inputs_shape']
                outputs_num, _ = i['outputs_shape']
                weights = numpy.array(i['weights'])
                bias = numpy.array(i['bias'])
                if gpu_mode:
                    weights = cp.asarray(weights)
                    bias = cp.asarray(bias)
                # Shape checks.
                if not (outputs_num, inputs_num) == weights.shape:
                    raise StackingNN.InvalidInterface('Invalid interface.')
                if not (outputs_num,) == bias.shape:
                    raise StackingNN.InvalidInterface('Invalid interface.')
                nn = NeuralNetwork(weights, bias, learning_rate, getattr(ActiveFunctions, i['activation_function']), args.gpu)
                s.insert(nn)
            else:
                raise StackingNN.InvalidLayerType('Invalid layer type.')
        return s

    @staticmethod
    def _crt_dense(in_num, out_num, active_function, gpu):
        '''Create a fully connected layer with Xavier/Glorot-initialized weights.

        Helper shared by crt_nn_by_args(); uses the module-level globals
        `learning_rate` and `gpu_mode` like the original inline code did.
        '''
        # Xavier/Glorot: scale by fan-in + fan-out to keep the activation
        # variance stable; cap at 1.0 so weights stay within [-1.0, 1.0].
        scale = min(numpy.sqrt(6.0 / (in_num + out_num)), 1.0)
        layer_weights = numpy.random.uniform(-scale, scale, (in_num, out_num))

        # Bias starts at zero.
        layer_bias = numpy.zeros(out_num)

        print('nn_weights mean: ', numpy.mean(layer_weights))
        print('nn bias mean: ', numpy.mean(layer_bias))

        if gpu_mode:
            layer_weights = cp.asarray(layer_weights)
            layer_bias = cp.asarray(layer_bias)

        # NeuralNetwork expects (out, in) weights, hence the transpose.
        return NeuralNetwork(layer_weights.T, layer_bias, learning_rate, active_function, gpu)

    @staticmethod
    def crt_nn_by_args(args: argparse.Namespace):
        '''Build a StackingNN from the -n layer-spec string.

        Example "c3,c8,c16,64,32,sft2": cN tokens produce Conv+MaxPool pairs,
        a c->dense boundary inserts a Flatten, plain numbers are dense layer
        sizes (ReLU), and the final token picks the output activation
        (sgmN = sigmoid, sftN = softmax, N outputs).
        '''
        # Split the layer configuration string.
        lyrs = [item for item in str(args.n).split(',')]

        s = StackingNN(args.dropout)

        for i in range(len(lyrs) - 1):

            if lyrs[i].startswith('c') and lyrs[i+1].startswith('c'):
                # conv -> conv: add a Conv + MaxPool pair.
                # FIXME: could support richer per-layer settings here.
                in_channels = int(lyrs[i][1:])
                out_channels = int(lyrs[i+1][1:])

                # Kaiming (He) initialization, suited to the ReLU activation.
                conv_weights_shape = (out_channels, in_channels, args.kernel_size, args.kernel_size)
                fan_in = in_channels * args.kernel_size * args.kernel_size
                std = np.sqrt(2.0 / fan_in)
                conv_weights = numpy.random.normal(0.0, std, conv_weights_shape)
                conv_bias = numpy.zeros(out_channels)
                print('conv_weights mean: ', numpy.mean(conv_weights))
                print('conv_bias mean: ', numpy.mean(conv_bias))

                conv = Conv(in_channels, out_channels, args.kernel_size, args.conv_stride, args.padding, learning_rate, ActiveFunctions.relu)
                conv.set_weights(conv_weights, conv_bias)
                s.insert(conv)
                s.insert(MaxPool(args.pool_size, args.pool_stride))

            elif lyrs[i].startswith('c') and not lyrs[i+1].startswith('c'):
                # conv -> dense boundary.
                s.insert(Flatten())

            elif lyrs[i].isdigit() and i != len(lyrs)-2 and lyrs[i+1].isdigit():
                # Hidden dense layer (ReLU).
                s.insert(StackingNN._crt_dense(int(lyrs[i]), int(lyrs[i+1]), ActiveFunctions.relu, args.gpu))

            elif lyrs[i].isdigit() and i == len(lyrs)-2 and lyrs[i+1].isdigit():
                # The last token must carry an activation label (sgmN / sftN).
                raise ValueError("The last layer should take a lable parameter.")

            elif lyrs[i].isdigit() and i == len(lyrs)-2 and lyrs[i+1].startswith('sgm'):
                # Output layer with sigmoid activation.
                s.insert(StackingNN._crt_dense(int(lyrs[i]), int(lyrs[i+1][3:]), ActiveFunctions.sigmoid, args.gpu))

            elif lyrs[i].isdigit() and i == len(lyrs)-2 and lyrs[i+1].startswith('sft'):
                # Output layer with softmax activation.
                s.insert(StackingNN._crt_dense(int(lyrs[i]), int(lyrs[i+1][3:]), ActiveFunctions.softmax, args.gpu))

        return s




def get_model_info(model_name: str):
    '''Load and return a model-info dict from a msgpack file.'''
    with open(model_name, 'rb') as model_file:
        return msgpack.unpack(model_file)

def save_model_info(model_name: str, info: dict):
    '''Serialize a model-info dict into a msgpack file.'''
    with open(model_name, 'wb') as model_file:
        msgpack.pack(info, model_file)

def get_imgs_list(target_dir: str):
    '''
    Recursively collect every image file under a directory.

    Args:
        target_dir (str): directory to search.

    Returns:
        list: image-file entries as produced by get_file_list().
    '''
    # Abort early if the path is not a directory.
    if not os.path.isdir(target_dir):
        print(f'错误：{target_dir} 不是有效的文件夹路径')
        quit()

    # Search each supported image extension in a fixed order so the combined
    # list matches the original jpg/JPG/png concatenation.
    image_files = []
    for pattern in ('*.jpg', '*.JPG', '*.png'):
        image_files += glob.glob(os.path.join(target_dir, '**', pattern), recursive=True)
    return get_file_list(image_files)

def get_sgf_list(target_dir: str):
    '''
    Recursively collect every SGF file under a directory.

    Args:
        target_dir (str): directory to search.

    Returns:
        list: SGF-file entries as produced by get_file_list().
    '''
    # Abort early if the path is not a directory.
    if not os.path.isdir(target_dir):
        print(f'错误：{target_dir} 不是有效的文件夹路径')
        quit()

    # Search both case variants in a fixed order so the combined list matches
    # the original sgf/SGF concatenation.
    sgf_files = []
    for pattern in ('*.sgf', '*.SGF'):
        sgf_files += glob.glob(os.path.join(target_dir, '**', pattern), recursive=True)
    return get_file_list(sgf_files)



def train_imgs(imgs_list: list, s: NeuralNetwork, args: argparse.Namespace):
    '''
    Train the neural network on labelled image files.

    Parameters:
    imgs_list (list): dicts with 'file_path' and 'category' keys.
    s (NeuralNetwork): network stack, trained in place via s.train().
    args (argparse.Namespace): CLI options; uses args.t (epoch count),
        args.v (verbose) and the normalization options consumed by load_data.

    Returns:
    tuple: (errs, errs_max) — per-epoch mean and max squared error so the
        caller can plot them.  (These used to stay in local scope, which made
        the --plot branch in __main__ fail with NameError.)
    '''
    errs = []       # per-epoch mean squared error
    errs_max = []   # per-epoch max squared error
    TEACH_DATA_LENGTH = len(imgs_list)
    if TEACH_DATA_LENGTH == 0:  # guard: avoid division by zero below
        print('警告：训练集为空')
        return errs, errs_max

    # one-hot width taken from the model itself instead of the module global
    output_num = s.get_output_num()

    start = time.time()
    for i in range(args.t):  # args.t carries the same value as the global `t`
        group = imgs_list.copy()
        err_max_count = 0
        err_mean_count = 0
        for j in reversed(range(TEACH_DATA_LENGTH)):

            # FIXME: random sampling within a single category might work better
            ridx = random.randint(0, len(group) - 1)
            path_info = group.pop(ridx)  # pop by index instead of O(n) remove()

            image_path = path_info['file_path']
            category = path_info['category']

            try:
                # load the image inside the try so a bad file is really
                # skipped (previously a failure here crashed the whole run)
                normalized_image = load_data(image_path, args)

                # one-hot encoded target vector
                targets = [0.0 for _ in range(output_num)]
                if category < output_num:  # keep the class index in range
                    targets[category] = 0.99

                targets = np.array(targets, ndmin=2).T
                if gpu_mode:
                    targets = cp.asarray(targets)

                # (400, 300, 3) -> (1, 3, 300, 400):
                # 1. add a batch axis, 2. reorder to (batch, channels, height, width)
                x = np.expand_dims(normalized_image, axis=0)
                x = np.transpose(x, (0, 3, 2, 1))

            except Exception as e:
                print(f"加载文件 {image_path} 时出错：{e}，跳过此文件")
                continue  # BUG FIX: previously fell through and trained on stale data

            outs = s.train(x, targets)
            # mean squared error (MSE) so empty outputs do not dilute the error
            squared_errors = (targets - outs) ** 2
            err = numpy.mean(squared_errors)
            err_max = numpy.max(squared_errors)

            if args.v:  # verbose: print per-sample details
                print('教师数据', targets, '原始输出：\n', outs)
                print(f'第({i+1}, {TEACH_DATA_LENGTH-j})次训练误差：{err:.8f}，最大误差：{err_max:.8f}')
                print(f'训练集长度：{TEACH_DATA_LENGTH}')

            # accumulate running averages to avoid overflow on huge data sets
            err_max_count += err_max / TEACH_DATA_LENGTH
            err_mean_count += err / TEACH_DATA_LENGTH

        if gpu_mode:
            # pull scalars back from the GPU
            err_mean_count = err_mean_count.get()
            err_max_count = err_max_count.get()
        errs.append(err_mean_count)
        errs_max.append(err_max_count)
        print(f'第({i+1})轮训练误差：{err_mean_count:.8f}，最大误差：{err_max_count:.8f}')

    end = time.time()
    print(f'训练时间：{end - start}s')
    return errs, errs_max

def train_sgf(sgf_list: list, s: NeuralNetwork, args: argparse.Namespace):
    '''
    Train the neural network on SGF (go game record) files.

    Parameters:
    sgf_list (list): dicts with a 'file_path' key, one per SGF file.
    s (NeuralNetwork): network stack, trained in place via s.train().
    args (argparse.Namespace): uses args.t (epoch count) and args.v (verbose).

    Returns:
    tuple: (errs, errs_max) — per-epoch mean and max squared error.

    BUG FIX: the original read the module-level `t` for the epoch count but
    also assigned `t = td1[j:j+1]` inside the loop, which made `t` local to
    the whole function and raised UnboundLocalError on `range(t)`.  The epoch
    count now comes from args.t and the target batch is named `targets`,
    which also fixes the NameError in the error computation (it referenced a
    non-existent `targets`).
    '''
    errs = []       # per-epoch mean squared error
    errs_max = []   # per-epoch max squared error
    TEACH_DATA_LENGTH = len(sgf_list)

    start = time.time()

    for k in range(args.t):
        err_max_count = 0
        err_mean_count = 0

        for i in range(TEACH_DATA_LENGTH):
            filepath = sgf_list[i]['file_path']
            td0, td1 = load_data(filepath, args)  # inputs / targets per position
            size = len(td0)
            for j in range(size):
                x = td0[j:j+1]
                targets = td1[j:j+1]
                outs = s.train(x, targets)

                # mean squared error (MSE) so empty outputs do not dilute it
                squared_errors = (targets - outs) ** 2
                err = numpy.mean(squared_errors)
                err_max = numpy.max(squared_errors)

                # accumulate running averages to avoid overflow on huge sets
                err_max_count += err_max / TEACH_DATA_LENGTH / size
                err_mean_count += err / TEACH_DATA_LENGTH / size

                if args.v:  # verbose: print per-sample details
                    print('教师数据', targets, '原始输出：\n', outs)
                    print(f'第({i+1}, {TEACH_DATA_LENGTH-j})次训练误差：{err:.8f}，最大误差：{err_max:.8f}')

        errs.append(err_mean_count)
        errs_max.append(err_max_count)
        print(f'第({k+1})轮训练误差：{err_mean_count:.8f}，最大误差：{err_max_count:.8f}')

    end = time.time()
    print(f'训练时间：{end - start}s')
    return errs, errs_max

I2C_BUS = 1  # I2C bus number handed to I2cTh
MEM_SIZE = 10  # pool size: number of sensor samples / actions kept in history
SERVO_NUM = 12  # number of servos (degrees of freedom)
class SpiderBotTrainning:
    '''
    Deep reinforcement learning for a quadruped (spider) robot.

    Sensor input: MEM_SIZE samples of 4 values (roll, pitch, yaw, temperature).
    Action output: SERVO_NUM servos x 180 discrete angles (1-degree steps).
    '''

    def __init__(self) -> None:
        pass

    def get_blank_action(self) -> list:
        '''Return MEM_SIZE all-zero actions, one angle index per servo.

        (Annotation fixed: the method returns a list of lists, not ndarray.)
        '''
        actions = []
        for _ in range(MEM_SIZE):
            actions.append([0 for _ in range(SERVO_NUM)])
        return actions

    def cut_arr(self, arr, length: int):
        '''Keep only the last *length* entries of *arr* (no-op if shorter).'''
        if len(arr) > length:
            return arr[-length:]
        else:
            return arr

    def get_loss(self, stats) -> numpy.ndarray:
        '''
        Compute a per-channel loss from the last two MPU6050 samples.

        Positive loss means the attitude moved away from the baseline,
        negative means it moved towards it.

        FIXME(review): this returns len(baseline) == 4 values, but run()
        indexes the result with SERVO_NUM (= 12) entries — confirm the
        intended mapping from sensor channels to servos.
        '''
        baseline = numpy.array([0.0, 0.0, 0.0, 25.0])  # sensor baseline (roll, pitch, yaw, temp)
        # asarray guards against the history rows being plain lists
        err_cur = baseline - numpy.asarray(stats[-1])
        err_pre = baseline - numpy.asarray(stats[-2])
        loss = []  # change-based loss
        for i in range(len(baseline)):
            dd = err_pre[i] - err_cur[i]
            flag = err_pre[i] > 0  # was the previous error positive? (hack)
            dd = dd if flag else -dd  # moving towards the baseline -> positive dd
            loss.append(-dd)  # moving away from the baseline -> positive loss
        max_abs = numpy.max(numpy.abs(loss))  # renamed: do not shadow builtin max()
        if max_abs > 0:  # BUG FIX: avoid division by zero when nothing changed
            loss = [v / max_abs for v in loss]  # normalize to [-1, 1]
        return numpy.array(loss)  # FIXME: loss definition is debatable

    def normalize(self, data, method: str = 'minmax') -> numpy.ndarray:
        '''Min-max normalize *data* to [0, 1]; only 'minmax' is supported.'''
        if method != 'minmax':
            raise ValueError(f'未知归一化方法：{method}')
        min_val = numpy.min(data)
        max_val = numpy.max(data)
        span = max_val - min_val
        if span == 0:  # BUG FIX: constant input previously produced NaN (0/0)
            return numpy.zeros_like(data, dtype=float)
        return (data - min_val) / span

    def run(self, s: 'StackingNN', args: argparse.Namespace):
        '''
        Online DRL training loop.

        Reads sensor windows over I2C, trains the network on a change-based
        loss for the previous action, then queries it (plus exploration
        noise) to choose the next servo angles.
        '''
        # spider robot:
        #   SERVO_NUM degrees of freedom, angle range -90..90, 1-degree steps
        #   network output: SERVO_NUM x 180 values
        #   network input: 4 sensor channels x MEM_SIZE samples
        with I2cTh(I2C_BUS) as i2c:
            with CamTh(0) as cam:
                start = time.time()
                stats = []  # sensor history
                actions = self.get_blank_action()  # action history for reward/penalty
                pool = []  # experience pool (collected, not replayed yet)
                for _ in range(args.t):  # args.t carries the same value as the global `t`
                    stats.append(i2c.get_pool())
                    if len(stats) <= MEM_SIZE:
                        continue  # wait until the window is full
                    stats = self.cut_arr(stats, MEM_SIZE)  # trim the window

                    # FIXME: this loss is not really sound
                    # FIXME: no delayed reward yet
                    if len(actions) > 0:
                        err = self.get_loss(stats)  # change relative to the baseline
                        y = numpy.zeros((SERVO_NUM, 180), dtype=float)
                        for j in range(SERVO_NUM):
                            y[j, actions[-1][j]] = err[j]  # credit the chosen angle
                        y = self.normalize(y, args.method)
                        y = y.reshape(SERVO_NUM * 180, 1)
                        # BUG FIX: was `numpy.array(status)` — NameError
                        x = numpy.array(stats)
                        x = x.reshape(MEM_SIZE * 4, 1)
                        s.train(x, y)
                        pool.append((x.copy(), y.copy()))

                    data = numpy.array(stats)  # FIXME: converted every loop
                    data = data.reshape(MEM_SIZE * 4, 1)
                    data = self.normalize(data, args.method)
                    out = s.query(data)
                    out = out + numpy.random.rand(*out.shape)  # exploration noise
                    out = out.reshape(SERVO_NUM, 180)
                    # BUG FIX: axis=1 yields one angle index per servo, shape
                    # (SERVO_NUM,); the original axis=0 produced shape (180,)
                    # and always tripped the check below
                    idxs = np.argmax(out, axis=1)
                    if not idxs.shape == (SERVO_NUM,):
                        raise ValueError(f'错误：idx形状为 {idxs.shape}，预期为 ({SERVO_NUM},)')
                    for j in range(SERVO_NUM):
                        # FIXME(review): `pca` is not defined anywhere in this
                        # file — presumably a PCA9685 servo driver; confirm
                        # where it should come from
                        pca.set_servo_angle(j, idxs[j])
                    # remember the action for the next reward computation
                    actions.append(idxs.tolist())
                    actions = self.cut_arr(actions, MEM_SIZE)

                end = time.time()
                print(f'训练时间：{end - start}s')


###############################################
if __name__ == "__main__":
    parser = argparse.ArgumentParser('This is a neural network program.')
    # normalization options
    parser.add_argument('--method', type=str, choices=['minmax', 'meanstd', 'percentile', 'hist', 'adaptive_hist', 'conv_pool', 'check'],
                        help='归一化方法: minmax(最小-最大), meanstd(均值-标准差), percentile(百分位), hist(直方图均衡化), adaptive_hist(自适应直方图均衡化), conv_pool(卷积池化处理), check(检查图像属性)')
    
    parser.add_argument('--size', type=int, nargs=2, metavar=('WIDTH', 'HEIGHT'),
                        help='目标图像尺寸 (宽度 高度)')
    
    # method-specific options
    parser.add_argument('--min', type=float, default=0.0, help='最小-最大归一化的最小值 (默认: 0.0)')
    parser.add_argument('--max', type=float, default=1.0, help='最小-最大归一化的最大值 (默认: 1.0)')
    parser.add_argument('--lower', type=float, default=1.0, help='百分位归一化的低百分位 (默认: 1.0)')
    parser.add_argument('--upper', type=float, default=99.0, help='百分位归一化的高百分位 (默认: 99.0)')
    parser.add_argument('--clip-limit', type=float, default=0.03, help='自适应直方图均衡化的裁剪限制 (默认: 0.03)')
    #######################################
    # network options
    parser.add_argument('-n', type=str, help='Input a list of integers splited by comma.')
    parser.add_argument('-i', type=str, help='Input a hex file as neural network model.')
    parser.add_argument('-o', type=str, help='Output the model to a file as hex.')
    parser.add_argument('-l', type=float, help='Set the learnning rate of the neural netword.', required=True)
    parser.add_argument('-t', type=int, help='How many times you want to run.', required=True)
    parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate (0.0 to 1.0), default is 0.0 (no dropout)')
    parser.add_argument('--in-channels', type=int, default=3, help='In channels, default is 3')
    parser.add_argument('--out-channels', type=int, default=8, help='Out channels, default is 8')
    parser.add_argument('--kernel-size', type=int, default=3, help='Kernel size, default is 3')
    parser.add_argument('--padding', type=int, default=1, help='Padding, default is 0')
    parser.add_argument('--conv-stride', type=int, default=2, help='Conv stride, default is 1')
    parser.add_argument('--pool-size', type=int, default=2, help='Pool size, default is 4')
    parser.add_argument('--pool-stride', type=int, default=2, help='Pool stride, default is 3')
    # training options
    parser.add_argument('--gpu', action='store_true', help='Use gpu to simulate.')
    parser.add_argument('--drl', action='store_true', help='Use deep reinforcement learning.')
    parser.add_argument('-v', action='store_true', help='Verbose mode')
    parser.add_argument('--teacher-data-image', type=str, help='Teacher data input')
    parser.add_argument('--teacher-data-sgf', type=str, help='Teacher data input')
    parser.add_argument('--plot', type=str, help='Plot the errors to a file.')
    args = parser.parse_args()  # parse the command line

    # Learning rate (-l is required).  Plain assignment instead of the old
    # `if args.l:` guard, which left the name undefined for a rate of 0.0.
    learning_rate = args.l

    if args.gpu:
        gpu_mode = True
        import cupy as cp

    if args.n and args.i:  # an input model and a new model are mutually exclusive
        print('You can not set -n and -i both onetime.')
        quit()
    if not args.n and not args.i:  # a model must be supplied
        print('You must set model.  Use -n or -i.')
        quit()
    if (not args.method or not args.size) and args.teacher_data_image:
        # image training needs both the size and the normalization method
        print('You must set --size  if you want to train images.')
        quit()

    # Epoch count (-t is required); also read as a module global by the
    # training helpers.  Plain assignment for the same reason as -l above.
    t = args.t

    if args.i:  # load an existing model
        info = get_model_info(args.i)
        s = StackingNN.crt_nn_by_info(info)

    if args.n:  # build a fresh model from the -n layer spec
        s = StackingNN.crt_nn_by_args(args)

    # Needed later by the training helpers (module global).
    output_num = s.get_output_num()
    ######################################################################

    # Per-epoch error curves for --plot.  Defaults defined up front:
    # previously these names were never bound at module level, so the
    # --plot branch below always died with NameError.
    errs, errs_max = [], []

    # image-format teacher data
    # FIXME: with very large data sets the loop seems to never finish
    # FIXME: data seems to get trained twice
    if args.teacher_data_image:
        teacher_data_paths = get_imgs_list(args.teacher_data_image)
        result = train_imgs(teacher_data_paths, s, args)
        if result is not None:  # newer trainers return the error curves
            errs, errs_max = result

    # SGF (go record) teacher data
    elif args.teacher_data_sgf:
        teacher_data_paths = get_sgf_list(args.teacher_data_sgf)
        result = train_sgf(teacher_data_paths, s, args)
        if result is not None:
            errs, errs_max = result

    elif args.drl:
        # deep reinforcement learning
        bot = SpiderBotTrainning()
        # BUG FIX: was bot.run(bot, s, args) — passed `self` twice (TypeError)
        bot.run(s, args)

    if args.o:  # persist the trained network with MessagePack
        info = s.get_info()
        save_model_info(args.o, info)

    if args.plot:  # plot the error curves
        from plot import plot_errors
        plot_errors(errs, errs_max, args.plot)

'''
那些在雨中哭泣和拥抱着的影子，
肆意践踏着玫瑰花的舞步。
仲夏转瞬即逝。
'''