package experiment5;

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class CNN {
    private static final int IMAGE_SIZE = 28;          // MNIST images are 28x28 pixels
    private static final int NUM_CLASSES = 10;         // digits 0-9
    private static final int NUM_CHANNELS = 1;         // grayscale input
    private static final int BATCH_SIZE = 64;          // NOTE(review): declared but never used anywhere in this file
    private static final double LEARNING_RATE = 0.001; // SGD step size used in backpropagation()
    private static final int INPUT_DIM = 784;          // flattened pixel count (28*28); NOTE(review): main() passes this to init_conv_layer as a SPATIAL size — looks wrong, confirm

    // Parameters of one convolution layer.
    static class ConvLayer {
        int num_filters; // number of convolution kernels
        int filter_size; // kernel side length (kernels are square)
        int stride;      // stride
        int padding;     // zero-padding width
        int input_dim;   // spatial side length of the input feature map
        int output_dim;  // spatial side length of the output feature map
        double[] filters; // kernel coefficients (filled by init_conv_layer)
        double[] weights; // weight matrix
                          // NOTE(review): read/updated by backpropagation() but never
                          // initialized by init_conv_layer — likely a filters/weights mix-up.
        double[] biases;  // per-filter bias terms
    }

    // Parameters of one max-pooling layer.
    static class PoolLayer {
        int pool_size;   // pooling window side length (windows are square)
        int stride;      // stride
        int input_dim;   // spatial side length of the input feature map
        int output_dim;  // spatial side length of the output feature map
    }

    // Parameters of the fully-connected (dense) output layer.
    static class FCLayer {
        int input_dim;   // number of inputs (flattened feature count)
        int output_dim;  // number of outputs (class count)
        double[] weights; // weight matrix, input-major layout: weights[i * output_dim + j]
        double[] biases;  // per-output bias terms
    }

    // Rectified linear unit: passes positive values through, clamps the rest to zero.
    private static double relu(double x) {
        return (x <= 0.0) ? 0.0 : x;
    }

    // Derivative of ReLU: 1 for positive inputs, 0 otherwise.
    private static double relu_derivative(double x) {
        if (x > 0) {
            return 1;
        }
        return 0;
    }

    // Numerically-stable in-place softmax over the first `size` entries:
    // the running maximum is subtracted before exponentiating so the
    // exponentials cannot overflow, then everything is normalized to sum to 1.
    private static void softmax(double[] input, int size) {
        double peak = Double.NEGATIVE_INFINITY;
        for (int idx = 0; idx < size; idx++) {
            peak = Math.max(peak, input[idx]);
        }

        double total = 0.0;
        for (int idx = 0; idx < size; idx++) {
            double e = Math.exp(input[idx] - peak);
            input[idx] = e;
            total += e;
        }

        for (int idx = 0; idx < size; idx++) {
            input[idx] = input[idx] / total;
        }
    }

    /**
     * Convolution forward pass with a fused ReLU activation.
     * Output layout: output[(i * output_dim + j) * num_filters + f].
     *
     * Fix: the input was previously bounds-checked and indexed with the global
     * IMAGE_SIZE (28), which is only correct for the very first layer; it now
     * uses layer.input_dim so the same routine works on any feature-map size
     * (e.g. the second conv layer operating on the pooled output).
     *
     * NOTE(review): the input channel count is still hard-coded to
     * NUM_CHANNELS (= 1); chaining conv layers with num_filters > 1 would need
     * a per-layer input channel count — confirm the intended architecture.
     */
    private static void conv_forward(double[] input, ConvLayer layer, double[] output) {
        for (int f = 0; f < layer.num_filters; f++) {
            for (int i = 0; i < layer.output_dim; i++) {
                for (int j = 0; j < layer.output_dim; j++) {
                    double sum = 0.0;
                    for (int k = 0; k < layer.filter_size; k++) {
                        for (int l = 0; l < layer.filter_size; l++) {
                            int x = i * layer.stride + k - layer.padding;
                            int y = j * layer.stride + l - layer.padding;
                            // Skip taps that fall in the zero-padding region.
                            if (x >= 0 && x < layer.input_dim && y >= 0 && y < layer.input_dim) {
                                for (int c = 0; c < NUM_CHANNELS; c++) {
                                    sum += input[(x * layer.input_dim + y) * NUM_CHANNELS + c] *
                                            layer.filters[((f * NUM_CHANNELS + c) * layer.filter_size + k) * layer.filter_size + l];
                                }
                            }
                        }
                    }
                    // Add bias, then apply ReLU.
                    output[(i * layer.output_dim + j) * layer.num_filters + f] = Math.max(0.0, sum + layer.biases[f]);
                }
            }
        }
    }

    // Max-pooling forward pass: each output cell is the maximum over a
    // pool_size x pool_size window of the input, computed per channel.
    //
    // NOTE(review): the channel count is hard-coded to NUM_CHANNELS (= 1).
    // When this layer follows a conv layer with num_filters > 1 (as in main),
    // only the first channel is pooled and the remaining filter outputs are
    // silently dropped — confirm whether a per-layer channel count was intended.
    private static void pool_forward(double[] input, PoolLayer layer, double[] output) {
        for (int i = 0; i < layer.output_dim; i++) {
            for (int j = 0; j < layer.output_dim; j++) {
                for (int f = 0; f < NUM_CHANNELS; f++) {
                    double maxVal = Double.NEGATIVE_INFINITY;
                    for (int k = 0; k < layer.pool_size; k++) {
                        for (int l = 0; l < layer.pool_size; l++) {
                            int x = i * layer.stride + k;
                            int y = j * layer.stride + l;
                            // Ignore window positions that fall outside the input.
                            if (x >= 0 && x < layer.input_dim && y >= 0 && y < layer.input_dim) {
                                maxVal = Math.max(maxVal, input[((x * layer.input_dim) + y) * NUM_CHANNELS + f]);
                            }
                        }
                    }
                    output[(i * layer.output_dim + j) * NUM_CHANNELS + f] = maxVal;
                }
            }
        }
    }

    // Fully-connected forward pass: output = softmax(W^T * input + b).
    // Weights are stored input-major: weights[i * output_dim + j].
    private static void fc_forward(double[] input, FCLayer layer, double[] output) {
        for (int out = 0; out < layer.output_dim; out++) {
            double acc = 0.0;
            for (int in = 0; in < layer.input_dim; in++) {
                acc += input[in] * layer.weights[in * layer.output_dim + out];
            }
            output[out] = acc + layer.biases[out];
        }
        // Turn the raw logits into a probability distribution.
        softmax(output, layer.output_dim);
    }

    /**
     * Cross-entropy loss between predicted probabilities and a one-hot target:
     * -log(p[k]) where k is the index with target[k] == 1.
     *
     * Generalized: iterates over the target's own length instead of the fixed
     * NUM_CLASSES constant, so it works for any class count (backward
     * compatible — all existing callers pass NUM_CLASSES-length arrays).
     *
     * @param input  predicted probabilities (e.g. softmax output)
     * @param target one-hot encoded label of the same length
     * @return the cross-entropy loss; 0.0 if target has no 1 entry
     */
    private static double cross_entropy_loss(double[] input, int[] target) {
        double loss = 0.0;
        for (int i = 0; i < target.length; i++) {
            if (target[i] == 1) {
                loss -= Math.log(input[i]);
            }
        }
        return loss;
    }

    /**
     * Initializes a convolution layer: records its geometry, computes the
     * output size with the standard formula, and draws kernel values uniformly
     * from [-0.5, 0.5).
     *
     * Fix: also allocates and initializes {@code weights}. backpropagation()
     * reads and updates conv.weights, but the original code only filled
     * {@code filters}, leaving {@code weights} null and guaranteeing a
     * NullPointerException on the first backward pass.
     *
     * NOTE(review): backpropagation() indexes conv2.weights with the previous
     * layer's filter count, which implies a larger array than
     * num_filters * NUM_CHANNELS * filter_size^2 — the intended kernel layout
     * should be confirmed and unified with `filters`.
     */
    private static void init_conv_layer(ConvLayer layer, int num_filters, int filter_size, int stride, int padding, int input_dim) {
        layer.num_filters = num_filters;
        layer.filter_size = filter_size;
        layer.stride = stride;
        layer.padding = padding;
        layer.input_dim = input_dim;
        // Standard conv output-size formula.
        layer.output_dim = (input_dim + 2 * padding - filter_size) / stride + 1;

        int numWeights = num_filters * NUM_CHANNELS * filter_size * filter_size;
        Random rand = new Random();
        layer.filters = new double[numWeights];
        for (int i = 0; i < numWeights; i++) {
            layer.filters[i] = rand.nextDouble() - 0.5; // uniform in [-0.5, 0.5)
        }
        // Mirror the kernel storage into `weights` so backpropagation() has a
        // non-null array to update.
        layer.weights = new double[numWeights];
        for (int i = 0; i < numWeights; i++) {
            layer.weights[i] = rand.nextDouble() - 0.5;
        }

        // Java zero-initializes new arrays, so no explicit loop is needed.
        layer.biases = new double[num_filters];
    }

    // Sets up a max-pooling layer and derives its output side length from the
    // window size, stride and input side length.
    private static void init_pool_layer(PoolLayer layer, int pool_size, int stride, int input_dim) {
        layer.input_dim = input_dim;
        layer.pool_size = pool_size;
        layer.stride = stride;
        layer.output_dim = (input_dim - pool_size) / stride + 1;
    }
    // Loads the MNIST training set and splits off 10% as a validation set.
    //
    // Fixes:
    //  - The IDX headers are now parsed properly: the image file starts with
    //    magic(2051)/count/rows/cols and the label file with magic(2049)/count.
    //    The old code read the magic number as the image count and then
    //    consumed the remaining 12 header bytes as pixel data.
    //  - Images and labels are shuffled with the SAME permutation; the old
    //    code shuffled only the images, silently destroying the image/label
    //    pairing before the split.
    //  - Streams are closed via try-with-resources even when an exception is
    //    thrown mid-read.
    private static void load_mnist_data(List<double[]> images, List<Integer> labels,
                                        List<double[]> validationImages, List<Integer> validationLabels) throws IOException {
        try (DataInputStream imageStream = new DataInputStream(new FileInputStream("D:\\Java\\projects\\practice\\AIGC\\src\\experiment4\\MNIST\\train-images.idx3-ubyte"));
             DataInputStream labelStream = new DataInputStream(new FileInputStream("D:\\Java\\projects\\practice\\AIGC\\src\\experiment4\\MNIST\\train-labels.idx1-ubyte"))) {
            System.out.println("Image file opened successfully.");
            System.out.println("Label file opened successfully.");

            // IDX3 image header: magic, count, rows, cols (big-endian ints).
            int imageMagic = imageStream.readInt();
            int numImages = imageStream.readInt();
            int rows = imageStream.readInt();
            int cols = imageStream.readInt();
            // IDX1 label header: magic, count.
            int labelMagic = labelStream.readInt();
            int numLabels = labelStream.readInt();
            if (imageMagic != 2051 || labelMagic != 2049) {
                throw new IOException("Unexpected MNIST magic numbers: " + imageMagic + ", " + labelMagic);
            }
            if (numLabels != numImages) {
                throw new IOException("Image/label count mismatch: " + numImages + " vs " + numLabels);
            }
            if (rows * cols != IMAGE_SIZE * IMAGE_SIZE) {
                throw new IOException("Unexpected image size: " + rows + "x" + cols);
            }
            System.out.println("Number of images: " + numImages);

            List<double[]> allImages = new ArrayList<>(numImages);
            List<Integer> allLabels = new ArrayList<>(numImages);
            for (int i = 0; i < numImages; i++) {
                double[] image = new double[IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS];
                for (int j = 0; j < image.length; j++) {
                    image[j] = imageStream.readUnsignedByte() / 255.0; // normalize to [0, 1]
                }
                allImages.add(image);
                allLabels.add(labelStream.readUnsignedByte());

                // Progress output every 100 images.
                if (i % 100 == 0) {
                    System.out.println("加载" + (i + 1) + " 图像和 " + (i + 1) + " labels.");
                }
            }

            // Shuffle images and labels with ONE permutation to keep pairs aligned.
            List<Integer> order = new ArrayList<>(numImages);
            for (int i = 0; i < numImages; i++) {
                order.add(i);
            }
            Collections.shuffle(order);

            // First 10% of the shuffled order becomes the validation set.
            int validationSize = (int) (numImages * 0.1);
            for (int i = 0; i < numImages; i++) {
                int src = order.get(i);
                if (i < validationSize) {
                    validationImages.add(allImages.get(src));
                    validationLabels.add(allLabels.get(src));
                } else {
                    images.add(allImages.get(src));
                    labels.add(allLabels.get(src));
                }
            }

            System.out.println("Loaded " + images.size() + " training images and " + labels.size() + " labels.");
            System.out.println("Loaded " + validationImages.size() + " validation images and " + validationLabels.size() + " validation labels.");
        }
    }
    // Sets up a fully-connected layer: records its dimensions, draws the
    // weights uniformly from [-0.5, 0.5), and zeroes the biases.
    private static void init_fc_layer(FCLayer layer, int input_dim, int output_dim) {
        layer.input_dim = input_dim;
        layer.output_dim = output_dim;

        int weightCount = input_dim * output_dim;
        Random rng = new Random();
        layer.weights = new double[weightCount];
        for (int w = 0; w < weightCount; w++) {
            layer.weights[w] = rng.nextDouble() - 0.5;
        }

        layer.biases = new double[output_dim];
        for (int b = 0; b < output_dim; b++) {
            layer.biases[b] = 0;
        }
    }

    // Loads the full MNIST training set (no validation split).
    //
    // Fixes: parses the IDX headers correctly — magic(2051)/count/rows/cols for
    // the image file, magic(2049)/count for the label file — and closes the
    // streams via try-with-resources. The old code read the magic number as
    // the image count and treated the remaining header bytes as pixel data.
    private static void load_mnist_data(List<double[]> images, List<Integer> labels) throws IOException {
        try (DataInputStream imageStream = new DataInputStream(new FileInputStream("D:\\Java\\projects\\practice\\AIGC\\src\\experiment4\\MNIST\\train-images.idx3-ubyte"));
             DataInputStream labelStream = new DataInputStream(new FileInputStream("D:\\Java\\projects\\practice\\AIGC\\src\\experiment4\\MNIST\\train-labels.idx1-ubyte"))) {
            int imageMagic = imageStream.readInt();
            int numImages = imageStream.readInt();
            int rows = imageStream.readInt();
            int cols = imageStream.readInt();
            int labelMagic = labelStream.readInt();
            int numLabels = labelStream.readInt();
            if (imageMagic != 2051 || labelMagic != 2049 || numLabels != numImages) {
                throw new IOException("Bad MNIST header (magic " + imageMagic + "/" + labelMagic
                        + ", counts " + numImages + "/" + numLabels + ")");
            }
            if (rows * cols != IMAGE_SIZE * IMAGE_SIZE) {
                throw new IOException("Unexpected image size: " + rows + "x" + cols);
            }
            System.out.println("Number of images: " + numImages);

            for (int i = 0; i < numImages; i++) {
                double[] image = new double[IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS];
                for (int j = 0; j < image.length; j++) {
                    image[j] = imageStream.readUnsignedByte() / 255.0; // normalize to [0, 1]
                }
                images.add(image);
            }

            for (int i = 0; i < numImages; i++) {
                labels.add(labelStream.readUnsignedByte());
            }
        }
    }

    // Backward pass: computes gradients of the cross-entropy loss w.r.t. every
    // layer and applies one SGD step (step size LEARNING_RATE) to the FC and
    // convolution parameters.
    //
    // NOTE(review): this method reads conv1.weights and conv2.weights, but
    // init_conv_layer only fills `filters` and leaves `weights` null, so as
    // written these accesses throw NullPointerException — confirm which array
    // is meant to hold the kernels.
    private static void backpropagation(double[] input, ConvLayer conv1, double[] conv1_output, PoolLayer pool1, double[] pool1_output,
                                        ConvLayer conv2, double[] conv2_output, PoolLayer pool2, double[] pool2_output,
                                        FCLayer fc, double[] fc_input, double[] fc_output, int[] target) {
        // Output-layer gradient for softmax + cross-entropy: p - y.
        double[] grad_output = new double[NUM_CLASSES];
        for (int i = 0; i < NUM_CLASSES; i++) {
            grad_output[i] = fc_output[i] - target[i];
        }

        // Gradient w.r.t. the FC input.
        // NOTE(review): grad_input is filled below but never read afterwards —
        // presumably the pool2 gradient was meant to be derived from it.
        double[] grad_input = new double[fc.input_dim];
        for (int i = 0; i < grad_input.length; i++) {
            grad_input[i] = 0;
        }

        // Accumulate grad_input and update the FC biases.
        for (int i = 0; i < fc.output_dim; i++) {
            for (int j = 0; j < fc.input_dim; j++) {
                grad_input[j] += grad_output[i] * fc.weights[j * fc.output_dim + i];
            }
            fc.biases[i] -= LEARNING_RATE * grad_output[i];
        }

        // Backpropagate to the second pooling layer.
        double[] grad_pool2 = new double[pool2.output_dim * pool2.output_dim * conv2.num_filters];
        for (int i = 0; i < grad_pool2.length; i++) {
            grad_pool2[i] = 0;
        }

        // FC weight update: W[i][j] -= lr * grad_out[j] * input[i].
        for (int i = 0; i < fc.input_dim; i++) {
            for (int j = 0; j < fc.output_dim; j++) {
                fc.weights[i * fc.output_dim + j] -= LEARNING_RATE * grad_output[j] * fc_input[i];
            }
        }

        for (int i = 0; i < pool2.output_dim; i++) {
            for (int j = 0; j < pool2.output_dim; j++) {
                for (int f = 0; f < conv2.num_filters; f++) {
                    double sum = 0.0;
                    int index = (i * pool2.output_dim + j) * conv2.num_filters + f;
                    // NOTE(review): `index` is a flattened spatial/filter index and can
                    // exceed both fc.output_dim and grad_output.length (NUM_CLASSES),
                    // so the fc.weights / grad_output accesses below look out of
                    // bounds — verify the intended indexing.
                    for (int k = 0; k < fc.input_dim / NUM_CHANNELS; k++) {
                        for (int l = 0; l < NUM_CHANNELS; l++) {
                            sum += fc.weights[(k * NUM_CHANNELS + l) * fc.output_dim + index] * grad_output[index];
                        }
                    }
                    grad_pool2[(i * pool2.output_dim + j) * conv2.num_filters + f] = sum * relu_derivative(conv2_output[index]);
                }
            }
        }

        double[] grad_conv2 = new double[conv2.output_dim * conv2.output_dim * conv2.num_filters];
        for (int i = 0; i < grad_conv2.length; i++) {
            grad_conv2[i] = 0;
        }

        // Route the pool2 gradient back through the max-pooling windows.
        for (int i = 0; i < pool2.output_dim; i++) {
            for (int j = 0; j < pool2.output_dim; j++) {
                for (int f = 0; f < conv2.num_filters; f++) {
                    int index = (i * pool2.output_dim + j) * conv2.num_filters + f;
                    for (int p = 0; p < pool2.pool_size; p++) {
                        for (int q = 0; q < pool2.pool_size; q++) {
                            int x = i * pool2.stride + p - conv2.padding;
                            int y = j * pool2.stride + q - conv2.padding;
                            if (x >= 0 && x < conv2.output_dim && y >= 0 && y < conv2.output_dim) {
                                int index2 = (x * conv2.output_dim + y) * conv2.num_filters + f;
                                grad_conv2[index2] += grad_pool2[index];
                            }
                        }
                    }
                }
            }
        }
        // Backpropagate to the second conv layer and the first pooling layer.
        double[] grad_pool1 = new double[pool1.output_dim * pool1.output_dim * conv1.num_filters];
        for (int i = 0; i < grad_pool1.length; i++) {
            grad_pool1[i] = 0;
        }

        for (int i = 0; i < conv2.output_dim; i++) {
            for (int j = 0; j < conv2.output_dim; j++) {
                for (int f = 0; f < conv2.num_filters; f++) {
                    int index = (i * conv2.output_dim + j) * conv2.num_filters + f;
                    for (int p = 0; p < conv2.filter_size; p++) {
                        for (int q = 0; q < conv2.filter_size; q++) {
                            int x = i + p - conv2.padding;
                            int y = j + q - conv2.padding;
                            if (x >= 0 && x < conv2.output_dim && y >= 0 && y < conv2.output_dim) {
                                int index2 = (x * conv2.output_dim + y) * conv2.num_filters + f;
                                grad_conv2[index] += grad_pool2[index2] * conv2.weights[(p * conv2.filter_size + q) * conv2.num_filters + f];
                            }
                        }
                    }
                    grad_conv2[index] *= relu_derivative(conv2_output[index]);
                }
            }
        }

        double[] grad_conv1 = new double[conv1.output_dim * conv1.output_dim * conv1.num_filters];
        for (int i = 0; i < grad_conv1.length; i++) {
            grad_conv1[i] = 0;
        }

        // Propagate the conv2 gradient back into the conv1 feature maps.
        for (int i = 0; i < conv2.output_dim; i++) {
            for (int j = 0; j < conv2.output_dim; j++) {
                for (int f = 0; f < conv2.num_filters; f++) {
                    int index = (i * conv2.output_dim + j) * conv2.num_filters + f;
                    for (int p = 0; p < conv2.filter_size; p++) {
                        for (int q = 0; q < conv2.filter_size; q++) {
                            int x = i + p - conv2.padding;
                            int y = j + q - conv2.padding;
                            if (x >= 0 && x < conv2.output_dim && y >= 0 && y < conv2.output_dim) {
                                int index2 = (x * conv2.output_dim + y) * conv2.num_filters + f;
                                for (int c = 0; c < conv1.num_filters; c++) {
                                    int index3 = (x / pool1.pool_size * pool1.output_dim + y / pool1.pool_size) * conv1.num_filters + c;
                                    grad_conv1[index3] += grad_conv2[index] * conv1.weights[(p * conv2.filter_size + q) * conv1.num_filters * conv2.num_filters + c * conv2.num_filters + f];
                                }
                            }
                        }
                    }
                }
            }
        }

        // Distribute the conv1 gradient evenly over each pooling window
        // (average-pool style rather than max-pool routing).
        for (int i = 0; i < pool1.output_dim; i++) {
            for (int j = 0; j < pool1.output_dim; j++) {
                for (int f = 0; f < conv1.num_filters; f++) {
                    int index = (i * pool1.output_dim + j) * conv1.num_filters + f;
                    for (int p = 0; p < pool1.pool_size; p++) {
                        for (int q = 0; q < pool1.pool_size; q++) {
                            int x = i * pool1.stride + p - conv1.padding;
                            int y = j * pool1.stride + q - conv1.padding;
                            if (x >= 0 && x < conv1.output_dim && y >= 0 && y < conv1.output_dim) {
                                int index2 = (x * conv1.output_dim + y) * conv1.num_filters + f;
                                grad_pool1[index2] += grad_conv1[index] / (pool1.pool_size * pool1.pool_size);
                            }
                        }
                    }
                }
            }
        }

        // Update the second conv layer's weights and biases.
        for (int f = 0; f < conv2.num_filters; f++) {
            for (int c = 0; c < conv1.num_filters; c++) {
                for (int i = 0; i < conv2.filter_size; i++) {
                    for (int j = 0; j < conv2.filter_size; j++) {
                        double sum = 0.0;
                        for (int p = 0; p < conv2.output_dim; p++) {
                            for (int q = 0; q < conv2.output_dim; q++) {
                                int x = p + i - conv2.padding;
                                int y = q + j - conv2.padding;
                                if (x >= 0 && x < pool1.output_dim && y >= 0 && y < pool1.output_dim) {
                                    int index = (p * conv2.output_dim + q) * conv2.num_filters + f;
                                    int index2 = (x * pool1.output_dim + y) * conv1.num_filters + c;
                                    sum += grad_conv2[index] * pool1_output[index2];
                                }
                            }
                        }
                        conv2.weights[(i * conv2.filter_size + j) * conv2.num_filters * conv1.num_filters + c * conv2.num_filters + f]
                                -= LEARNING_RATE * sum / (conv2.output_dim * conv2.output_dim);
                    }
                }
            }
            double sum = 0.0;
            for (int i = 0; i < conv2.output_dim; i++) {
                for (int j = 0; j < conv2.output_dim; j++) {
                    sum += grad_conv2[(i * conv2.output_dim + j) * conv2.num_filters + f];
                }
            }
            conv2.biases[f] -= LEARNING_RATE * sum / (conv2.output_dim * conv2.output_dim);
        }

        // Update the first conv layer's weights and biases.
        for (int f = 0; f < conv1.num_filters; f++) {
            for (int c = 0; c < NUM_CHANNELS; c++) {
                for (int i = 0; i < conv1.filter_size; i++) {
                    for (int j = 0; j < conv1.filter_size; j++) {
                        double sum = 0.0;
                        for (int p = 0; p < pool1.output_dim; p++) {
                            for (int q = 0; q < pool1.output_dim; q++) {
                                int x = p + i - conv1.padding;
                                int y = q + j - conv1.padding;
                                // NOTE(review): the bound here is INPUT_DIM (784) used as a
                                // spatial limit, but the input image is IMAGE_SIZE (28) per
                                // side — confirm the intended bound and indexing.
                                if (x >= 0 && x < INPUT_DIM && y >= 0 && y < INPUT_DIM) {
                                    int index = (p * pool1.output_dim + q) * conv1.num_filters + f;
                                    int index2 = (x * INPUT_DIM + y) * NUM_CHANNELS + c;
                                    sum += grad_pool1[index] * input[index2];
                                }
                            }
                        }
                        conv1.weights[(i * conv1.filter_size + j) * conv1.num_filters * NUM_CHANNELS + c * conv1.num_filters + f]
                                -= LEARNING_RATE * sum / (pool1.output_dim * pool1.output_dim);
                    }
                }
            }
            double sum = 0.0;
            for (int i = 0; i < pool1.output_dim; i++) {
                for (int j = 0; j < pool1.output_dim; j++) {
                    sum += grad_pool1[(i * pool1.output_dim + j) * conv1.num_filters + f];
                }
            }
            conv1.biases[f] -= LEARNING_RATE * sum / (pool1.output_dim * pool1.output_dim);
        }
    }
    // Returns the index of the largest of the first `size` entries of `scores`.
    private static int argmax(double[] scores, int size) {
        int best = 0;
        for (int j = 1; j < size; j++) {
            if (scores[j] > scores[best]) {
                best = j;
            }
        }
        return best;
    }

    // Runs the full forward pass for one image, filling the supplied scratch
    // buffers, and returns the predicted class index.
    private static int predict(double[] image,
                               ConvLayer conv1, PoolLayer pool1, ConvLayer conv2, PoolLayer pool2, FCLayer fc,
                               double[] conv1Output, double[] pool1Output, double[] conv2Output, double[] pool2Output,
                               double[] fcInput, double[] fcOutput) {
        conv_forward(image, conv1, conv1Output);
        pool_forward(conv1Output, pool1, pool1Output);
        conv_forward(pool1Output, conv2, conv2Output);
        pool_forward(conv2Output, pool2, pool2Output);
        // Flatten the last pooling output into the FC input.
        for (int j = 0; j < fc.input_dim; ++j) {
            fcInput[j] = pool2Output[j];
        }
        fc_forward(fcInput, fc, fcOutput);
        return argmax(fcOutput, NUM_CLASSES);
    }

    /**
     * Entry point: builds the network, loads MNIST, and runs the training loop
     * with per-epoch train/validation accuracy reports.
     *
     * Fixes relative to the original:
     *  - conv1 is initialized with the spatial side length IMAGE_SIZE (28),
     *    not INPUT_DIM (784, the flattened pixel count), which previously
     *    produced absurd 780x780 feature maps.
     *  - The pooling buffers are sized by the producing conv layer's filter
     *    count instead of NUM_CHANNELS (1), so they can actually hold the data.
     *  - fcInput is exactly fc.input_dim long (the old fc.input_dim * 188
     *    over-allocation is removed).
     *  - The duplicated forward-pass/argmax code is factored into predict().
     */
    public static void main(String[] args) {
        // Network: conv(32 x 5x5) -> pool(2x2) -> conv(64 x 5x5) -> pool(2x2) -> FC(10).
        ConvLayer conv1 = new ConvLayer();
        PoolLayer pool1 = new PoolLayer();
        ConvLayer conv2 = new ConvLayer();
        PoolLayer pool2 = new PoolLayer();
        FCLayer fc = new FCLayer();
        init_conv_layer(conv1, 32, 5, 1, 0, IMAGE_SIZE);
        init_pool_layer(pool1, 2, 2, conv1.output_dim);
        init_conv_layer(conv2, 64, 5, 1, 0, pool1.output_dim);
        init_pool_layer(pool2, 2, 2, conv2.output_dim);
        int fcInputDim = pool2.output_dim * pool2.output_dim * conv2.num_filters;
        init_fc_layer(fc, fcInputDim, NUM_CLASSES);

        List<double[]> images = new ArrayList<>();
        List<Integer> labels = new ArrayList<>();
        List<double[]> validationImages = new ArrayList<>();
        List<Integer> validationLabels = new ArrayList<>();
        try {
            load_mnist_data(images, labels, validationImages, validationLabels);
        } catch (IOException e) {
            e.printStackTrace();
            return;
        }

        // Scratch buffers reused for every sample.
        double[] conv1Output = new double[conv1.num_filters * conv1.output_dim * conv1.output_dim];
        double[] pool1Output = new double[conv1.num_filters * pool1.output_dim * pool1.output_dim];
        double[] conv2Output = new double[conv2.num_filters * conv2.output_dim * conv2.output_dim];
        double[] pool2Output = new double[conv2.num_filters * pool2.output_dim * pool2.output_dim];
        double[] fcInput = new double[fc.input_dim];
        double[] fcOutput = new double[NUM_CLASSES];

        final int num_epochs = 1000;
        for (int epoch = 0; epoch < num_epochs; ++epoch) {
            System.out.println("Epoch " + epoch + " started.");

            // One SGD pass over the training set.
            int limit = Math.min(images.size(), labels.size());
            for (int i = 0; i < limit; ++i) {
                // Forward pass (fills all scratch buffers).
                predict(images.get(i), conv1, pool1, conv2, pool2, fc,
                        conv1Output, pool1Output, conv2Output, pool2Output, fcInput, fcOutput);

                // One-hot encode the label.
                int[] target = new int[NUM_CLASSES];
                target[labels.get(i)] = 1;

                // Loss + parameter update.
                double loss = cross_entropy_loss(fcOutput, target);
                backpropagation(images.get(i), conv1, conv1Output, pool1, pool1Output,
                        conv2, conv2Output, pool2, pool2Output, fc, fcInput, fcOutput, target);

                if (i % 100 == 0) { // progress report every 100 samples
                    System.out.println("Epoch " + epoch + ", Batch " + i + ", Loss: " + loss);
                }
            }

            // Training-set accuracy.
            if (images.isEmpty()) {
                System.out.println("No training images available.");
            } else {
                int trainCorrect = 0;
                for (int i = 0; i < images.size(); i++) {
                    if (predict(images.get(i), conv1, pool1, conv2, pool2, fc,
                            conv1Output, pool1Output, conv2Output, pool2Output, fcInput, fcOutput) == labels.get(i)) {
                        trainCorrect++;
                    }
                }
                double trainAccuracy = (double) trainCorrect / images.size();
                System.out.println("Epoch " + epoch + " Train Accuracy: " + trainAccuracy);
            }

            // Validation-set accuracy.
            if (validationImages.isEmpty()) {
                System.out.println("No validation images available.");
            } else {
                int validationCorrect = 0;
                for (int i = 0; i < validationImages.size(); i++) {
                    if (predict(validationImages.get(i), conv1, pool1, conv2, pool2, fc,
                            conv1Output, pool1Output, conv2Output, pool2Output, fcInput, fcOutput) == validationLabels.get(i)) {
                        validationCorrect++;
                    }
                }
                double validationAccuracy = (double) validationCorrect / validationImages.size();
                System.out.println("Epoch " + epoch + " Validation Accuracy: " + validationAccuracy);
            }
        }

        // No explicit cleanup needed — the JVM's garbage collector reclaims the buffers.
    }
}