

#include "Nnet.h"

Nnet::Nnet()
{
    // Demo run: push a fixed 4-channel 5x5 feature map through one
    // convolution layer followed by one max-pooling layer.
    vector<vector<vector<double>>> demoInput = {
        {{3,3,2,1,0},{0,0,1,3,1},{0,0,1,3,1},{3,1,2,2,3},{2,0,0,2,2}},
        {{3,1,2,2,3},{2,0,0,2,2},{0,0,1,3,1},{2,0,0,0,1},{2,0,0,0,1}},
        {{2,0,0,0,1},{2,0,0,0,1},{0,0,1,3,1},{2,0,0,0,1},{0,0,1,3,1}},
        {{3,3,2,1,0},{0,0,1,3,1},{0,0,1,3,1},{3,1,2,2,3},{2,0,0,2,2}}};

    // Three identical 3x3 horizontal-edge kernels (one per output filter).
    vector<vector<double>> edgeKernel = {{1,2,1},{0,0,0},{-1,-2,-1}};
    Kernels demoKernels = {{edgeKernel, edgeKernel, edgeKernel}, 3, 3};

    // conv: 3 filters, stride {2,1}, 'same' padding, biases {0,1,2}, no ReLU.
    vector<vector<vector<double>>> convOut =
        convN(demoInput, 3, demoKernels, {2,1}, same, {0, 1, 2}, false);
    // pool: 3x2 window, stride 1x1.
    poolN(convOut, {3,2}, {1,1});
}

void Nnet::setInput1(vector<vector<int>> img)
{
    // Stores a single-channel (grayscale) image as the network input and
    // echoes it to stdout for inspection. Sets IsInput only on success.
    // Bug fix: the original evaluated img[0].size() before checking for an
    // empty image, which is undefined behavior when img is empty.
    IsInput = false;

    if (img.empty() || img[0].empty())
    {
        qDebug()<<"is empty!";
        return;
    }

    Height = img.size();
    Width = img[0].size();
    Channels = 1;
    Img_1 = img;
    IsInput = true;

    // Dump the stored image so the caller can verify it visually.
    for (int y = 0; y<Height; y++) {
        for (int x = 0; x<Width; x++) {
            std::cout<<Img_1[y][x]<<"\t";
        }
        std::cout<<std::endl;
    }
}

void Nnet::setInput3(vector<vector<vector<int>>> img)
{
    // Stores a 3-channel image as the network input and echoes channel 0 to
    // stdout for inspection. Sets IsInput only on success.
    // Bug fix: the original evaluated img[0].size() / img[0][0].size() before
    // any emptiness check — undefined behavior on an empty image.
    IsInput = false;

    if (img.empty() || img[0].empty() || img[0][0].empty())
    {
        qDebug()<<"is empty!";
        return;
    }

    Height = img[0].size();
    Width = img[0][0].size();
    Channels = 3;
    Img_3 = img;
    IsInput = true;

    // Dump channel 0 so the caller can verify the input visually.
    for (int y = 0; y<Height; y++) {
        for (int x = 0; x<Width; x++) {
            std::cout<<Img_3[0][y][x]<<"\t";
        }
        std::cout<<std::endl;
    }
}

vector<vector<vector<double>>> Nnet::convN(vector<vector<vector<double>>> input, int filters, Kernels kernels, Strides strides, Padding padding, vector<double> bias, bool activation)
{
    // Convolution layer.
    //   input      : feature maps, indexed [channel][row][col]
    //   filters    : number of output feature maps; kernels.kernel_values must
    //                hold one 2-D kernel per filter (weight sharing: the same
    //                2-D kernel is applied to every input channel and summed)
    //   kernels    : kernel values plus their width (X) and height (Y)
    //   strides    : X = horizontal step, Y = vertical step
    //   padding    : valid (no padding, output shrinks) or same (zero-pad so
    //                the output is allocated at the input's H*W)
    //   bias       : one scalar bias per filter
    //   activation : when true, applies ReLU (negative sums clamped to 0)
    // Returns the convolved maps, [filters][outHeight][outWidth].
    // NOTE(review): the sliding window uses (f-1)/2 half-sizes, which assumes
    // odd kernel dimensions; with 'same' padding and stride > 1 the output is
    // allocated at height*width but the loop visits fewer positions, leaving
    // trailing rows/cols untouched — confirm intended usage.
    qDebug()<<"******conv layer******";
    vector<vector<vector<double>>> output;
    output.resize(filters);

    // Input dimensions: channel count, height, width.
    int channels = input.size();
    int height = input[0].size();
    int width = input[0][0].size();
    // Kernel values and size.
    vector<vector<vector<double>>> kernelAll = kernels.kernel_values;
    int fx = kernels.X;
    int fy = kernels.Y;
    // Strides.
    int sx = strides.X;
    int sy = strides.Y;
    // Compute the padding amounts and allocate the output buffers.
    int px = 0, py = 0;
    switch (padding)
    {
        case valid:
        {
            // No padding: the output shrinks.
            int outHeight = (height - fy)/sy + 1;
            int outWidth = (width - fx)/sx + 1;
            px = 0;
            py = 0;
            // Allocate the output maps.
            for (int filter = 0; filter< filters; filter++) {
                output[filter].resize(outHeight);
            }
            for (int filter = 0; filter< filters; filter++) {
                for (int h = 0; h<outHeight; h++) {
                    output[filter][h].resize(outWidth);
                }
            }
            break;
        }
        case same:
        {
            // Output allocated at the input size; pad the input to compensate.
            // Derive the padding amounts from the size inequalities.
            double lowx = double((width - 1) * sx + fx - width) / 2.0;
            double lowy = double((height - 1) * sy + fy - height) / 2.0;
            double upx = double(width*(sx-1) + fx) / 2.0;
            double upy = double(height*(sy-1) + fy) / 2.0;
            if(((lowx-floor(lowx))==0))
            {
                px = floor(lowx);
            }else if (((lowx-floor(lowx))>0)&&((floor(lowx)+1)<upx))
            {
                px = floor(lowx)+1;
            }

            if(((lowy-floor(lowy))==0))
            {
                py = floor(lowy);
            }else if (((lowy-floor(lowy))>0)&&((floor(lowy)+1)<upy))
            {
                py = floor(lowy)+1;
            }
            // Fall back to no padding if px/py came out invalid.
            if(px<=0) px = 0;
            if(py<=0) py = 0;

            // Allocate the output maps (same H*W as the input).
            for (int filter = 0; filter< filters; filter++) {
                output[filter].resize(height);
            }
            for (int filter = 0; filter< filters; filter++) {
                for (int h = 0; h<height; h++) {
                    output[filter][h].resize(width);
                }
            }
            break;
        }
        default:
            qDebug()<<"padding error:for valid or same...";
            break;
    }
    // Zero-pad the input.
    // Allocate the padded buffer (value-initialized to 0).
    vector<vector<vector<double>>> inputPad;
    inputPad.resize(channels);
    for (int c = 0; c< channels; c++) {
        inputPad[c].resize(height+py*2);
    }
    for (int c = 0; c< channels; c++) {
        for (int h = 0; h<inputPad[0].size(); h++) {
            inputPad[c][h].resize(width+px*2);
        }
    }
    // Copy the original features into the center of the padded buffer.
    for (int y = 0; y<height; y++) {
        for (int x = 0; x<width; x++) {
            for (int c = 0; c<channels; c++) {
                inputPad[c][y+py][x+px] = input[c][y][x];
            }
        }
    }

    // Run the convolution.
    int half_fx = (fx-1) / 2;
    int half_fy = (fy-1) / 2;

    // One pass per filter; produces `filters` feature maps in total.
    // TODO: random kernel initialization and parallel execution.
    // Kernel depth conceptually matches the input channel count (see below).

    for (int filter = 0; filter<filters; filter++) {

        // Weight sharing: although the input has several channels, the kernel
        // for one output feature would have one layer per channel; every layer
        // holds the same values, so a single 2-D kernel is stored per filter.
        // kernelAll is therefore 3-D: one 2-D kernel for each of the `filters`
        // new feature maps produced by this convolution.
        vector<vector<double>> thisKernel = kernelAll[filter];
        // One bias scalar per filter, shared across the whole feature map.
        double thisBias = bias[filter];

        // Produce one new feature map by sliding the kernel center.
        int outputNumberY = 0, outputNumberX = 0;
        for (int y = half_fy; y<(inputPad[0].size()-half_fy); y = y+sy) {
            outputNumberY++;
            outputNumberX = 0;
            for (int x = half_fx; x<(inputPad[0][0].size()-half_fx); x = x+sx) {
                outputNumberX++;
                // Weighted sum over the neighborhood of the center point,
                // accumulated across all input channels.
                double sumRes = 0.0;
                for (int yy = -half_fy; yy < half_fy+1; yy++) {
                    for (int xx = -half_fx; xx < half_fx+1; xx++) {
                        for (int c = 0; c < inputPad.size(); c++) {
                            double inputPad_pixel = inputPad[c][y+yy][x+xx];
                            double kernel_value = thisKernel[yy+half_fy][xx+half_fx];
                            sumRes = sumRes + inputPad_pixel * kernel_value;
                        }
                    }
                }
                // output[filter] is this filter's feature map.
                // ReLU activation ("Lu" typo in the original comment).
                if((activation)&&((sumRes + thisBias)<0))
                {
                    output[filter][outputNumberY-1][outputNumberX-1] = 0;
                }else
                {
                    output[filter][outputNumberY-1][outputNumberX-1] = sumRes + thisBias;
                }
            }
        }
        qDebug()<<"all: "<<filters<<" features::now "<<filter+1<<" finished";
    }

    qDebug()<<"*************";
    qDebug()<<"输入::channels:"<<channels<<" || H*W:"<<height<<"*"<<width;
    qDebug()<<"卷积::"<<fx<<"*"<<fy;
    qDebug()<<"步长::"<<sx<<"*"<<sy;
    qDebug()<<"填充::"<<px<<"*"<<py;
    qDebug()<<"输出::channels:"<<output.size()<<" || H*W:"<<output[0].size()<<"*"<<output[0][0].size();
    qDebug()<<"*************";
    /*
    for (int y = 0; y<input[0].size(); y++) {
        for (int x = 0; x<input[0][0].size(); x++) {
            std::cout<<input[0][y][x]<<"\t";
        }
        std::cout<<std::endl;
    }
    qDebug()<<"*************";
    for (int y = 0; y<inputPad[0].size(); y++) {
        for (int x = 0; x<inputPad[0][0].size(); x++) {
            std::cout<<inputPad[0][y][x]<<"\t";
        }
        std::cout<<std::endl;
    }
    qDebug()<<"*************";

    for (int c = 0; c<filters; c++) {
        qDebug()<<"***all: "<<filters<<" features::now "<<c+1<<" show***";
        for (int y = 0; y<output[0].size(); y++) {
            for (int x = 0; x<output[0][0].size(); x++) {
                std::cout<<output[c][y][x]<<"\t";
            }
            std::cout<<std::endl;
        }
    }
    */
    return output;
}

vector<vector<vector<double>>> Nnet::poolN(vector<vector<vector<double>>> input, PoolSize poolSize,Strides strides)
{
    // Max-pooling layer.
    //   input    : feature maps, indexed [channel][row][col]
    //   poolSize : pooling window, X = width, Y = height
    //   strides  : X = horizontal step, Y = vertical step
    // Returns the pooled maps, [channels][outHeight][outWidth].
    qDebug()<<"******pool layer******";

    // Input dimensions: channel count, height, width.
    int channels = input.size();
    int height = input[0].size();
    int width = input[0][0].size();

    // Pooling window size.
    int poolX = poolSize.X;
    int poolY = poolSize.Y;

    // Strides.
    int sx = strides.X;
    int sy = strides.Y;

    // Output size.
    // Bug fix: the height must shrink by the window HEIGHT (poolY) and divide
    // by the vertical stride (sy); the original mixed up the X/Y terms, which
    // breaks any non-square window or anisotropic stride.
    int outHeight = (height - poolY) / sy + 1;
    int outWidth = (width - poolX) / sx + 1;

    // Mask over the INPUT marking which position produced each max (kept for
    // a future backprop pass).
    // Bug fix: the original copied the input and then clear()ed the copy, so
    // the later output_Label[c][y][x] = 1 wrote into an empty vector (UB).
    vector<vector<vector<double>>> output_Label(channels,
        vector<vector<double>>(height, vector<double>(width, 0.0)));

    vector<vector<vector<double>>> output(channels,
        vector<vector<double>>(outHeight, vector<double>(outWidth, 0.0)));

    qDebug()<<"*************";
    qDebug()<<"输入::channels:"<<channels<<" || H*W:"<<height<<"*"<<width;
    qDebug()<<"区域::"<<poolX<<"*"<<poolY;
    qDebug()<<"步长::"<<sx<<"*"<<sy;
    qDebug()<<"输出::channels:"<<output.size()<<" || H*W:"<<output[0].size()<<"*"<<output[0][0].size();

    // Max pooling.
    for (int c = 0; c<channels; c++) {
        qDebug()<<"***all: "<<channels<<" features::now "<<c+1<<" finished***";
        for (int y = 0; y<outHeight; y++) {
            // Top-left corner of the current window in input coordinates.
            int lu_y = sy * y;
            for (int x = 0; x<outWidth; x++) {
                int lu_x = x * sx;
                // Bug fix: seed the running max with the window's first
                // element (the original used 0.0, which is wrong for
                // all-negative windows) and reset the argmax per window
                // (the original carried stale outX/outY across windows).
                double maxValue = input[c][lu_y][lu_x];
                int outY = lu_y;
                int outX = lu_x;
                for (int yy = lu_y; yy<(lu_y+poolY); yy++) {
                    for (int xx = lu_x; xx<(lu_x+poolX); xx++) {
                        if(input[c][yy][xx]>maxValue)
                        {
                            maxValue = input[c][yy][xx];
                            outX = xx;
                            outY = yy;
                        }
                    }
                }
                output_Label[c][outY][outX] = 1;
                output[c][y][x] = maxValue;
            }
        }
    }

    return output;
}

vector<double> fc(vector<vector<vector<double>>> input,int neuronsNums, vector<vector<double>> w, vector<double> bias)
{
    // Fully-connected layer followed by softmax.
    //   input       : feature maps, [channels][height][width], flattened here
    //   neuronsNums : number of output neurons (classes)
    //   w           : weight matrix, [neuronsNums][channels*height*width]
    //   bias        : one bias per neuron
    // Returns the softmax probabilities, size neuronsNums.
    qDebug()<<"******fc layer******";
    // Input dimensions: channel count, height, width.
    int channels = input.size();
    int height = input[0].size();
    int width = input[0][0].size();
    // Flatten to one dimension.
    int oneDimSize = channels * height * width;
    vector<double> input_OneDim;
    // Bug fix: the original called resize(oneDimSize) and THEN push_back(),
    // producing a vector of 2*oneDimSize whose first half was all zeros; the
    // weighted sum below only reads indices [0, oneDimSize) and therefore
    // never saw the actual input. reserve() keeps the flattened values at the
    // front without the spurious zero prefix.
    input_OneDim.reserve(oneDimSize);

    for (int c = 0; c<channels; c++) {
        for (int y = 0; y<height; y++) {
            for (int x = 0; x<width; x++) {
                input_OneDim.push_back(input[c][y][x]);
            }
        }
    }
    // Affine layer: y_i = w_i . x + b_i.
    vector<double> output(neuronsNums, 0.0);
    for (int i = 0; i<neuronsNums; i++) {
        double this_y = 0;
        for (int j = 0; j<oneDimSize; j++) {
            this_y = this_y + w[i][j] * input_OneDim[j];
        }
        output[i] = this_y + bias[i];
    }

    // Softmax layer.
    double allExp = 0.0;
    for (size_t i = 0; i<output.size(); i++) {
        allExp = allExp + exp(output[i]);
    }

    for (size_t i = 0; i<output.size(); i++) {
        output[i] = exp(output[i]) / allExp;
    }
    return output;
}


void Nnet::cnn()
{
    // Handwritten-digit recognition pipeline (LeNet-style skeleton).
    // NOTE(review): input, kernels, weights, biases and label are all left
    // empty here, so this routine documents the intended architecture; it
    // needs real data wired in before it can actually run.
    // Input: 28*28.
    vector<vector<vector<double>>> input;
    // Conv C1: 6 kernels 5*5, stride 1*1, valid, ReLU -> 6*24*24.
    Kernels kernels_C1;
    vector<double> bias_C1;
    vector<vector<vector<double>>> Y_C1 = convN(input, 6, kernels_C1,{1,1},valid, bias_C1, true);
    // Pool S2 -> 6*12*12.
    vector<vector<vector<double>>> Y_S2 = poolN(Y_C1, {2,2}, {2,2});
    // Conv C3: 12 kernels 5*5, stride 1*1, valid, ReLU -> 12*8*8.
    Kernels kernels_C3;
    vector<double> bias_C3;
    vector<vector<vector<double>>> Y_C3 = convN(Y_S2, 12, kernels_C3,{1,1},valid, bias_C3, true);
    // Pool S4 -> 12*4*4.
    vector<vector<vector<double>>> Y_S4 = poolN(Y_C3, {2,2}, {2,2});
    // Fully-connected layer, 10 classes.
    vector<vector<double>> w_FC;
    vector<double> bias_FC;
    vector<double> Y = fc(Y_S4,10, w_FC, bias_FC);
    // Cross-entropy loss E = -sum(label_i * log(Y_i)).
    vector<int> label;
    double E = 0.0;
    for (size_t i = 0; i<label.size(); i++) {
        E = E - label[i] * log(Y[i]);
    }

    // Flatten the last pooled features (the FC layer's input) for backprop.
    int channels = Y_S4.size();
    int height = Y_S4[0].size();
    int width = Y_S4[0][0].size();
    int oneDimSize = channels * height * width;
    vector<double> X;
    // Bug fix: the original resize()d and then push_back()ed, leaving
    // oneDimSize leading zeros and shifting the real values past the X[j]
    // indices used in the weight update below.
    X.reserve(oneDimSize);

    for (int c = 0; c<channels; c++) {
        for (int y = 0; y<height; y++) {
            for (int x = 0; x<width; x++) {
                X.push_back(Y_S4[c][y][x]);
            }
        }
    }

    // Backpropagation: update the FC parameters.
    // Softmax + cross-entropy gradient w.r.t. the affine output is Y - label.
    double alpha = 0.0; // learning rate
    for (size_t i = 0; i<w_FC.size(); i++) {
        bias_FC[i] = bias_FC[i] - alpha * (Y[i]-label[i]);
        for (size_t j = 0; j<w_FC[0].size(); j++) {
            w_FC[i][j] = w_FC[i][j] - alpha * (Y[i]-label[i]) * X[j];
        }
    }

    // C3 parameter update:
    // TODO — currently skips the upsampling step and would use the pre-pool
    // feature maps directly.
}
