#include "Anet.h"

Anet::Anet()
{
    // Intentionally empty: the network is built and trained by an
    // explicit call to Init().
    //Init();
}

Conv_Layer Anet::ConvLayerInit(int inChannels, int inHeight, int inWidth,
                               int outChannels, int kernelHeight, int kernelWidth,
                               Strides strides,
                               Padding padding)
{
    // Build a convolution layer descriptor: allocates the input/output
    // buffers, randomly initializes the kernels in [-1, 1], and derives
    // the padding width and output size from the padding mode.
    Conv_Layer c1;

    // Zero-filled input buffer: inChannels x inHeight x inWidth.
    c1.input = vector<vector<vector<double>>>(
                inChannels, vector<vector<double>>(inHeight, vector<double>(inWidth, 0.0)));

    c1.height = inHeight;       // input height
    c1.width = inWidth;         // input width
    c1.inChannels = inChannels; // number of input feature maps

    // Kernels: outChannels x inChannels x kernelHeight x kernelWidth.
    vector<vector<vector<vector<double>>>> kernels(
                outChannels,
                vector<vector<vector<double>>>(
                    inChannels,
                    vector<vector<double>>(kernelHeight, vector<double>(kernelWidth, 0.0))));

    // Seed the RNG once per process. The original re-seeded with
    // time(NULL) on every call, so layers created within the same second
    // started from the identical random sequence (identical weights).
    static bool seeded = false;
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = true;
    }
    // Uniform random initialization in [-1, 1].
    for (int o = 0; o<outChannels; o++) {
        for (int i = 0; i<inChannels; i++) {
            for (int h = 0; h<kernelHeight; h++) {
                for (int w = 0; w<kernelWidth; w++) {
                    kernels[o][i][h][w] = rand() / double(RAND_MAX) * 2.0 - 1.0;
                }
            }
        }
    }

    c1.kernels = kernels;
    c1.kernels_Height = kernelHeight; // kernel height
    c1.kernels_Width = kernelWidth;   // kernel width

    c1.strides = strides; // horizontal / vertical stride
    c1.padding = padding; // padding mode

    // Compute the padding width and the output size.
    int px = 0, py = 0;
    int outHeight = 0, outWidth = 0;
    switch (padding)
    {
        case valid:
        {
            // No padding: the output shrinks.
            outHeight = (c1.height - c1.kernels_Height)/strides.Y + 1;
            outWidth = (c1.width - c1.kernels_Width)/strides.X + 1;
            px = 0;
            py = 0;
            break;
        }
        case same:
        {
            // Output keeps the input size; derive the padding from the
            // SAME-padding inequality bounds.
            outHeight = c1.height;
            outWidth = c1.width;
            double lowx = double((c1.width - 1) * strides.X + c1.kernels_Width - c1.width) / 2.0;
            double lowy = double((c1.height - 1) * strides.Y + c1.kernels_Height - c1.height) / 2.0;
            double upx = double(c1.width*(strides.X-1) + c1.kernels_Width) / 2.0;
            double upy = double(c1.height*(strides.Y-1) + c1.kernels_Height) / 2.0;
            if((lowx-floor(lowx))==0)
            {
                px = floor(lowx);
            }else if (((lowx-floor(lowx))>0)&&((floor(lowx)+1)<upx))
            {
                px = floor(lowx)+1;
            }

            if((lowy-floor(lowy))==0)
            {
                py = floor(lowy);
            }else if (((lowy-floor(lowy))>0)&&((floor(lowy)+1)<upy))
            {
                py = floor(lowy)+1;
            }
            // Fall back to no padding if the bounds were inconsistent.
            if(px<=0) px = 0;
            if(py<=0) py = 0;
            break;
        }
        default:
            qDebug()<<"padding error:for valid or same...";
            break;
    }

    c1.pad_X = px; // horizontal padding width
    c1.pad_Y = py; // vertical padding width

    c1.outChannels = outChannels; // number of output feature maps

    c1.bias = vector<double>(outChannels, 0.0); // one bias per output map

    // Output buffers: outChannels x outHeight x outWidth.
    vector<vector<vector<double>>> y(
                outChannels, vector<vector<double>>(outHeight, vector<double>(outWidth, 0.0)));
    c1.y = y; // pre-activation output
    c1.Y = y; // post-activation (ReLU) output
    c1.d = y; // local gradient for backprop
    return c1;
}

Pool_Layer Anet::PoolLayerInit(int inChannels, int inHeight, int inWidth,
                               PoolSize poolSize,
                               Strides strides)
{
    // Build a pooling layer descriptor; pooling keeps the channel count
    // and shrinks the spatial size by window/stride.
    Pool_Layer p1;
    p1.height = inHeight;
    p1.width = inWidth;
    p1.inChannels = inChannels;
    p1.outChannels = inChannels; // pooling does not change channel count

    p1.poolSize = poolSize; // pooling window size
    p1.strides = strides;   // pooling stride

    // Output size. BUG FIX: the original used poolSize.X/strides.X for the
    // height and .Y for the width -- the opposite of what poolN() computes.
    // It only worked because windows and strides were square. Match poolN:
    // Y along the height, X along the width.
    int outHeight = (inHeight - poolSize.Y) / strides.Y + 1;
    int outWidth = (inWidth - poolSize.X) / strides.X + 1;

    // Pooled output and its gradient buffer: inChannels x outHeight x outWidth.
    vector<vector<vector<double>>> output(
                inChannels, vector<vector<double>>(outHeight, vector<double>(outWidth, 0.0)));
    p1.y = output; // pooled values
    p1.d = output; // local gradient for backprop

    // Argmax marks at the *input* resolution, consumed by upSample()
    // during backprop.
    p1.maxPosition = vector<vector<vector<double>>>(
                inChannels, vector<vector<double>>(inHeight, vector<double>(inWidth, 0.0)));
    return p1;
}

Out_Layer Anet::OutLayerInit(int inChannels, int inHeight, int inWidth,int neuronsNums)
{
    // Build the fully connected output layer: a flattened input vector,
    // a neuronsNums x (c*h*w) weight matrix randomly initialized in
    // [-1, 1], and per-neuron bias/output/gradient vectors.
    Out_Layer o1;
    o1.height = inHeight;
    o1.width = inWidth;
    o1.inChannels = inChannels;

    const int inputSize = inHeight * inWidth * inChannels;

    // Flattened input (c*h*w elements), filled by fc().
    o1.x = vector<double>(inputSize, 0.0);

    o1.neuronsNums = neuronsNums;

    // Weights: neuronsNums x inputSize.
    vector<vector<double>> w(neuronsNums, vector<double>(inputSize, 0.0));

    // Seed once per process (see ConvLayerInit: re-seeding per call made
    // same-second initializations replay identical random sequences).
    static bool seeded = false;
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = true;
    }
    // Uniform random initialization in [-1, 1].
    for (int i = 0; i<neuronsNums; i++) {
        for (int j = 0; j<inputSize; j++) {
            w[i][j] = rand() / double(RAND_MAX) * 2.0 - 1.0;
        }
    }
    o1.w = w;

    // One bias per neuron; d/y/Y share the same length.
    vector<double> bias(neuronsNums, 0.0);
    o1.bias = bias;
    o1.d = bias; // local gradient
    o1.y = bias; // pre-softmax output
    o1.Y = bias; // post-softmax output
    return o1;
}

vector<vector<vector<double>>> minmaxImg(QImage *m_Img)
{
    // Convert an image into a 3 x H x W tensor (R, G, B planes) and
    // min-max normalize each channel independently to [0, 1].
    //
    // BUG FIXES vs. the original:
    //  - the buffer was hard-coded to 3x28x28 while the fill loops ran
    //    over the real image size, overflowing for anything larger than
    //    28x28; the buffer is now sized from the image itself.
    //  - a flat channel (max == min) divided by zero; the range is now
    //    clamped to 1 so a flat channel normalizes to all zeros.
    const int height = m_Img->height();
    const int width = m_Img->width();

    vector<vector<vector<double>>> Img(
                3, vector<vector<double>>(height, vector<double>(width, 0.0)));

    // Track the per-channel min/max while copying the pixels out.
    double minR = 255.0, minG = 255.0, minB = 255.0;
    double maxR = 0.0, maxG = 0.0, maxB = 0.0;
    for(int y = 0; y<height; y++)
    {
        QRgb * line = (QRgb *)m_Img->scanLine(y);
        for(int x = 0; x<width; x++)
        {
            Img[0][y][x] = qRed(line[x]);
            Img[1][y][x] = qGreen(line[x]);
            Img[2][y][x] = qBlue(line[x]);

            minR = min(minR, double(qRed(line[x])));
            minG = min(minG, double(qGreen(line[x])));
            minB = min(minB, double(qBlue(line[x])));

            maxR = max(maxR, double(qRed(line[x])));
            maxG = max(maxG, double(qGreen(line[x])));
            maxB = max(maxB, double(qBlue(line[x])));
        }
    }

    // Guard against a flat channel: keep the denominator non-zero.
    const double rangeR = (maxR > minR) ? (maxR - minR) : 1.0;
    const double rangeG = (maxG > minG) ? (maxG - minG) : 1.0;
    const double rangeB = (maxB > minB) ? (maxB - minB) : 1.0;

    // Per-channel min-max normalization.
    for(int y = 0; y<height; y++)
    {
        for(int x = 0; x<width; x++)
        {
            Img[0][y][x] = (Img[0][y][x] - minR) / rangeR;
            Img[1][y][x] = (Img[1][y][x] - minG) / rangeG;
            Img[2][y][x] = (Img[2][y][x] - minB) / rangeB;
        }
    }
    return Img;
}


Net Anet::Init()
{
    // Build and train the network:
    //   3x28x28 -> C1 conv(6@5x5, valid) -> P2 maxpool(2x2)
    //           -> C3 conv(12@5x5, valid) -> P4 maxpool(2x2)
    //           -> O5 fully connected (3 classes, softmax).
    // Trains for 60 epochs over 15 images (5 per class), then prints
    // predictions for a few held-out images. Paths are hard-coded.
    Net net;
    net.C1 = ConvLayerInit(3,28,28,6,5,5,{1,1},valid);
    net.P2 = PoolLayerInit(6,24,24,{2,2},{2,2});
    net.C3 = ConvLayerInit(6,12,12,12,5,5,{1,1},valid);
    net.P4 = PoolLayerInit(12,8,8,{2,2},{2,2});
    net.O5 = OutLayerInit(12,4,4,3);

    qDebug()<<"Init finished.";

    // Stack-allocated and reused for every sample. The original leaked
    // five heap-allocated QImage objects.
    QImage img;

    int num = 0;
    while (num<60) {
        num++;
        for (int i = 0; i<15; i++) {
            // BUG FIX: the original built the file name with QString(i+1),
            // which creates a one-character string from the character with
            // CODE i+1 (a control character), not the decimal number.
            img.load("C:\\Users\\wxf\\Desktop\\test\\"+QString::number(i+1)+".png");
            // Images 1-5 are class 0, 6-10 class 1, 11-15 class 2.
            int label = i / 5;
            vector<double> t = {0,0,0};
            t[label] = 1;

            vector<vector<vector<double>>> sample = minmaxImg(&img);
            // BUG FIX: updataPara() reads C1.input for the C1 kernel
            // gradients; the original set it once (from 2.png) and never
            // refreshed it, so every C1 weight update used stale pixels.
            net.C1.input = sample;

            // Forward pass.
            convN(sample, net.C1);
            poolN(net.C1.Y, net.P2);
            convN(net.P2.y, net.C3);
            poolN(net.C3.Y, net.P4);
            fc(net.P4.y, net.O5);

            // Cross-entropy loss (diagnostic only, currently unused).
            double l = 0.0;
            for (int j = 0; j<(int)t.size(); j++) {
                l = l - log(net.O5.Y[j]) * t[j];
            }
            (void)l;

            bp(net, t);

            // Learning rate decays from 0.03 down to 0.001 across a batch.
            double study = 0.03 - 0.029*i / (15 - 1);
            updataPara(net, study, t);
        }
        qDebug()<<"***cycle::"<<num<<" finished***";
    }
    qDebug()<<net.O5.Y;
    qDebug()<<"+++++++++++++++";

    // Quick sanity predictions on a few held-out images.
    const vector<QString> testFiles = {"3.png", "22.png", "23.png", "25.png"};
    for (const QString &file : testFiles) {
        img.load("C:\\Users\\wxf\\Desktop\\test\\" + file);
        convN(minmaxImg(&img), net.C1);
        poolN(net.C1.Y, net.P2);
        convN(net.P2.y, net.C3);
        poolN(net.C3.Y, net.P4);
        fc(net.P4.y, net.O5);
        qDebug()<<net.O5.Y;
    }

    return net;
}

void Anet::convN(vector<vector<vector<double>>> input, Conv_Layer &cN)
{
    // Forward pass of one convolution layer: zero-pads the input,
    // correlates it with every kernel (summing over input channels),
    // adds the per-filter bias, and applies ReLU.
    // Results: cN.y = pre-activation, cN.Y = post-ReLU activation.

    // Allocate the zero-padded buffer: (height + 2*pad_Y) x (width + 2*pad_X).
    vector<vector<vector<double>>> inputPad;
    inputPad.resize(cN.inChannels);
    for (int c = 0; c< inputPad.size(); c++) {
        inputPad[c].resize(cN.height+cN.pad_Y*2);
    }
    for (int c = 0; c< inputPad.size(); c++) {
        for (int h = 0; h<inputPad[0].size(); h++) {
            inputPad[c][h].resize(cN.width+cN.pad_X*2);
        }
    }
    // Copy the original features into the center of the padded buffer.
    for (int y = 0; y<cN.height; y++) {
        for (int x = 0; x<cN.width; x++) {
            for (int c = 0; c<cN.inChannels; c++) {
                inputPad[c][y+cN.pad_Y][x+cN.pad_X] = input[c][y][x];
            }
        }
    }

    // Convolution. The window is centered at (y, x), so this assumes odd
    // kernel sizes (the network only uses 5x5 kernels).
    int half_fx = (cN.kernels_Width-1) / 2;
    int half_fy = (cN.kernels_Height-1) / 2;
    for (int filter = 0; filter<cN.outChannels; filter++) {
        // One kernel stack (inChannels x kH x kW) per output feature map;
        // the per-channel results are summed into a single output plane.
        vector<vector<vector<double>>> thisKernel = cN.kernels[filter];
        // One scalar bias per output feature map.
        double thisBias = cN.bias[filter];

        // Produce one output feature map, stepping by the layer strides.
        // outputNumberY/X count output cells (1-based inside the loop).
        int outputNumberY = 0, outputNumberX = 0;
        for (int y = half_fy; y<(inputPad[0].size()-half_fy); y = y+cN.strides.Y) {
            outputNumberY++;
            outputNumberX = 0;
            for (int x = half_fx; x<(inputPad[0][0].size()-half_fx); x = x+cN.strides.X) {
                outputNumberX++;
                // Weighted sum over the window centered at (y, x),
                // accumulated across all input channels.
                double sumRes = 0.0;
                for (int yy = -half_fy; yy < half_fy+1; yy++) {
                    for (int xx = -half_fx; xx < half_fx+1; xx++) {
                        for (int c = 0; c < inputPad.size(); c++) {
                            double inputPad_pixel = inputPad[c][y+yy][x+xx];
                            double kernel_value = thisKernel[c][yy+half_fy][xx+half_fx];
                            sumRes = sumRes + inputPad_pixel * kernel_value;
                        }
                    }
                }
                // Pre-activation output.
                cN.y[filter][outputNumberY-1][outputNumberX-1] = sumRes + thisBias;
                // ReLU activation: clamp negatives to zero.
                if((sumRes + thisBias)<0)
                {
                    cN.Y[filter][outputNumberY-1][outputNumberX-1] = 0;
                }else
                {
                    cN.Y[filter][outputNumberY-1][outputNumberX-1] = sumRes + thisBias;
                }
            }
        }
        //qDebug()<<"all: "<<cN.outChannels<<" features::now "<<filter+1<<" finished";
    }
}

void Anet::poolN(vector<vector<vector<double>>> input, Pool_Layer &pN)
{
    // Max-pooling forward pass. pN.maxPosition records, at the input
    // resolution, which cell won each window; upSample() uses those marks
    // to route gradients back during backprop.
    int outHeight = (pN.height - pN.poolSize.Y) / pN.strides.Y + 1;
    int outWidth = (pN.width - pN.poolSize.X) / pN.strides.X + 1;
    for (int c = 0; c<pN.inChannels; c++) {
        // BUG FIX: clear the previous call's argmax marks. The original
        // never reset them, so stale 1s accumulated across samples and
        // corrupted the gradients routed back through upSample().
        for (int y = 0; y<pN.height; y++) {
            for (int x = 0; x<pN.width; x++) {
                pN.maxPosition[c][y][x] = 0;
            }
        }
        for (int y = 0; y<outHeight; y++) {
            int lu_y = pN.strides.Y * y; // window top edge

            for (int x = 0; x<outWidth; x++) {
                int lu_x = x * pN.strides.X; // window left edge

                // BUG FIX: seed the max with the window's first element
                // instead of 0.0, so all-negative windows still produce a
                // real maximum and a valid (in-window) argmax; the original
                // also left outX/outY from the *previous* window in that case.
                double maxValue = input[c][lu_y][lu_x];
                int outY = lu_y, outX = lu_x;

                for (int yy = lu_y; yy<(lu_y+pN.poolSize.Y); yy++) {
                    for (int xx = lu_x; xx<(lu_x+pN.poolSize.X); xx++) {
                        if(input[c][yy][xx]>maxValue)
                        {
                            maxValue = input[c][yy][xx];
                            outY = yy;
                            outX = xx;
                        }
                    }
                }
                pN.maxPosition[c][outY][outX] = 1; // argmax mark
                pN.y[c][y][x] = maxValue;          // pooled value
            }
        }
    }
}

void Anet::fc(vector<vector<vector<double>>> input, Out_Layer &oL)
{
    // Fully connected output layer: flatten -> affine -> softmax.
    // Writes oL.x (flattened input), oL.y (logits) and oL.Y (probabilities).

    // Flatten c x h x w into a 1-D vector.
    oL.x.clear();
    for (int c = 0; c<oL.inChannels; c++) {
        for (int y = 0; y<oL.height; y++) {
            for (int x = 0; x<oL.width; x++) {
                oL.x.push_back(input[c][y][x]);
            }
        }
    }

    // Affine: y_i = w_i . x + b_i, with w of shape neuronsNums x x.size().
    for (int i = 0; i<oL.neuronsNums; i++) {
        double acc = 0.0;
        for (size_t j = 0; j<oL.x.size(); j++) {
            acc += oL.w[i][j] * oL.x[j];
        }
        oL.y[i] = acc + oL.bias[i];
    }

    // Numerically stable softmax. BUG FIX: the original exponentiated the
    // raw logits, which overflows exp() for large logits and produced the
    // NaNs the commented-out isnan() guard hints at. Shifting every logit
    // by the maximum is mathematically identical but cannot overflow.
    double maxLogit = oL.y.empty() ? 0.0 : oL.y[0];
    for (size_t i = 1; i<oL.y.size(); i++) {
        if (oL.y[i] > maxLogit) maxLogit = oL.y[i];
    }
    double allExp = 0.0;
    for (size_t i = 0; i<oL.Y.size(); i++) {
        allExp += exp(oL.y[i] - maxLogit);
    }
    for (size_t i = 0; i<oL.Y.size(); i++) {
        oL.Y[i] = exp(oL.y[i] - maxLogit) / allExp;
    }
}

vector<vector<vector<double>>> upSample(vector<vector<vector<double>>> maxPosition, vector<vector<vector<double>>> input, PoolSize poolSize)
{
    // Route each pooled gradient value in `input` back to the argmax cell
    // recorded in `maxPosition`. Cells that were not a window maximum keep
    // the value they already hold in maxPosition (i.e. zero).
    vector<vector<vector<double>>> res = maxPosition;
    const int fx = poolSize.X; // window width
    const int fy = poolSize.Y; // window height

    for (size_t c = 0; c<res.size(); c++) {
        const vector<vector<double>> &pooled = input[c];       // pooled gradients
        const vector<vector<double>> &marks = maxPosition[c];  // argmax flags

        for (size_t y = 0; y<pooled.size(); y++) {
            for (size_t x = 0; x<pooled[0].size(); x++) {
                // Scan the input-resolution window that produced pooled[y][x].
                for (int i = 0; i<fy; i++) {
                    for (int j = 0; j<fx; j++) {
                        const size_t yy = y*fy + i;
                        const size_t xx = x*fx + j;
                        // Skip cells that fall outside the marks buffer.
                        if (yy >= marks.size()) continue;
                        if (xx >= marks[0].size()) continue;
                        if (marks[yy][xx] == 1) {
                            res[c][yy][xx] = pooled[y][x];
                        }
                    }
                }
            }
        }
    }

    return res;
}

std::vector<std::vector<std::vector<double>>> derReLu(std::vector<std::vector<std::vector<double>>> input)
{
    // Elementwise ReLU derivative: 1 where input > 0, else 0.
    std::vector<std::vector<std::vector<double>>> grad = input;
    const std::size_t channels = input.size();
    const std::size_t rows = channels ? input[0].size() : 0;
    const std::size_t cols = rows ? input[0][0].size() : 0;
    for (std::size_t c = 0; c < channels; ++c) {
        for (std::size_t r = 0; r < rows; ++r) {
            for (std::size_t w = 0; w < cols; ++w) {
                grad[c][r][w] = (input[c][r][w] > 0) ? 1.0 : 0.0;
            }
        }
    }
    return grad;
}

std::vector<std::vector<double>> matMul(std::vector<std::vector<double>> x1,std::vector<std::vector<double>> x2)
{
    // Elementwise (Hadamard) product of two equally-sized matrices.
    // Note: despite the name, this is NOT a matrix multiplication.
    std::vector<std::vector<double>> result = x1;
    const std::size_t rows = x1.size();
    const std::size_t cols = rows ? x1[0].size() : 0;
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            result[r][c] = x1[r][c] * x2[r][c];
        }
    }
    return result;
}

double matSum(std::vector<std::vector<double>> x1)
{
    // Sum of every element of the matrix.
    double total = 0.0;
    const std::size_t rows = x1.size();
    const std::size_t cols = rows ? x1[0].size() : 0;
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            total += x1[r][c];
        }
    }
    return total;
}

std::vector<std::vector<double>> matAdd(std::vector<std::vector<double>> x1,std::vector<std::vector<double>> x2)
{
    // Elementwise sum of two equally-sized matrices.
    std::vector<std::vector<double>> result = x1;
    const std::size_t rows = x1.size();
    const std::size_t cols = rows ? x1[0].size() : 0;
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            result[r][c] = x1[r][c] + x2[r][c];
        }
    }
    return result;
}

std::vector<std::vector<double>> matSub(std::vector<std::vector<double>> x1,std::vector<std::vector<double>> x2)
{
    // Elementwise difference (x1 - x2) of two equally-sized matrices.
    std::vector<std::vector<double>> result = x1;
    const std::size_t rows = x1.size();
    const std::size_t cols = rows ? x1[0].size() : 0;
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            result[r][c] = x1[r][c] - x2[r][c];
        }
    }
    return result;
}

std::vector<std::vector<double>> matSubA(std::vector<std::vector<double>> x1,double x2)
{
    // Scale every element of x1 by the scalar x2.
    // Note: despite the "Sub" in the name, this multiplies -- callers use
    // it to apply the learning rate to a gradient matrix.
    std::vector<std::vector<double>> result = x1;
    const std::size_t rows = x1.size();
    const std::size_t cols = rows ? x1[0].size() : 0;
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < cols; ++c) {
            result[r][c] = x1[r][c] * x2;
        }
    }
    return result;
}

std::vector<std::vector<double>> rotate180(std::vector<std::vector<double>> x)
{
    // Return x rotated by 180 degrees (rows and columns both reversed).
    std::vector<std::vector<double>> result = x;
    const std::size_t rows = x.size();
    const std::size_t cols = rows ? x[0].size() : 0;
    for (std::size_t h = 0; h < rows; ++h) {
        for (std::size_t w = 0; w < cols; ++w) {
            result[h][w] = x[rows - 1 - h][cols - 1 - w];
        }
    }
    return result;
}

std::vector<std::vector<double>> convFull(std::vector<std::vector<double>> input, std::vector<std::vector<double>> kernel)
{
    // "Full" 2-D unit-stride correlation: the input is zero-padded by
    // (kernel-1) on every side, so the output is
    // (height + kernel_h - 1) x (width + kernel_w - 1).
    // Callers pass a 180-degree-rotated kernel, making this a true full
    // convolution as used in conv-layer backprop.
    //
    // BUG FIXES vs. the original:
    //  - the loop increments were `y = y++` / `x = x++`, which is UB before
    //    C++17 and a guaranteed infinite loop under C++17 sequencing rules;
    //  - the innermost `for (c < inputPad.size())` loop re-added the same
    //    2-D product once per padded row (there is no channel axis here),
    //    scaling every output value by the padded height.
    const int height = input.size();
    const int width = input[0].size();
    const int kernel_h = kernel.size();
    const int kernel_w = kernel[0].size();

    const int py = kernel_h - 1; // vertical zero-padding
    const int px = kernel_w - 1; // horizontal zero-padding

    // Zero-padded copy of the input.
    std::vector<std::vector<double>> inputPad(
                height + 2*py, std::vector<double>(width + 2*px, 0.0));
    for (int y = 0; y<height; y++) {
        for (int x = 0; x<width; x++) {
            inputPad[y+py][x+px] = input[y][x];
        }
    }

    const int out_h = height + kernel_h - 1;
    const int out_w = width + kernel_w - 1;
    std::vector<std::vector<double>> output(out_h, std::vector<double>(out_w, 0.0));

    // Slide the kernel over every valid window of the padded input.
    for (int oy = 0; oy<out_h; oy++) {
        for (int ox = 0; ox<out_w; ox++) {
            double sumRes = 0.0;
            for (int ky = 0; ky<kernel_h; ky++) {
                for (int kx = 0; kx<kernel_w; kx++) {
                    sumRes += inputPad[oy+ky][ox+kx] * kernel[ky][kx];
                }
            }
            output[oy][ox] = sumRes;
        }
    }
    return output;
}

std::vector<std::vector<double>> convValid(std::vector<std::vector<double>> input, std::vector<std::vector<double>> kernel)
{
    // "Valid" 2-D unit-stride correlation: no padding, so the output is
    // (H - kH + 1) x (W - kW + 1). Used for the kernel-gradient step.
    const int rows = (int)input.size() - (int)kernel.size() + 1;
    const int cols = (int)input[0].size() - (int)kernel[0].size() + 1;

    std::vector<std::vector<double>> out(rows, std::vector<double>(cols, 0.0));

    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            // Correlate the kernel with the window whose top-left is (r, c).
            double acc = 0.0;
            for (std::size_t ky = 0; ky < kernel.size(); ++ky) {
                for (std::size_t kx = 0; kx < kernel[0].size(); ++kx) {
                    acc += input[r + ky][c + kx] * kernel[ky][kx];
                }
            }
            out[r][c] = acc;
        }
    }
    return out;
}

void Anet::bp(Net &net, vector<double> t)
{
    // Backward pass for one sample with target one-hot vector t.
    // Each layer's local gradient is stored in its `d` member; the actual
    // parameter step happens later in updataPara().

    // Softmax + cross-entropy: dL/dy_i = Y_i - t_i (POST-softmax output).
    // BUG FIX: the original used the pre-softmax logits (y - t), which is
    // not the cross-entropy gradient and disagreed with the weight update
    // in updataPara(), which correctly uses Y.
    for (size_t i = 0; i<net.O5.y.size(); i++) {
        net.O5.d[i] = net.O5.Y[i] - t[i];
    }

    // Affine -> P4: dL/dx_j = sum_i d_i * w_ij.
    vector<double> dx;
    dx.resize(net.O5.x.size());
    for (size_t j = 0; j<net.O5.x.size(); j++) {
        double wj = 0.0;
        for (size_t i = 0; i<net.O5.y.size(); i++) {
            wj = wj + net.O5.d[i] * net.O5.w[i][j];
        }
        dx[j] = wj;
    }
    // Reshape the flat gradient back into P4's c x h x w layout.
    int number = 0;
    for (size_t c = 0; c<net.P4.d.size(); c++) {
        for (size_t h = 0; h<net.P4.d[0].size(); h++) {
            for (size_t w = 0; w<net.P4.d[0][0].size(); w++) {
                net.P4.d[c][h][w] = dx[number];
                number++;
            }
        }
    }

    // P4 -> C3: route the pooled gradients back to the argmax cells, then
    // apply the ReLU derivative of C3's pre-activation.
    vector<vector<vector<double>>> upSample_Res = upSample(net.P4.maxPosition, net.P4.d, net.P4.poolSize);
    vector<vector<vector<double>>> derReLu_Res = derReLu(net.C3.y);
    for (size_t c = 0; c<net.C3.Y.size(); c++) {
        net.C3.d[c] = matMul(upSample_Res[c], derReLu_Res[c]);
    }

    // C3 -> P2: full convolution of each C3 gradient plane with the
    // 180-degree-rotated kernel, summed over C3's output channels.
    for (size_t j = 0; j<net.P2.d.size(); j++) {
        vector<vector<double>> d_sum;
        d_sum.resize(net.P2.y[0].size());
        for (size_t hh = 0; hh<net.P2.y[0].size(); hh++) {
            d_sum[hh].resize(net.P2.y[0][0].size());
        }

        for (int i = 0; i<net.C3.outChannels; i++) {
            d_sum = matAdd(d_sum, convFull(net.C3.d[i], rotate180(net.C3.kernels[i][j])));
        }
        net.P2.d[j] = d_sum;
    }

    // P2 -> C1: same upsample + ReLU-derivative step as P4 -> C3.
    vector<vector<vector<double>>> upSample_Res1 = upSample(net.P2.maxPosition, net.P2.d, net.P2.poolSize);
    vector<vector<vector<double>>> derReLu_Res1 = derReLu(net.C1.y);
    for (size_t c = 0; c<net.C1.Y.size(); c++) {
        net.C1.d[c] = matMul(upSample_Res1[c], derReLu_Res1[c]);
    }
}

void Anet::updataPara(Net &net, double study, vector<double> t)
{
    // SGD step with learning rate `study`; layer gradients were stored in
    // each layer's `d` member by bp().
    // Magic numbers (3, 192, 12, 6) are replaced by sizes taken from the
    // layers themselves, so the update survives architecture changes.

    // Output layer: dL/db_i = Y_i - t_i, dL/dw_ij = (Y_i - t_i) * x_j.
    for (size_t i = 0; i<net.O5.bias.size(); i++) {
        double grad = net.O5.Y[i] - t[i];
        net.O5.bias[i] = net.O5.bias[i] - study * grad;
        for (size_t j = 0; j<net.O5.x.size(); j++) {
            net.O5.w[i][j] = net.O5.w[i][j] - study * grad * net.O5.x[j];
        }
    }

    // C3 layer. BUG FIX: the original bias update omitted the learning
    // rate (bias -= matSum(d)), inconsistent with the O5 update above and
    // with the kernel update below.
    for (int i = 0; i<net.C3.outChannels; i++) {
        net.C3.bias[i] = net.C3.bias[i] - study * matSum(net.C3.d[i]);
        for (int j = 0; j<net.C3.inChannels; j++) {
            // Kernel gradient = valid correlation of the layer input with
            // the local gradient plane, scaled by the learning rate.
            net.C3.kernels[i][j] = matSub(net.C3.kernels[i][j], matSubA(convValid(net.P2.y[j], net.C3.d[i]), study));
        }
    }

    // C1 layer: same update, driven by the raw network input.
    for (int i = 0; i<net.C1.outChannels; i++) {
        net.C1.bias[i] = net.C1.bias[i] - study * matSum(net.C1.d[i]);
        for (int j = 0; j<net.C1.inChannels; j++) {
            net.C1.kernels[i][j] = matSub(net.C1.kernels[i][j], matSubA(convValid(net.C1.input[j], net.C1.d[i]), study));
        }
    }
}

void Anet::show(Net net)
{
    // Debug dump: channel 0 of every feature map, then the fc outputs
    // (pre-softmax y followed by post-softmax Y).
    auto dumpPlane = [](const vector<vector<double>> &plane) {
        for (size_t h = 0; h<plane.size(); h++) {
            for (size_t w = 0; w<plane[h].size(); w++) {
                std::cout<<plane[h][w]<<"\t";
            }
            std::cout<<std::endl;
        }
    };

    qDebug()<<"*********C1*********";
    qDebug()<<"***y***";
    dumpPlane(net.C1.y[0]);
    qDebug()<<"***Y***";
    dumpPlane(net.C1.Y[0]);

    qDebug()<<"*********P2*********";
    dumpPlane(net.P2.y[0]);

    qDebug()<<"*********C3*********";
    qDebug()<<"***y***";
    dumpPlane(net.C3.y[0]);
    qDebug()<<"***Y***";
    dumpPlane(net.C3.Y[0]);

    qDebug()<<"*********P4*********";
    dumpPlane(net.P4.y[0]);

    qDebug()<<"*********fc*********";
    for (size_t i = 0; i<net.O5.Y.size(); i++) {
        std::cout<<net.O5.y[i]<<"\t";
    }
    std::cout<<std::endl;
    for (size_t i = 0; i<net.O5.Y.size(); i++) {
        std::cout<<net.O5.Y[i]<<"\t";
    }
    std::cout<<std::endl;
}
