#include "lenet.h"
#include <time.h>
#include <stdlib.h>
#include <math.h>

const float ALPHA=0.5;
const int PADDING=2;

/* Valid (no-padding) 2-D cross-correlation: accumulate src (*) conv into des.
 * src is (dh+ch-1) x (dw+cw-1), conv is ch x cw, des is dh x dw.
 * des is NOT cleared here — results are added on top of its contents, so the
 * caller can sum contributions from several input maps into one output map. */
static void convolute_valid(float *src, float *conv, float *des, const int dh, const int dw, const int ch, const int cw)
{
    const int sw = dw + cw - 1; /* row stride of src */
    for (int c0 = 0; c0 < ch; ++c0) {
        for (int c1 = 0; c1 < cw; ++c1) {
            /* Hoist one kernel weight and sweep it across the whole output. */
            const float k = conv[c0 * cw + c1];
            const float *shifted = src + c0 * sw + c1;
            for (int d0 = 0; d0 < dh; ++d0) {
                for (int d1 = 0; d1 < dw; ++d1) {
                    des[d0 * dw + d1] += shifted[d0 * sw + d1] * k;
                }
            }
        }
    }
}

/* Full 2-D convolution (implicit zero padding): each src element is scattered
 * into des, weighted by every kernel entry.  src is sh x sw, conv is ch x cw,
 * des is (sh+ch-1) x (sw+cw-1).  des is accumulated into, not cleared. */
static void convolute_full(float *src, float *conv, float *des, const int sh, const int sw, const int ch, const int cw)
{
    const int dw = sw + cw - 1; /* row stride of des */
    for (int s0 = 0; s0 < sh; ++s0) {
        for (int s1 = 0; s1 < sw; ++s1) {
            /* One source pixel fans out over a ch x cw patch of des. */
            const float v = src[s0 * sw + s1];
            float *patch = des + s0 * dw + s1;
            for (int c0 = 0; c0 < ch; ++c0) {
                for (int c1 = 0; c1 < cw; ++c1) {
                    patch[c0 * dw + c1] += v * conv[c0 * cw + c1];
                }
            }
        }
    }
}



/* des += src * mat  (row vector times matrix).
 * src has `height` entries, mat is height x width in row-major order,
 * des has `width` entries and is accumulated into, not cleared.
 *
 * The row index is the OUTER loop so mat is walked contiguously, one row
 * per iteration, instead of striding down a column (stride `width`) for
 * every output element as the previous version did.  For each des[y] the
 * contributions still arrive in ascending x order, so the floating-point
 * result is bit-identical — only the memory access pattern improves. */
static void vector_x_matrix(float *src, float *mat, float *des, const int height, const int width) {
    for (int x = 0; x < height; ++x) {
        const float s = src[x];
        const float *row = mat + x * width;
        for (int y = 0; y < width; ++y) {
            des[y] += s * row[y];
        }
    }
}

/* des += mat * src  (matrix times column vector).
 * mat is height x width in row-major order, src has `width` entries,
 * des has `height` entries and is accumulated into rather than overwritten. */
static void matrix_x_vector(float *mat, float *src, float *des, const int height, const int width)
{
    for (int row = 0; row < height; ++row) {
        const float *r = mat + row * width;
        float acc = des[row];
        for (int col = 0; col < width; ++col) {
            acc += src[col] * r[col];
        }
        des[row] = acc;
    }
}

/* Forward pass of one convolutional layer.
 *   src    : sn input feature maps, each (dh+ch-1) x (dw+cw-1)
 *   conv   : sn x dn kernels of size ch x cw, indexed [input][output]
 *   des    : dn output maps of size dh x dw, assumed zeroed by the caller
 *   bias   : one bias per output map, added before the activation
 *   active : activation function applied element-wise to each output map */
static void convolution_forward(float *src, float *conv, float *des, float *bias, float (*active)(float), const int dh, const int dw, const int ch, const int cw, const int sn, const int dn)
{
    const int srcSize = (dh + ch - 1) * (dw + cw - 1);
    const int desSize = dh * dw;
    const int convSize = ch * cw;

    /* Accumulate every input map into every output map through its kernel. */
    for (int out = 0; out < dn; ++out) {
        for (int in = 0; in < sn; ++in) {
            convolute_valid(src + in * srcSize,
                            conv + (in * dn + out) * convSize,
                            des + out * desSize,
                            dh, dw, ch, cw);
        }
    }

    /* Add the per-map bias and squash through the activation. */
    for (int out = 0; out < dn; ++out) {
        float *map = des + out * desSize;
        for (int p = 0; p < desSize; ++p) {
            map[p] = active(map[p] + bias[out]);
        }
    }
}

/* Backward pass of one convolutional layer.
 * Naming follows the *backward* data flow — src is the error arriving at this
 * layer's OUTPUT, des is the error propagated back to its INPUT:
 *   src        : output error, sn maps of sh x sw
 *   conv       : kernels laid out [input map][output map], each ch x cw
 *   des        : input error, dn maps of (sh+ch-1) x (sw+cw-1); caller-zeroed
 *   desl       : this layer's INPUT activations saved from the forward pass
 *   wd, bd     : kernel / bias gradient accumulators (added to, not cleared)
 *   activegrad : derivative of the activation, evaluated on desl
 */
static void convolution_backward(float *src, float *conv, float *des, float *desl, float *wd, float *bd, float(*activegrad)(float), const int sh, const int sw, const int ch, const int cw, const int sn, const int dn)
{
    const int srcSize = sh * sw, desSize = (sh + ch - 1) * (sw + cw - 1), convSize = ch * cw;
    /* Propagate error to the input maps: a full convolution spreads each
     * output-error pixel over its kernel footprint. */
    for (int x = 0; x < dn; ++x) {
        for (int y = 0; y < sn; ++y) {
            convolute_full(src + y*srcSize, conv + (x*sn + y)*convSize, des + x*desSize, sh, sw, ch, cw);
        }
    }
    /* Gate the propagated error by the activation derivative at the input
     * activations (relugrad takes the forward output of the activation). */
    for (int i = 0; i < desSize * dn; ++i) {
        des[i] *= activegrad(desl[i]);
    }
    /* Bias gradient: the sum of each output map's error. */
    for (int i = 0; i < sn; ++i) {
        for (int j = 0; j < srcSize; ++j) {
            bd[i] += src[i * srcSize + j];
        }
    }
    /* Kernel gradient: valid correlation of each input-activation map with
     * each output-error map (note the swapped dh/dw vs ch/cw arguments —
     * here the "kernel" passed to convolute_valid is the error map). */
    for (int x = 0; x < dn; ++x) {
        for (int y = 0; y < sn; ++y) {
            convolute_valid(desl + x *desSize, src + y *srcSize, wd + (x*sn + y)*convSize, ch, cw, sh, sw);
        }
    }
}

/* Max-pooling forward pass over n feature maps.
 * Each des cell takes the maximum of its lh x lw window in src, where
 * lh = sh/dh and lw = sw/dw (the source dimensions are assumed to divide
 * evenly by the destination dimensions).  Ties keep the earliest element. */
static void subsamp_max_forward(float *src, float *des, const int sh, const int sw, const int dh, const int dw, const int n)
{
    const int srcSize = sh * sw;
    const int desSize = dh * dw;
    const int lh = sh / dh, lw = sw / dw;
    for (int map = 0; map < n; ++map) {
        const float *s = src + map * srcSize;
        float *d = des + map * desSize;
        for (int r = 0; r < dh; ++r) {
            for (int c = 0; c < dw; ++c) {
                /* Scan the pooling window, remembering the argmax. */
                int best = r * lh * sw + c * lw;
                for (int k = 1; k < lh * lw; ++k) {
                    int idx = (r * lh + k / lw) * sw + c * lw + k % lw;
                    if (s[idx] > s[best])
                        best = idx;
                }
                d[r * dw + c] = s[best];
            }
        }
    }
}

/* Max-pooling backward pass over n feature maps.
 * Here sh x sw are the OUTPUT-error dimensions (small) and dh x dw the
 * INPUT-error dimensions (large); desl holds the forward-pass input
 * activations.  Each output-error value is routed to the argmax position of
 * its pooling window; des must be zeroed by the caller (other cells stay 0). */
static void subsamp_max_backward(float *desl, float *src, float *des, const int sh, const int sw, const int dh, const int dw, const int n)
{
    const int srcSize = sh * sw;
    const int desSize = dh * dw;
    const int lh = dh / sh, lw = dw / sw;
    for (int map = 0; map < n; ++map) {
        for (int r = 0; r < sh; ++r) {
            for (int c = 0; c < sw; ++c) {
                /* Recompute the forward argmax of this pooling window
                 * (same strict-> tie-break as the forward pass). */
                int best = r * lh * dw + c * lw;
                for (int k = 1; k < lh * lw; ++k) {
                    int idx = (r * lh + k / lw) * dw + c * lw + k % lw;
                    if (desl[idx] > desl[best])
                        best = idx;
                }
                des[best] = src[r * sw + c];
            }
        }
        src += srcSize;
        des += desSize;
        desl += desSize;
    }
}

/* Fully-connected layer forward pass: des = active(src * mat + bias).
 * src has `height` entries, mat is height x width, des has `width` entries
 * and is assumed zeroed by the caller (Feature structs are zero-initialised
 * at the call sites). */
static void dot_product_forward(float *src, float *mat, float *des, float *bias, float (*active)(float), const int height, const int width) {
    vector_x_matrix(src, mat, des, height, width);
    for (int j = 0; j < width; ++j) {
        const float z = des[j] + bias[j];
        des[j] = active(z);
    }
}

/* Fully-connected layer backward pass.
 *   src        : error at the layer output (length `width`)
 *   mat        : weights, height x width
 *   des        : error propagated to the input (length `height`, caller-zeroed)
 *   desl       : forward-pass INPUT activations (length `height`)
 *   wd, bd     : weight / bias gradient accumulators
 *   activegrad : derivative of the activation, evaluated on desl */
static void dot_product_backward(float *src, float *mat, float *des, float *desl, float *wd, float *bd, float(*activegrad)(float), const int height, const int width) {
    /* Input error = W * output_error, gated by the activation derivative. */
    matrix_x_vector(mat, src, des, height, width);
    for (int r = 0; r < height; ++r) {
        des[r] *= activegrad(desl[r]);
    }
    /* Bias gradient is simply the output error. */
    for (int c = 0; c < width; ++c) {
        bd[c] += src[c];
    }
    /* Weight gradient: outer product of input activations and output error. */
    for (int r = 0; r < height; ++r) {
        const float a = desl[r];
        float *wrow = wd + r * width;
        for (int c = 0; c < width; ++c) {
            wrow[c] += a * src[c];
        }
    }
}

/* Element count of a true array (NOT a pointer — sizeof would give the
 * pointer size). */
#define GETLENGTH(array) (sizeof(array)/sizeof(*(array)))

/* Number of floats in an aggregate (array or struct made only of floats). */
#define GETCOUNT(array)  (sizeof(array)/sizeof(float))

/* The wrappers below bridge the fixed-size nested arrays declared in lenet.h
 * to the flat float* helpers above; every dimension is derived with sizeof,
 * so the layer geometry lives in exactly one place (the type declarations). */

/* Max-pool `input` down into `output`; the window size is the ratio of the
 * two map sizes. */
#define SUBSAMP_MAX_FORWARD(input,output)                               \
{                                                                       \
    subsamp_max_forward((float *)input,(float *)output,               \
                            GETLENGTH(*input),GETLENGTH(**input),       \
                            GETLENGTH(*output),GETLENGTH(**output),GETLENGTH(output));\
}

/* Route each output error back to the argmax position recorded implicitly
 * in `input` (the forward activations). */
#define SUBSAMP_MAX_BACKWARD(input,inerror,outerror)                                        \
{                                                                                           \
    subsamp_max_backward((float *)input,(float *)outerror,(float *)inerror,              \
                            GETLENGTH(*outerror),GETLENGTH(**outerror),                     \
                            GETLENGTH(*inerror),GETLENGTH(**inerror), GETLENGTH(outerror)); \
}

/* Fully-connected forward: output = action(input x weight + bias). */
#define DOT_PRODUCT_FORWARD(input,output,weight,bias,action)                \
{                                                                           \
    dot_product_forward((float *)input,(float *)weight,(float *)output,  \
                (float *)bias,action,GETLENGTH(weight),GETLENGTH(*weight));\
}


/* Fully-connected backward: note the argument reordering — the helper takes
 * the OUTPUT error first and the forward input (`input`) as `desl`. */
#define DOT_PRODUCT_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad)    \
{                                                                               \
    dot_product_backward((float *)outerror,(float *)weight,(float *)inerror, \
                        (float *)input,(float *)wd,(float *)bd,actiongrad,   \
                            GETLENGTH(weight),GETLENGTH(*weight));              \
}

/* Convolution forward: weight is indexed [input map][output map][kh][kw]. */
#define CONVOLUTION_FORWARD(input,output,weight,bias,action)                                \
{                                                                                           \
    convolution_forward((float *)input,(float *)weight,(float *)output,(float *)bias,   \
            action,GETLENGTH(*output),GETLENGTH(**output),GETLENGTH(**weight),              \
                GETLENGTH(***weight),GETLENGTH(weight),GETLENGTH(*weight));                 \
}

/* Convolution backward: again the OUTPUT error goes first and the forward
 * input activations are passed as `desl`. */
#define CONVOLUTION_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad)            \
{                                                                                       \
    convolution_backward((float *)outerror,(float *)weight,(float *)inerror,         \
                        (float *)input,(float *)wd,(float *)bd,actiongrad,           \
                        GETLENGTH(*outerror),GETLENGTH(**outerror),GETLENGTH(**weight), \
                        GETLENGTH(***weight),GETLENGTH(*weight),GETLENGTH(weight));     \
}


/* Rectified linear unit: returns x for positive x, 0 otherwise.
 * Implemented as a multiply by a 0/1 gate (branch-free). */
float relu(float x) {
    const float gate = (x > 0); /* 1.0f when positive, else 0.0f */
    return gate * x;
}

/* Derivative of ReLU, expressed in terms of the forward OUTPUT y:
 * 1 where the unit was active, 0 where it was clamped. */
float relugrad(float y) {
    return (y > 0) ? 1.0f : 0.0f;
}

/* Full LeNet-5 forward pass: input -> C1 -> S2 -> C3 -> S4 -> C5 -> output.
 * `features` must arrive zero-initialised except for features->input (the
 * convolution/FC helpers accumulate into their destinations).  `action` is
 * the activation applied after every convolution and the final FC layer. */
static void forward(LeNet5 *lenet, Feature *features, float(*action)(float))
{
    CONVOLUTION_FORWARD(features->input, features->layer1, lenet->weight0_1, lenet->bias0_1, action);  /* C1 convolution */
    SUBSAMP_MAX_FORWARD(features->layer1, features->layer2);                                           /* S2 max pool */
    CONVOLUTION_FORWARD(features->layer2, features->layer3, lenet->weight2_3, lenet->bias2_3, action); /* C3 convolution */
    SUBSAMP_MAX_FORWARD(features->layer3, features->layer4);                                           /* S4 max pool */
    CONVOLUTION_FORWARD(features->layer4, features->layer5, lenet->weight4_5, lenet->bias4_5, action); /* C5 convolution */
    DOT_PRODUCT_FORWARD(features->layer5, features->output, lenet->weight5_6, lenet->bias5_6, action); /* F6 fully connected */
}

/* Full LeNet-5 backward pass, mirroring forward() from output to input.
 * On entry, errors->output holds the loss gradient (set by softmax) and the
 * rest of `errors` must be zero; gradients accumulate into `deltas`, which
 * the caller zero-initialises.  `actiongrad` is the activation derivative,
 * evaluated on the saved forward activations in `features`. */
static void backward(LeNet5 *lenet, LeNet5 *deltas, Feature *errors, Feature *features, float(*actiongrad)(float))
{
    DOT_PRODUCT_BACKWARD(features->layer5, errors->layer5, errors->output, lenet->weight5_6, deltas->weight5_6, deltas->bias5_6, actiongrad);   /* F6 */
    CONVOLUTION_BACKWARD(features->layer4, errors->layer4, errors->layer5, lenet->weight4_5, deltas->weight4_5, deltas->bias4_5, actiongrad);   /* C5 */
    SUBSAMP_MAX_BACKWARD(features->layer3, errors->layer3, errors->layer4);                                                                     /* S4 */
    CONVOLUTION_BACKWARD(features->layer2, errors->layer2, errors->layer3, lenet->weight2_3, deltas->weight2_3, deltas->bias2_3, actiongrad);   /* C3 */
    SUBSAMP_MAX_BACKWARD(features->layer1, errors->layer1, errors->layer2);                                                                     /* S2 */
    CONVOLUTION_BACKWARD(features->input, errors->input, errors->layer1, lenet->weight0_1, deltas->weight0_1, deltas->bias0_1, actiongrad);     /* C1 */
}

/* Copy one raw image into the (zeroed) input feature map, normalising it to
 * zero mean / unit variance and leaving a PADDING-pixel border of zeros.
 * NOTE(review): a constant image gives std == 0 and a division by zero —
 * presumably never happens with real data; confirm against the dataset. */
static inline void load_input(Feature *features, Image input) {
    float (*layer0)[LENGTH_FEATURE0][LENGTH_FEATURE0] = features->input;
    const int total = sizeof(Image) / sizeof(**input);  /* pixel count */
    const int rows = sizeof(Image) / sizeof(*input);
    const int cols = sizeof(*input) / sizeof(**input);
    float sum = 0, sqsum = 0;
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            sum += input[r][c];
            sqsum += input[r][c] * input[r][c];
        }
    }
    const float mean = sum / total;
    const float stdev = sqrt(sqsum / total - mean * mean);
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            layer0[0][r + PADDING][c + PADDING] = (input[r][c] - mean) / stdev;
        }
    }
}

/* Return the index of the largest of the first `count` entries of `output`
 * (argmax).  The branch-free update `result += (i - result) * cond` moves
 * `result` to `i` exactly when output[i] is strictly greater, so ties keep
 * the earliest index. */
static uchar get_result(float *output, uchar count) {
    uchar result = 0;
    for (uchar i = 1; i < count; ++i) {
        result += (i - result) * (output[i] > output[result]);
    }
    return result;
}

/* Fused softmax + loss gradient.
 * On return, loss[] holds the NEGATIVE gradient of the squared error between
 * the softmax probabilities p and the one-hot `label`, taken with respect to
 * the raw inputs (logits):  loss[i] = p_i * ((y_i - p_i) - inner), with
 * inner = p_label - sum(p^2).  TrainBatch ADDS this to the weights, so the
 * sign convention implements gradient descent. */
static inline void softmax(float input[OUTPUT], float loss[OUTPUT], uchar label, int count)
{
    /* Subtract the max logit before exp() for numerical stability. */
    float max = input[get_result(input, count)];
    float k = 0, inner = 0;
    for (uchar i = 0; i < count; ++i) {
        loss[i] = exp(input[i] - max);  /* unnormalised probabilities */
        k += loss[i];
    }
    k = 1. / k;
    for (uchar i = 0; i < count; ++i) {
        loss[i] *= k;                   /* loss[i] is now p_i (softmax) */
        inner -= loss[i] * loss[i];
    }
    inner += loss[label];               /* inner = p_label - sum(p^2) */
    for (uchar i = 0; i < count; ++i) {
        loss[i] *= (i == label) - loss[i] - inner;  /* p_i*((y_i - p_i) - inner) */
    }
}

/* Train on one mini-batch: per-sample gradients are computed in parallel
 * (OpenMP, when enabled), summed into `buffer` under a critical section, and
 * applied once at the end with an effective learning rate of ALPHA/batchSize.
 * `lenet` is only read during the parallel phase, so the loop is race-free. */
void TrainBatch(LeNet5* lenet, Image* inputs, uchar* labels, int batchSize)
{
    const int num = GETCOUNT(LeNet5);   /* number of float parameters in the net */
    /* NOTE(review): this accumulator is sizeof(LeNet5) bytes of stack — large;
     * confirm the platform's stack limit accommodates it. */
    float buffer[GETCOUNT(LeNet5)] = { 0 };
    int i = 0;
    #pragma omp parallel for
    for (i = 0; i < batchSize; ++i) {
        /* Per-iteration scratch: activations, errors, and this sample's gradients. */
        Feature features = { 0 };
        Feature errors = { 0 };
        LeNet5	deltas = { 0 };
        load_input(&features, inputs[i]);
        forward(lenet, &features, relu);
        /* errors.output becomes the negative loss gradient at the logits. */
        softmax(features.output, errors.output, labels[i], GETCOUNT(features.output));
        backward(lenet, &deltas, &errors, &features, relugrad);
        /* Serialise the accumulation of this sample's gradients. */
        #pragma omp critical
        {
            for(int j = 0;j < num; ++j) {
                buffer[j] += ((float *)&deltas)[j];   
            }
        }
    }
    float k = ALPHA / batchSize;
    /* Apply the averaged update; adding the negative gradient = descent. */
    for(i = 0; i < num; ++i) {
        ((float *)lenet)[i] += k * buffer[i];
    }
}

/* Classify one image: run the forward pass with ReLU activations and return
 * the argmax over the first `count` output units. */
uchar Predict(LeNet5* lenet, Image input,uchar count) {
    Feature features = { 0 };
    load_input(&features, input);
    forward(lenet, &features, relu);
    return get_result(features.output, count);
}

/* Initialise all weights with Xavier/Glorot-style uniform scaling and zero
 * the biases.  Weights are first drawn uniformly from [-1, 1], then each
 * layer's block is rescaled by sqrt(6 / (fan_in + fan_out)).  Relies on the
 * LeNet5 struct laying out weight0_1..weight5_6 followed by the bias arrays
 * contiguously (the loops walk between member addresses). */
void init_param(LeNet5 *lenet) {
    /* seeding left disabled so runs are reproducible */
    //srand((unsigned)time(0));
    /* draw every weight uniformly from [-1, 1] */
    for (float *pos = (float *)lenet->weight0_1; pos < (float *)lenet->bias0_1; *pos++ = rand()*(2. / RAND_MAX) - 1);
    /* per-layer Xavier rescaling */
    for (float *pos = (float *)lenet->weight0_1; pos < (float *)lenet->weight2_3; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (INPUT + LAYER1))));
    for (float *pos = (float *)lenet->weight2_3; pos < (float *)lenet->weight4_5; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER2 + LAYER3))));
    for (float *pos = (float *)lenet->weight4_5; pos < (float *)lenet->weight5_6; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER4 + LAYER5))));
    for (float *pos = (float *)lenet->weight5_6; pos < (float *)lenet->bias0_1; *pos++ *= sqrt(6.0 / (LAYER5 + OUTPUT)));
    /* zero all biases by writing int zeros; an all-zero bit pattern is 0.0f
     * in IEEE-754.  NOTE(review): punning float storage through int* violates
     * strict aliasing — works in practice here, but fragile. */
    for (int *pos = (int *)lenet->bias0_1; pos < (int *)(lenet + 1); *pos++ = 0);
}
