package com.example.harmonet.harmtorch;

public class Conv2d implements Layer {
    /**
     * Naive 2-D convolution layer (NCHW layout) with optional per-output-channel bias.
     *
     * <p>Output spatial size follows the standard convolution formula:
     * {@code outH = (H + 2*padH - kernelH) / strideH + 1} (and likewise for width).
     * Weights are stored flattened: {@code kernel[i * in_channel + j]} is the
     * (height x width) filter connecting input channel {@code j} to output channel {@code i}.
     */

    // Kernel spatial extent: height = kernel_size[0], width = kernel_size[1].
    int width, height, in_channel, out_channel;
    // Zero-padding applied on each side: padH = padding[0], padW = padding[1].
    int padW, padH;
    boolean hasBias;
    // stride[0] = vertical step, stride[1] = horizontal step.
    int[] stride;
    // Flattened filter bank, indexed kernel[i * in_channel + j][kh][kw].
    float[][][] kernel;
    // Per-output-channel bias; null when hasBias is false.
    float[] bias;
    // Total learnable parameter count (weights, plus biases when enabled).
    int param;


    /**
     * @param in_channel  number of input feature maps
     * @param out_channel number of output feature maps
     * @param kernel_size {height, width} of each filter
     * @param stride      {vertical, horizontal} step; defensively copied
     * @param padding     {padH, padW} zero-padding per side
     * @param bias        whether to allocate a learnable bias per output channel
     */
    public Conv2d(int in_channel, int out_channel, int[] kernel_size, int[] stride, int[] padding, boolean bias) {
        this.in_channel = in_channel;
        this.out_channel = out_channel;
        this.width = kernel_size[1];
        this.height = kernel_size[0];
        this.stride = stride.clone();
        this.padW = padding[1];
        this.padH = padding[0];
        this.hasBias = bias;
        this.kernel = new float[in_channel * out_channel][height][width];
        if (hasBias) {
            this.bias = new float[out_channel];
            this.param = in_channel * out_channel * height * width + out_channel;
        }
        else {
            this.param = in_channel * out_channel * height * width;
        }
    }

    /**
     * Accumulates the contribution of input channel {@code j} to output element
     * (n, i, y, x), sliding the (height x width) filter over the padded window
     * whose top-left corner is (h1, w1). Positions falling in the zero-padding
     * region are skipped (they contribute nothing).
     */
    private void setVal(Tensor in, Tensor out, int w1, int h1, int n, int i, int j, int y, int x) throws Exception {
        for(int h2 = 0; h2 < height; ++h2) {
            for(int w2 = 0; w2 < width; ++w2) {
                // Map kernel coordinate back into unpadded input coordinates.
                int w = w1 + w2 - padW;
                int h = h1 + h2 - padH;
                if (w >= 0 && w < in.dim()[3] && h >= 0 && h < in.dim()[2]) {
                    out.tensor()[out.pos(n, i, y, x)] += in.tensor()[in.pos(n, j, h, w)]
                            * kernel[i * in_channel + j][h2][w2];
                }
            }
        }
    }

    /**
     * Computes one output element (n, i, y, x): seeds it with the bias (when
     * enabled) and sums the windowed products over every input channel.
     * NOTE(review): when hasBias is false this relies on the output Tensor
     * being zero-initialized on construction — confirm against Tensor.
     */
    private void compute(Tensor in, Tensor out, int n, int i, int y, int x, int h1, int w1) throws Exception {
        if(hasBias) {
            out.tensor()[out.pos(n, i, y, x)] = bias[i];
        }
        for(int j = 0; j < in_channel; ++j) {
            setVal(in, out, w1, h1, n, i, j, y, x);
        }
    }

    /**
     * Slides the filter over the padded spatial extent of sample {@code n} for
     * output channel {@code i}. The loop bound {@code h1 <= H + 2*padH - height}
     * (written as {@code < ... + 1}) yields exactly the number of rows/cols that
     * {@link #forward} allocates via the convolution output-size formula.
     */
    public void computeWH(Tensor in, Tensor out, int n, int i) throws Exception {
        for(int h1 = 0, y = 0; h1 < in.dim()[2] + padH * 2 + 1 - height; h1 += stride[0], ++y) {
            for(int w1 = 0, x = 0; w1 < in.dim()[3] + padW * 2 + 1 - width; w1 += stride[1], ++x) {
                compute(in, out, n, i, y, x, h1, w1);
            }
        }
    }

    /**
     * Applies the convolution to an NCHW input tensor and returns a new
     * NCHW output tensor of shape (N, out_channel, outH, outW).
     *
     * @param in input of shape (N, in_channel, H, W)
     * @return convolved output tensor
     * @throws Exception propagated from Tensor indexing
     */
    @Override
    public Tensor forward(Tensor in) throws Exception {
        int[] out_dim = new int[4];
        out_dim[0] = in.dim()[0];
        out_dim[1] = out_channel;
        out_dim[2] = ((in.dim()[2] + 2 * padH - height) / stride[0]) + 1;
        out_dim[3] = ((in.dim()[3] + 2 * padW - width) / stride[1]) + 1;
        Tensor out = new Tensor(out_dim);
        for(int n = 0; n < in.dim()[0]; ++n) {
            for(int i = 0; i < out_channel; ++i) {
                computeWH(in, out, n, i);
            }
        }
        return out;
    }

    /** @return total number of learnable parameters in this layer */
    @Override
    public int getParam() {
        return param;
    }

    /**
     * Loads weights (and biases, when enabled) from a flat array laid out as
     * all kernel values in (filter, row, col) order, followed by the biases.
     *
     * @param param flat parameter vector; must contain at least getParam() values
     */
    @Override
    public void init(float[] param) {
        int idx = 0;
        for(int i = 0; i < in_channel * out_channel; ++i) {
            for(int j = 0; j < height; ++j) {
                for(int k = 0; k < width; ++k) {
                    kernel[i][j][k] = param[idx];
                    ++idx;
                }
            }
        }
        // BUG FIX: the original loop ran unconditionally, throwing a
        // NullPointerException for layers constructed with bias=false
        // (where `bias` is never allocated) and reading past the declared
        // parameter count. Only load biases when they exist.
        if (hasBias) {
            for(int i = 0; i < out_channel; ++i) {
                bias[i] = param[idx];
                ++idx;
            }
        }
    }
}
