// This kernel reads convolution kernel weights from global memory and
// streams them out (packed, column-by-column) over an AXI stream
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
// #include <stdio.h> 


#include "custypedef.h"


//--------------------- Baseline -----------------------//
#define BUFFER_DEPTH 128
extern "C" {
/**
 * paramRead - read convolution kernel weights from global memory and stream
 * them out, packed column-by-column, over an AXI stream.
 *
 * @param B_in            Weight array in global memory (AXI master, bundle
 *                        gmem1), laid out as
 *                        [DATA_SIZE_M][DATA_SIZE_N][DATA_SIZE_K*DATA_SIZE_K].
 * @param data_num        Currently unused; kept for interface compatibility.
 * @param weight_channels Output AXI stream of packed k2k<vec_type> words
 *                        (FIFO depth 16).
 *
 * For each output filter k, the full DATA_SIZE_N x (K*K) weight block is
 * first buffered on-chip; then, for every output pixel (h, w) and every
 * input channel n, each of the K kernel columns is packed (one DATA_TYPE
 * element per bit-slice, 3 rows per stream word) and written to
 * weight_channels.
 *
 * NOTE(review): the packing below writes exactly three row elements per
 * stream word, so it implicitly assumes DATA_SIZE_K == 3 -- TODO confirm.
 */
void paramRead(
                DATA_TYPE *B_in,
				uint data_num,
				REFERENCE_STREAM(k2k<vec_type>, 16, weight_channels)
				)
{
	#pragma HLS INTERFACE m_axi port = B_in       offset = slave bundle = gmem1 num_read_outstanding=1 max_read_burst_length=1
	#pragma HLS INTERFACE axis  port = weight_channels depth=16
	// On-chip copy of one filter's weights: [input channel][kernel element].
	DATA_TYPE weights[DATA_SIZE_N][DATA_SIZE_K*DATA_SIZE_K];
	// Debug/progress counter; volatile so the tool does not optimize it away.
	// It is written but never read in this function.
	volatile int Prcnt = 0;
	for(uint k = 0; k<DATA_SIZE_M; k++){

		// Load the k-th filter's weights from global memory into the
		// on-chip buffer before streaming.
		for(uint n = 0; n<DATA_SIZE_N; n++){

			for(uint j = 0; j<DATA_SIZE_K*DATA_SIZE_K; j++){
				// #pragma HLS unroll
				weights[n][j] = B_in[k * DATA_SIZE_K * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K * DATA_SIZE_K + j];
			}
		}

		// Re-send the buffered weights once per valid output pixel so the
		// downstream compute kernel receives weights in lockstep with data.
		for(uint h = 0; h < DATA_SIZE_H - DATA_SIZE_K + 1; h++){
			for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1); w++){
				for(uint n = 0; n<DATA_SIZE_N; n++){
					for(uint s = 0; s<DATA_SIZE_K; s++){
						// Pack one kernel column (3 rows) into a single
						// stream word, one DATA_TYPE per bit-slice.
						k2k<vec_type> _trans_bottom_ori;
						// Fixed: use GET_BIT(DATA_TYPE) here, consistent with
						// the two slices below (previously passed a value,
						// GET_BIT(weights[n][s]), to the type-oriented macro).
						_trans_bottom_ori.data(GET_BIT(DATA_TYPE)-1, 0) = weights[n][s];
						_trans_bottom_ori.data(GET_BIT(DATA_TYPE)*2-1, GET_BIT(DATA_TYPE)) = weights[n][DATA_SIZE_K+s];
						_trans_bottom_ori.data(GET_BIT(DATA_TYPE)*3-1, GET_BIT(DATA_TYPE)*2) = weights[n][2*DATA_SIZE_K+s];
						weight_channels.write(_trans_bottom_ori);
						++Prcnt;
					}
				}
			}
		}
	}
}

}
