// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation.
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
//#include <stdio.h>


#include "custypedef.h"

//--------------------- Baseline -----------------------//

// void funcUp

extern "C" {
// Write-back kernel: drains packed results from an AXI stream and scatters
// them into the external output buffer C_out via an AXI master port.
//
// Each stream word carries PE_NUM_M result values packed side by side
// (GET_BIT(DATA_TYPE) bits each — presumably an ap_uint-style payload with
// range-select; TODO confirm against custypedef.h). One word is consumed per
// output pixel, covering PE_NUM_M consecutive output channels at once.
//
// Parameters:
//   C_out           - output buffer on AXI master bundle gmem2; indexed as
//                     [channel][row][col], with each channel plane being
//                     (DATA_SIZE_H - DATA_SIZE_K + 1) rows by
//                     (DATA_SIZE_W - DATA_SIZE_K + 1) columns.
//   result_channels - AXI stream of k2k<M_type> packed result words
//                     (REFERENCE_STREAM macro expands the port declaration).
void memWrite(
			DATA_TYPE *C_out,
			REFERENCE_STREAM(k2k<M_type>, 32, result_channels)
){
	#pragma HLS INTERFACE m_axi port = C_out       offset = slave bundle = gmem2
	#pragma HLS INTERFACE axis  port = result_channels depth=256
	// Unpack staging buffer: one value per processing element.
	DATA_TYPE r[PE_NUM_M];
	// NOTE(review): Wrcnt is initialized but never incremented or read —
	// looks like debug leftover; confirm before removing.
	volatile int Wrcnt = 0;
	// Loop over channel groups (PE_NUM_M channels at a time)...
	for(uint cnt_m = 0; cnt_m < DATA_SIZE_M/PE_NUM_M; cnt_m++){
		// ...then over output rows (valid convolution height)...
		for(int cnt_h = 0; cnt_h < DATA_SIZE_H - DATA_SIZE_K + 1; cnt_h++){
			// #pragma HLS pipeline off
			// ...then over output columns; innermost loop pipelined at II=1
			// so one packed word is consumed per cycle.
			for(int cnt_i = 0; cnt_i< DATA_SIZE_W - DATA_SIZE_K + 1; cnt_i++){
				#pragma HLS PIPELINE II=1
				// One packed word per output pixel.
				k2k<M_type> _trans_r;
				_trans_r = result_channels.read();
				// Slice the mm-th PE's value out of the packed payload:
				// bits [GET_BIT(DATA_TYPE)*(mm+1)-1 : GET_BIT(DATA_TYPE)*mm].
				for(uint mm = 0; mm< PE_NUM_M; mm++){
					r[mm] = _trans_r.data(GET_BIT(DATA_TYPE)*(mm+1)-1, GET_BIT(DATA_TYPE)*mm);
				}
				// Scatter each unpacked value to its channel plane:
				// linear index = channel * plane_size + row * row_stride + col.
				for(uint mm = 0; mm< PE_NUM_M; mm++){
					C_out[(cnt_m*PE_NUM_M+mm) * (DATA_SIZE_W - DATA_SIZE_K + 1) * (DATA_SIZE_H - DATA_SIZE_K + 1) + cnt_h * (DATA_SIZE_W - DATA_SIZE_K + 1) + cnt_i] = r[mm];
				}
			}
		}
	}
}
}



