// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
// #include <stdio.h> 


#include "custypedef.h"


//--------------------- Baseline -----------------------//
#define BUFFER_DEPTH 128
extern "C" {
// Reads a weight/parameter matrix from global memory (B_in) and streams it
// out over `weight_channels`, replaying each on-chip tile many times so the
// downstream compute kernel never has to re-fetch weights from DDR.
//
// Structure:
//   * The M dimension is processed in tiles of PE_NUM_M rows; each tile
//     (DATA_SIZE_N * PE_NUM_M words) is first buffered on chip in `weights`.
//   * The buffered tile is then re-sent (DATA_SIZE_H-K+1)*(DATA_SIZE_W-K+1)
//     times — presumably once per valid stride-1 convolution output position
//     of a KxK kernel (TODO confirm against the consumer kernel).
//
// Parameters:
//   B_in            - weights in global memory (m_axi, bundle gmem1). Indexed
//                     as m*DATA_SIZE_N + n, i.e. assumed row-major [M][N] —
//                     verify against the host-side packing.
//   data_num        - UNUSED in this function; presumably kept so all kernels
//                     share a uniform signature — confirm before removing.
//   weight_channels - AXI-Stream (depth 64) of k2k<BOUT_type> packets carrying
//                     the bit-packed weights to the next kernel.
void paramRead(
                BIN_type *B_in,
				uint data_num,
				REFERENCE_STREAM(k2k<BOUT_type>, 64, weight_channels)
				)
{
	#pragma HLS INTERFACE m_axi port = B_in       offset = slave bundle = gmem1 //num_read_outstanding=1 max_read_burst_length=1
	#pragma HLS INTERFACE axis  port = weight_channels depth=64
	
	// On-chip cache for one tile: PE_NUM_M rows x DATA_SIZE_N columns.
	BIN_type weights[DATA_SIZE_N * PE_NUM_M];

	for(uint k = 0; k<DATA_SIZE_M / PE_NUM_M; k++){
		// --- Stage 1: buffer one PE_NUM_M-row tile from global memory ---
		// Note the layout change: DDR index has n fastest-varying, the local
		// buffer has mm fastest-varying (an on-the-fly transpose).
		for(uint n = 0; n<DATA_SIZE_N; n++){
			for(uint mm = 0; mm< PE_NUM_M; mm++){
				#pragma HLS PIPELINE II=1 rewind
					weights[mm + n*PE_NUM_M] = B_in[(k*PE_NUM_M + mm) * DATA_SIZE_N + n];
			}
		}

		// --- Stage 2: replay the buffered tile for every output position ---
		for(uint h = 0; h < DATA_SIZE_H - DATA_SIZE_K + 1; h++){
			for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1); w++){
				for(uint n = 0; n<DATA_SIZE_N; n++){
					for(uint vv = 0; vv< VEC_SIZE; vv++){
						// #pragma HLS PIPELINE II=1 rewind
						// One stream packet: DATA_SIZE_K elements for each of
						// the PE_NUM_M PEs, bit-packed side by side into .data.
						k2k<BOUT_type> _trans_bottom_ori;
						for(uint mm = 0; mm< PE_NUM_M; mm++){
							for(uint s = 0; s<DATA_SIZE_K; s++){
							// ap_int-style range write: element s of PE mm lands in
							// bits [GET_BIT*(s+K*mm) .. GET_BIT*(s+K*mm+1)-1]; the
							// source element within the word is selected by lane vv.
							_trans_bottom_ori.data(GET_BIT(DATA_TYPE)*(s+DATA_SIZE_K*mm+1)-1, GET_BIT(DATA_TYPE)*(s+DATA_SIZE_K*mm)) 
								= weights[mm + n*PE_NUM_M].data[s+DATA_SIZE_K*vv];
							}
						}
						weight_channels.write(_trans_bottom_ori);
					}
				}
			}
		}
	}
}
}
