// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
// #include <stdio.h> 


// This file seems to have no substantial changes — just a few pragmas added;
// apparently this stage is not the performance bottleneck.

#include "custypedef.h"


//--------------------- Baseline -----------------------//
//#define BUFFER_DEPTH 128
extern "C" {
/**
 * paramRead - stream kernel weights from global memory to an AXI-Stream.
 *
 * Reads weight vectors from the m_axi port B_in into three on-chip buffers
 * (weights_0/1/2, one per +0/+1/+2 offset within a DATA_SIZE_K stride —
 * presumably the three rows/taps of a K-sized kernel window; confirm against
 * the consumer), then repacks KERNAL_PARALLEL * 3 data words per cycle into a
 * single k2k<knl_bag> bundle and writes it to weight_channels for every
 * output position (h, w) and every (n, s) pair.
 *
 * @param B_in             global-memory pointer to weight vectors (gmem1)
 * @param data_num         NOTE(review): never referenced in this body —
 *                         dead parameter kept for interface compatibility;
 *                         confirm callers before removing.
 * @param weight_channels  AXI-Stream output carrying packed weight bundles
 *                         (depth 256)
 */
void paramRead(
                KNL_VEC *B_in,
				uint data_num,
				REFERENCE_STREAM(k2k<knl_bag>, 256, weight_channels)
				)
{
	#pragma HLS INTERFACE m_axi port = B_in       offset = slave bundle = gmem1 //num_read_outstanding=1 max_read_burst_length=1
	#pragma HLS INTERFACE axis  port = weight_channels depth=256
	// On-chip weight buffers: one entry per (kp, n) pair, one buffer per
	// +0/+1/+2 offset into each DATA_SIZE_K-strided record of B_in.
	KNL_VEC weights_0[DATA_SIZE_N * KERNAL_PARALLEL];
	KNL_VEC weights_1[DATA_SIZE_N * KERNAL_PARALLEL];
	KNL_VEC weights_2[DATA_SIZE_N * KERNAL_PARALLEL];
	//volatile int Prcnt = 0;
	// Flat kernel index: advances once per (k, kp) iteration, so it equals
	// k * KERNAL_PARALLEL + kp and selects the record read from B_in.
	int kp_number=0;
	for(uint k = 0; k<DATA_SIZE_M/KERNAL_PARALLEL; k++){

		// Phase 1: burst-load weights for KERNAL_PARALLEL kernels into the
		// three local buffers before streaming them out.
		for(uint kp=0; kp<KERNAL_PARALLEL; kp++){
		
			for(uint n = 0; n<DATA_SIZE_N; n++){
			#pragma HLS PIPELINE
				// for(uint j = 0; j<DATA_SIZE_K; j++){
					// #pragma HLS unroll
				/*
				weights_0[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 0];
				weights_1[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 1];
				weights_2[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 2];
				*/
// NOTE: it is recommended to make each read one packet; issuing multiple
// packets per pipeline iteration triggers a warning because they share one
// transfer bus, creating inter-iteration dependencies — similar to the
// variable-accumulation (loop-carried dependence) problem?

				// Merged into one here — may have a slight effect.
				weights_0[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 0];
				weights_1[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 1];
				weights_2[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 2]; 
				
				// weights_0[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 0];
				// weights_1[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 1];
				// weights_2[kp * DATA_SIZE_N + n] = B_in[kp_number * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 2]; 
				
				// }
			}
			 
			kp_number++;
		}

		//load kernel
		/*
		for(uint n = 0; n<DATA_SIZE_N; n++){
		#pragma HLS PIPELINE
			
			// for(uint j = 0; j<DATA_SIZE_K; j++){
				// #pragma HLS unroll
			
			weights_0[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 0];
			weights_1[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 1];
			weights_2[n] = B_in[k * DATA_SIZE_K * DATA_SIZE_N + n * DATA_SIZE_K + 2];
			
			
			


			// }
		}
		*/
		
		// Phase 2: for every valid output position of a K x K sliding window
		// (hence the "- DATA_SIZE_K + 1" bounds), re-send the buffered weights
		// so the downstream consumer receives one packed bundle per (h, w, n, s).
		for(uint h = 0; h < DATA_SIZE_H - DATA_SIZE_K + 1; h++){

			for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1); w++){ 

				for(uint n = 0; n<DATA_SIZE_N; n++){

					for(uint s = 0; s<DATA_SIZE_K; s++){
					#pragma HLS PIPELINE 
						k2k<knl_bag> _trans_bottom_ori;
						
						// Pack 3 words for each of the KERNAL_PARALLEL kernels
						// into one wide bundle: each kp owns a contiguous
						// 3*GET_BIT(DATA_TYPE)-bit slice, filled from the
						// three offset buffers via ap_int bit-range selection.
						for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
						#pragma HLS unroll
							_trans_bottom_ori.data(3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)-1, 3*kp*GET_BIT(DATA_TYPE)) = weights_0[kp * DATA_SIZE_N + n].data[s];
							_trans_bottom_ori.data(3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)*2-1, 3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)) = weights_1[kp * DATA_SIZE_N + n].data[s];
							_trans_bottom_ori.data(3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)*3-1, 3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)*2) = weights_2[kp * DATA_SIZE_N + n].data[s];
													
						}

						weight_channels.write(_trans_bottom_ori);

						//++Prcnt;
					}
				}
			}
		}
	}
}

}
