// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
//#include <stdio.h> 

#include "custypedef.h"

//--------------------- Baseline -----------------------//



// Read stage of the dataflow pipeline: pulls packed AXI-Stream words from the
// weight and image input channels, unpacks them into element vectors
// (img_bag / knl_bag), and forwards the bags to the compute stages.
// NOTE(review): GET_BIT presumably yields the bit width of its argument's
// type (value of imgBag.data[s] irrelevant at first use) — confirm against custypedef.h.
void read(  REFERENCE_STREAM(k2k<knl_bag>, 256, weight_channels), 
			REFERENCE_STREAM(k2k<vec_type>, 256, img_channels), 
			hls::stream<img_bag, 256>& trans_img,
			hls::stream<knl_bag, 256>& trans_knl){
			
			
			k2k<knl_bag> _trans_knl;
			// pipelined loop
			// reads one vertical strip (DATA_SIZE_K elements long) from the feature map
			k2k<vec_type> _trans_img;
			// pack and send downstream
			img_bag imgBag;
			knl_bag knlBag;	

			Loop1:for(uint j=0; j<DATA_SIZE_K*DATA_SIZE_N*(DATA_SIZE_W - DATA_SIZE_K + 1)*(DATA_SIZE_H - DATA_SIZE_K + 1)*(DATA_SIZE_M/KERNAL_PARALLEL); j++){// DATA_SIZE_K is the convolution kernel size (3)
			#pragma HLS PIPELINE

				_trans_img = img_channels.read();
			
				// read the required kernel data — also a DATA_SIZE_K-long strip

				_trans_knl = weight_channels.read();							



				// convert the flat stream word back into a vector of elements
				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					imgBag.data[s] = _trans_img.data(GET_BIT(imgBag.data[s])*(s+1)-1, GET_BIT(imgBag.data[s])*s);
				}

				// unpack KERNAL_PARALLEL kernels' worth of weights; each kernel's
				// slice is offset by 3*GET_BIT(DATA_TYPE) bits within the word
				for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
					#pragma HLS unroll
					for(uint s = 0; s<VEC_SIZE; s++){
						#pragma HLS unroll
						knlBag.data[kp * VEC_SIZE + s] = _trans_knl.data(3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)*(s+1)-1, 3*kp*GET_BIT(DATA_TYPE)+GET_BIT(DATA_TYPE)*s);
					}
				}

				trans_knl.write(knlBag);
				trans_img.write(imgBag);
				
			}

}


// Multiply stage: for each strip position, multiplies the image vector
// element-wise against KERNAL_PARALLEL kernel vectors and streams the
// per-element products to the accumulation stage.
// The trip count matches Loop1 in read() so the streams stay balanced.
void doCompute_mul(hls::stream<img_bag, 256>& trans_img,
			hls::stream<knl_bag, 256>& trans_knl,
			hls::stream<result_bag_mul, 256>& trans_result_mul){

    result_bag_mul resultBagMul;
	img_bag imgBag;
	knl_bag knlBag;


	// #pragma HLS PIPELINE off
	Loop2:for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1)*(DATA_SIZE_H - DATA_SIZE_K + 1)*(DATA_SIZE_M/KERNAL_PARALLEL)*DATA_SIZE_K*DATA_SIZE_N; w++){// similar to above; DATA_SIZE_W/H are the input image width/height
	#pragma HLS PIPELINE 

		imgBag = trans_img.read();
		knlBag = trans_knl.read();

		// multiply the two strips element-by-element at each position;
		// the same image element is shared by all KERNAL_PARALLEL kernels
		for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
		#pragma HLS unroll
			for(int k=0; k<VEC_SIZE; k++){
			#pragma HLS unroll
				resultBagMul.data[kp * VEC_SIZE + k]= knlBag.data[kp * VEC_SIZE + k] * imgBag.data[k];
			}							
		}

		trans_result_mul.write(resultBagMul);
	}
}


// Accumulation stage: sums the per-element products of one full convolution
// window (DATA_SIZE_K*DATA_SIZE_N strip reads) for KERNAL_PARALLEL kernels at
// once.  A STEP-deep shift register distributes the accumulation so the
// pipelined inner loop avoids a single-register add dependency; the STEP
// partial sums are reduced into the final result after the window completes.
void doCompute_sum(hls::stream<result_bag_mul, 256>& trans_result_mul,
				   hls::stream<result_bag_sum, 256>& trans_result_sum){

    result_bag_mul resultBagMul;
	result_bag_sum resultBagSum[STEP+1];
	result_bag_sum resultBagTotal[STEP+1];
	DATA_TYPE tmp = 0;

	int count=0;




	// This loop performs several convolutions at once: KERNAL_PARALLEL
	// kernels run in parallel (one output position per kernel per iteration)
	for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1)*(DATA_SIZE_H - DATA_SIZE_K + 1)*(DATA_SIZE_M/KERNAL_PARALLEL); w++){// similar to above; DATA_SIZE_W is the input image width
	#pragma HLS PIPELINE off

		
		// clear the distributed (shift-register) accumulator storage
		for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
		#pragma HLS unroll
			for(uint step=0; step<STEP+1; step++){
			#pragma HLS unroll
				resultBagSum[step].data[kp] = 0;
				resultBagTotal[step].data[kp] = 0;
			}
		}

		 
		// This loop accumulates, separately per kernel, the products of all
		// channels at a single output position (i.e. one complete convolution)
		// into the STEP-deep distributed accumulators.
		Loop3:for(uint j=0; j<DATA_SIZE_K*DATA_SIZE_N; j++){// DATA_SIZE_K is the convolution kernel size (3)
		#pragma HLS loop_flatten off
		#pragma HLS PIPELINE

			resultBagMul = trans_result_mul.read();
			
			// then add the products into the shift register

			for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){ 



				for(int k=0; k<VEC_SIZE; k++){
					#pragma HLS unroll

					//resultBagSum[STEP+1].data[kp] += resultBagMul.data[kp * VEC_SIZE + k];

					// add the new product onto the oldest partial sum...
					tmp = resultBagSum[STEP-1].data[kp] + resultBagMul.data[kp * VEC_SIZE + k];

					// ...shift the register down by one slot...
					for (uint jj = STEP-1; jj > 0; jj--){
					#pragma HLS unroll
						resultBagSum[jj].data[kp] = resultBagSum[jj-1].data[kp];

					}

					// ...and store the updated sum at the head
					resultBagSum[0].data[kp] = tmp; 

				}	
				
			}

		}


		// Reduce the STEP distributed partial sums into the final per-kernel
		// single-convolution result (running prefix sum; resultBagTotal[STEP-1]
		// holds the complete total that is written out below).
		for(uint kp=0; kp<KERNAL_PARALLEL; kp++){
			#pragma HLS unroll
				resultBagTotal[0].data[kp] = resultBagSum[0].data[kp]; 
				for (uint ii = 1; ii < STEP; ii++){
				#pragma HLS unroll
					resultBagTotal[ii].data[kp] = resultBagTotal[ii-1].data[kp] + resultBagSum[ii].data[kp];

			}				
		}


		trans_result_sum.write(resultBagTotal[STEP-1]);
	}
}





// Write stage: packs the KERNAL_PARALLEL per-kernel results for each output
// position into one stream word (bit-sliced side by side) and writes it to
// the output AXI-Stream channel.
void write( REFERENCE_STREAM(k2k<result_bag>, 256, result_channels),
			hls::stream<result_bag_sum, 256>& trans_result_sum){

	// Earlier variant kept for reference: emitted one result per word,
	// serializing the KERNAL_PARALLEL kernel results over successive writes.
    // result_bag_sum resultBagSum;
	// k2k<DATA_TYPE> _trans_acc[KERNAL_PARALLEL];
	// uint kp1=0;

	// Loop4:for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1)*(DATA_SIZE_H - DATA_SIZE_K + 1)*(DATA_SIZE_M/KERNAL_PARALLEL)*KERNAL_PARALLEL; w++){// similar to above; DATA_SIZE_W is the input image width
	// #pragma HLS pipeline off


	// 	if(kp1==0){
	// 		resultBagSum = trans_result_sum.read();			
	// 	}


	// 	//for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
			
	// 	_trans_acc[kp1].data(GET_BIT(resultBagSum.data[0])-1, 0) = resultBagSum.data[kp1];
	// 	result_channels.write(_trans_acc[kp1]);

	// 	//}

	// 	kp1++;

	// 	if(kp1>=KERNAL_PARALLEL){
	// 		kp1=0;
	// 	}
	// }



	result_bag_sum resultBagSum;
	k2k<result_bag> _trans_acc;

	// #pragma HLS PIPELINE off
	Loop4:for(uint w=0; w<(DATA_SIZE_W - DATA_SIZE_K + 1)*(DATA_SIZE_H - DATA_SIZE_K + 1)*(DATA_SIZE_M/KERNAL_PARALLEL); w++){// similar to above; DATA_SIZE_W is the input image width
	#pragma HLS PIPELINE 
		
		
		resultBagSum = trans_result_sum.read();

		// pack kernel kp's result into the bit slice starting at
		// kp*GET_BIT(DATA_TYPE) of the output word
		for(uint kp = 0; kp<KERNAL_PARALLEL; kp++){
		#pragma HLS unroll
			_trans_acc.data(kp*GET_BIT(DATA_TYPE)+GET_BIT(resultBagSum.data[0])-1, kp*GET_BIT(DATA_TYPE)) = resultBagSum.data[kp];
			
		}
		
		result_channels.write(_trans_acc);
	}
    


}




extern "C" {
// Top-level free-running convolution kernel.  Connects the four dataflow
// stages (read -> multiply -> accumulate -> write) with internal FIFO
// streams; all external ports are AXI-Stream with depth 256.
void coreConv(
	REFERENCE_STREAM(k2k<knl_bag>, 256, weight_channels), 
	REFERENCE_STREAM(k2k<vec_type>, 256, img_channels),
	REFERENCE_STREAM(k2k<result_bag>, 256, result_channels)
)
{
	printf("try four function---snake\n");
	#pragma HLS interface ap_ctrl_none port=return // Special pragma for a free-running kernel: this defines the kernel's control protocol (overriding the Vitis default).
	// The kernel runs in streaming fashion: it drains data it has already received even when no
	// new data arrives, stalling when inputs are empty and resuming as soon as data appears.
	// Note that C/RTL co-simulation may impose additional constraints on this mode.
	#pragma HLS INTERFACE axis  port = weight_channels depth=256
	#pragma HLS INTERFACE axis  port = img_channels depth=256
	#pragma HLS INTERFACE axis  port = result_channels depth=256

    #pragma HLS dataflow
	hls::stream<img_bag, 256> trans_img;
	hls::stream<knl_bag, 256> trans_knl;
	hls::stream<result_bag_mul, 256> trans_result_mul;
	hls::stream<result_bag_sum, 256> trans_result_sum;

    read(weight_channels, img_channels, trans_img, trans_knl);
	doCompute_mul(trans_img, trans_knl, trans_result_mul);
	doCompute_sum(trans_result_mul, trans_result_sum);
	write(result_channels, trans_result_sum );
	
}

}
