// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
//#include <stdio.h> 


#include "custypedef.h"


// Read stage of the dataflow: pulls FADD_LAT beats of packed weight/image
// words from the AXI streams, unpacks them into an Mxv_typer batch and
// forwards it to the compute stage via fifowt/fifofmp.  Out-of-range slots
// (tail of the K*N reduction) are zero-padded so the extra MACs are no-ops.
// Counters: j = position inside one K*N reduction, w/h = output column/row,
// i = output-channel tile; they carry j -> w -> h -> i.
void funcRd(REFERENCE_STREAM(k2k<BOUT_type>, 32, weight_channels),  
			REFERENCE_STREAM(k2k<vec_type>, 32, img_channels), 
			hls::stream<Mxv_typer, 32>& fifowt,
			hls::stream<Mxv_typer, 32>& fifofmp){
	#pragma HLS INLINE

	Mxv_typer knl;
	Mxv_typer img;

	int i = 0, h = 0, w = 0, j = 0;	// (unused local `n` removed)
	while(i < DATA_SIZE_M/PE_NUM_M){
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS pipeline ii=1 rewind
			k2k<vec_type> _trans_img;
			k2k<BOUT_type> _trans_knl;
			if(j + para < DATA_SIZE_K * DATA_SIZE_N){
				_trans_img = img_channels.read();
				_trans_knl = weight_channels.read();

				// Unpack one image vector, then replicate it into every PE slice.
				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					img.data[para].data[s] 	= _trans_img.data(GET_BIT(DATA_TYPE)*(s+1)-1, GET_BIT(DATA_TYPE)*s);
					for(uint mm = 1; mm< PE_NUM_M; mm++){
						#pragma HLS unroll
						// FIX: stride was hard-coded to 3 (`s+3*mm`); use VEC_SIZE so the
						// broadcast matches the mm*VEC_SIZE+s layout used everywhere else
						// and stays correct for any vector width.
						img.data[para].data[s + VEC_SIZE*mm] = img.data[para].data[s];
					}
				}
				// Each PE has its own weight lane: unpack all VEC_SIZE*PE_NUM_M of them.
				for(uint s = 0; s<VEC_SIZE*PE_NUM_M; s++){
					#pragma HLS unroll
					knl.data[para].data[s]	= _trans_knl.data(GET_BIT(DATA_TYPE)*(s+1)-1, GET_BIT(DATA_TYPE)*s);
				}
			}else{
				// Tail padding: zero both operands so the surplus slots add nothing.
				for(uint s = 0; s<VEC_SIZE*PE_NUM_M; s++){
					#pragma HLS unroll
					img.data[para].data[s] = 0;
				}
				for(uint s = 0; s<VEC_SIZE*PE_NUM_M; s++){
					#pragma HLS unroll
					knl.data[para].data[s] = 0;
				}
			}
		}
		
		fifowt.write(knl);
		fifofmp.write(img);
		
		// Carry-chain update of the sweep counters (j -> w -> h -> i).
		// (DATA_SIZE_X - DATA_SIZE_K + 1 - 1 simplifies to DATA_SIZE_X - DATA_SIZE_K.)
		const bool tile_done = (j >= DATA_SIZE_K * DATA_SIZE_N - FADD_LAT);
		const bool row_done  = tile_done && (w == DATA_SIZE_W - DATA_SIZE_K);
		const bool img_done  = row_done  && (h == DATA_SIZE_H - DATA_SIZE_K);

		if(img_done)
			++i;

		if(img_done)
			h = 0;
		else if(row_done)
			++h;

		if(row_done)
			w = 0;
		else if(tile_done)
			++w;

		if(tile_done)
			j = 0;
		else
			j += FADD_LAT;

	}
}

// Compute stage of the dataflow: multiplies the image vector against each
// PE's weight lane and accumulates the products into FADD_LAT interleaved
// partial-sum banks (one bank per pipeline slot, which hides the FP-adder
// latency across Outer-loop iterations).  One Mxv_typer word of partial
// sums is written to `fifo` per output pixel; funcDown finishes the sum.
void funcUp(
			hls::stream<Mxv_typer, 32>& fifo,
			hls::stream<Mxv_typer, 32>& fifowt,
			hls::stream<Mxv_typer, 32>& fifofmp
			){
	#pragma HLS INLINE
	DATA_TYPE img[VEC_SIZE*PE_NUM_M];
	DATA_TYPE knl[VEC_SIZE*PE_NUM_M];
	DATA_TYPE mul[VEC_SIZE*PE_NUM_M];
	Mxv_typer knl_st;
	Mxv_typer img_st;

	// FADD_LAT independent accumulator banks, cleared for every output pixel.
	DATA_TYPE tmp[FADD_LAT][PE_NUM_M*VEC_SIZE];

	Mxv_typer acc_tmp_fifo;


	int i = 0, h = 0, w = 0;	// (unused locals `n` and `j` removed; `j` was shadowed by the Outer loop)
	while(i < DATA_SIZE_M/PE_NUM_M){
		// Reset all partial-sum banks.
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS pipeline ii=1 rewind
			for(int tt = 0; tt < VEC_SIZE; tt++){
				#pragma HLS unroll
				for(uint mm = 0; mm< PE_NUM_M; mm++){
					#pragma HLS unroll
					tmp[para][mm*VEC_SIZE+tt] = 0; 
				}
			}
		}
		Outer:for(int j = 0; j < DATA_SIZE_K * DATA_SIZE_N; j += FADD_LAT){
			#pragma HLS pipeline ii=1 rewind
			knl_st = fifowt.read();
			img_st = fifofmp.read();

			Inner:for(int para = 0; para < FADD_LAT; para++){
				#pragma HLS unroll

				for(uint s = 0; s<VEC_SIZE*PE_NUM_M; s++){
					#pragma HLS unroll
					img[s] 	= img_st.data[para].data[s];
					knl[s] 	= knl_st.data[para].data[s];
				}
						
				// Element-wise multiply across all PE lanes in parallel.
				for(int k=0; k<VEC_SIZE*PE_NUM_M; k++){
					#pragma HLS unroll
					mul[k] = knl[k] * img[k];
				}

				// Accumulate each lane into this slot's bank.
				// NOTE(review): this loop sits inside an unrolled region of the
				// already-pipelined Outer loop, so HLS fully unrolls it and the
				// pipeline pragma here is likely redundant — verify in synthesis.
				for(int k=0; k<VEC_SIZE; k++){
					#pragma HLS pipeline ii=1 rewind
					for(uint mm = 0; mm< PE_NUM_M; mm++){
						#pragma HLS unroll
						tmp[para][mm*VEC_SIZE+k] += mul[mm*VEC_SIZE+k];
					}
				}
			}
		}

		// Pack the partial-sum banks into one stream word for funcDown.
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS pipeline ii=1 rewind
			for(int tt = 0; tt < VEC_SIZE; tt++){
				#pragma HLS unroll
				for(uint mm = 0; mm< PE_NUM_M; mm++){
					#pragma HLS unroll
					acc_tmp_fifo.data[para].data[tt + VEC_SIZE * mm] = tmp[para][mm*VEC_SIZE+tt];
				}
			}
		}
		
		fifo.write(acc_tmp_fifo);
		
		// Carry-chain update of the output-pixel counters (w -> h -> i).
		const bool row_done = (w == DATA_SIZE_W - DATA_SIZE_K);
		const bool img_done = row_done && (h == DATA_SIZE_H - DATA_SIZE_K);

		if(img_done)
			++i;

		if(img_done)
			h = 0;
		else if(row_done)
			++h;

		if(row_done)
			w = 0;
		else
			++w;
	}
}

// Reduce stage of the dataflow: collapses the FADD_LAT partial-sum banks
// and then the VEC_SIZE lanes into one scalar per PE, packs the PE_NUM_M
// results into a single output word and writes it to the result stream.
// One word is produced per output pixel, mirroring funcUp's production rate.
void funcDown(	REFERENCE_STREAM(k2k<M_type>, 32, result_channels),
				hls::stream<Mxv_typer, 32>& fifo){
	#pragma HLS INLINE
	
	DATA_TYPE acc[PE_NUM_M];
	DATA_TYPE acc_tmp[PE_NUM_M*VEC_SIZE];
	Mxv_typer acc_tmp_fifo;
	// (unused local `result_flag` removed)

	int i = 0, h = 0, w = 0;	// (unused locals `n` and `j` removed)
	Top:while(i < DATA_SIZE_M/PE_NUM_M){
		
		for(uint mm = 0; mm< PE_NUM_M; mm++){
			#pragma HLS unroll
			acc[mm]=0;
		}

		k2k<M_type> _trans_acc;
		for (int tt = 0; tt < PE_NUM_M*VEC_SIZE; tt++){
			#pragma HLS unroll
			acc_tmp[tt]=0;
		}
		acc_tmp_fifo = fifo.read();
		// First reduction: sum the FADD_LAT banks lane-by-lane.
		Outer:for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS PIPELINE II=1 rewind
			Inner:for (int tt = 0; tt < PE_NUM_M*VEC_SIZE; tt++){
				#pragma HLS unroll
				acc_tmp[tt] += acc_tmp_fifo.data[para].data[tt];
			}
		}
		// Second reduction: sum the VEC_SIZE lanes into one scalar per PE.
		for (int tt = 0; tt < VEC_SIZE; tt++){
			#pragma HLS PIPELINE II=1 rewind
			for(uint mm = 0; mm< PE_NUM_M; mm++){
				#pragma HLS unroll
				acc[mm] += acc_tmp[tt + VEC_SIZE*mm];
			}
		}
		
		// Pack one result per PE into the output word's bit-slices.
		for(uint mm = 0; mm< PE_NUM_M; mm++){
			#pragma HLS unroll
			_trans_acc.data(GET_BIT(DATA_TYPE)*(mm+1)-1, GET_BIT(DATA_TYPE)*mm) = acc[mm];
		}
		result_channels.write(_trans_acc);


		// Carry-chain update of the output-pixel counters (w -> h -> i).
		const bool row_done = (w == DATA_SIZE_W - DATA_SIZE_K);
		const bool img_done = row_done && (h == DATA_SIZE_H - DATA_SIZE_K);

		if(img_done)
			++i;

		if(img_done)
			h = 0;
		else if(row_done)
			++h;

		if(row_done)
			w = 0;
		else
			++w;
		
	}
}

extern "C" {
// Top-level free-running kernel: a three-stage dataflow pipeline
// (read/unpack -> multiply-accumulate -> reduce/write-back) wired
// together with internal HLS streams.
void coreConv(
	REFERENCE_STREAM(k2k<BOUT_type>, 64, weight_channels), 
	REFERENCE_STREAM(k2k<vec_type>, 32, img_channels),
	REFERENCE_STREAM(k2k<M_type>, 32, result_channels)
)
{
	#pragma HLS INLINE recursive
	#pragma HLS interface ap_ctrl_none port=return // Special pragma for freerunning kernel
	#pragma HLS INTERFACE axis  port = weight_channels	depth=64
	#pragma HLS INTERFACE axis  port = img_channels		depth=32
	#pragma HLS INTERFACE axis  port = result_channels	depth=512
	#pragma HLS dataflow

	// Inter-stage FIFOs: unpacked weights and feature-map data feed the
	// compute stage; accumulated partial sums feed the reduce stage.
	hls::stream<Mxv_typer, 32> wt_fifo;
	hls::stream<Mxv_typer, 32> fmp_fifo;
	hls::stream<Mxv_typer, 32> acc_fifo;

	funcRd(weight_channels, img_channels, wt_fifo, fmp_fifo);
	funcUp(acc_fifo, wt_fifo, fmp_fifo);
	funcDown(result_channels, acc_fifo);
}
}

