// This kernel performs vectorized floating-point addition/multiplication
// to demonstrate how parallel processing can accelerate computation
// 2020.06.17 by wangdong@bjtu.edu.cn
//#include "ap_int.h"
//#include <stdio.h> 


#include "custypedef.h"

//--------------------- Baseline -----------------------//

// funcRd: producer stage of the dataflow pipeline.
// Reads packed words from the weight and image AXI streams, unpacks each
// into VEC_SIZE scalar lanes across FADD_LAT parallel slots, and forwards
// the assembled vectors to funcUp through the fifowt/fifofmp FIFOs.
// When fewer than FADD_LAT words remain in the current accumulation
// window, the tail slots are zero-filled so downstream stages always
// consume a full vec_typer.
void funcRd(REFERENCE_STREAM(k2k<vec_type>, 16, weight_channels), 
			REFERENCE_STREAM(k2k<vec_type>, 16, img_channels), 
			hls::stream<vec_typer, 16>& fifowt,
			hls::stream<vec_typer, 16>& fifofmp){
	vec_typer knl;	// FADD_LAT x VEC_SIZE kernel (weight) lanes for one beat
	vec_typer img;	// FADD_LAT x VEC_SIZE image lanes for one beat

	// i: output-channel counter; h/w: output raster position;
	// j: offset within the DATA_SIZE_K * DATA_SIZE_N accumulation window.
	int i = 0, h = 0, w = 0, j = 0;
	while(i < DATA_SIZE_M){
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS pipeline ii=1 rewind
			k2k<vec_type> _trans_img;
			k2k<vec_type> _trans_knl;
			if(j + para < DATA_SIZE_K * DATA_SIZE_N){
				_trans_img = img_channels.read();
				_trans_knl = weight_channels.read();

				// Slice the wide stream word into VEC_SIZE scalar lanes.
				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					img.data[para].data[s] = _trans_img.data(GET_BIT(DATA_TYPE)*(s+1)-1, GET_BIT(DATA_TYPE)*s);
				}

				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					knl.data[para].data[s] = _trans_knl.data(GET_BIT(DATA_TYPE)*(s+1)-1, GET_BIT(DATA_TYPE)*s);
				}
			}else{
				// Past the end of the accumulation window: pad both
				// operands with zeros so funcUp adds a harmless 0*0 term.
				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					img.data[para].data[s] = 0;
				}

				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					knl.data[para].data[s] = 0;
				}
			}
		}

		fifowt.write(knl);
		fifofmp.write(img);

		// Advance the (j, w, h, i) counters.  j steps by FADD_LAT; when it
		// wraps, one output pixel is complete and w/h/i advance in raster
		// order over the (W-K+1) x (H-K+1) output map.
		const bool last_j = (j >= DATA_SIZE_K * DATA_SIZE_N - FADD_LAT);
		const bool last_w = (w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1);
		const bool last_h = (h == DATA_SIZE_H - DATA_SIZE_K + 1 - 1);

		if(last_w && last_h && last_j)
			++i;

		if(last_w && last_h && last_j)
			h = 0;
		else if(last_w && last_j)
			++h;

		if(last_w && last_j)
			w = 0;
		else if(last_j)
			++w;

		if(last_j)
			j = 0;
		else
			j += FADD_LAT;

	}
}

// funcUp: multiply-accumulate stage of the dataflow pipeline.
// For each output pixel it consumes DATA_SIZE_K*DATA_SIZE_N / FADD_LAT
// beats from funcRd, multiplies weight and image lanes elementwise, and
// accumulates into FADD_LAT x VEC_SIZE partial sums (the FADD_LAT copies
// presumably hide the floating-point adder latency — confirm against the
// synthesis report).  The partial sums for the pixel are then forwarded
// to funcDown through `fifo`.
void funcUp(
			hls::stream<vec_typer, 16>& fifo,
			hls::stream<vec_typer, 16>& fifowt,
			hls::stream<vec_typer, 16>& fifofmp
			){
	DATA_TYPE img[VEC_SIZE];		// image lanes of the current slot
	DATA_TYPE knl[VEC_SIZE];		// kernel lanes of the current slot
	DATA_TYPE mul[VEC_SIZE];		// elementwise products
	vec_typer knl_st;
	vec_typer img_st;
	DATA_TYPE tmp[FADD_LAT][VEC_SIZE];	// per-slot partial accumulators
	vec_typer acc_tmp_fifo;			// packed partial sums sent downstream

	// i: output-channel counter; h/w: raster position in the output map.
	int i = 0, h = 0, w = 0;
	while(i < DATA_SIZE_M){

		// Clear the partial accumulators for this output pixel.
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS unroll
			for(int tt = 0; tt < VEC_SIZE; tt++){
				#pragma HLS unroll
				tmp[para][tt] = 0;
			}
		}
		Outer:for(int j = 0; j < DATA_SIZE_K * DATA_SIZE_N; j += FADD_LAT){
			// NOTE(review): ii=13 appears chosen for the floating-point
			// accumulation latency — confirm against the schedule report.
			#pragma HLS pipeline ii=13 rewind
			knl_st = fifowt.read();
			img_st = fifofmp.read();
			Inner:for(int para = 0; para < FADD_LAT; para++){
				#pragma HLS unroll

				for(uint s = 0; s<VEC_SIZE; s++){
					#pragma HLS unroll
					img[s] = img_st.data[para].data[s];
					knl[s] = knl_st.data[para].data[s];
				}

				for(int k=0; k<VEC_SIZE; k++){
					#pragma HLS unroll
					mul[k]= knl[k] * img[k];
				}

				for(int k=0; k<VEC_SIZE; k++){
					#pragma HLS unroll
					tmp[para][k] += mul[k];
				}
			}
		}

		// Forward all FADD_LAT x VEC_SIZE partial sums to funcDown.
		for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS unroll
			for(int tt = 0; tt < VEC_SIZE; tt++){
				#pragma HLS unroll
				acc_tmp_fifo.data[para].data[tt] = tmp[para][tt];
			}
		}
		fifo.write(acc_tmp_fifo);

		// Advance raster counters: one output pixel per outer iteration.
		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1 && h == DATA_SIZE_H - DATA_SIZE_K + 1 - 1)
			++i;

		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1 && h == DATA_SIZE_H - DATA_SIZE_K + 1 - 1)
			h = 0;
		else if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1)
			++h;

		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1)
			w = 0;
		else
			++w;

	}
}

// funcDown: reduction stage of the dataflow pipeline.
// For every output pixel it reads one vec_typer of FADD_LAT x VEC_SIZE
// partial sums from funcUp, reduces them to a single scalar, packs the
// scalar into a k2k word and writes it to the result AXI stream.
void funcDown(	REFERENCE_STREAM(k2k<DATA_TYPE>, 16, result_channels),
				hls::stream<vec_typer, 16>& fifo){
	DATA_TYPE acc;
	// acc_tmp aliases acc_tmp_i through the project reg<> helper
	// (presumably to insert a register stage — see custypedef.h).
	DATA_TYPE acc_tmp_i[VEC_SIZE];
	DATA_TYPE *acc_tmp = reg<DATA_TYPE*>(acc_tmp_i);
	vec_typer acc_tmp_fifo;

	// i: output-channel counter; h/w: raster position in the output map.
	int i = 0, h = 0, w = 0;
	Top:while(i < DATA_SIZE_M){
		
		acc = 0;
		k2k<DATA_TYPE> _trans_acc;
		for (int tt = 0; tt < VEC_SIZE; tt++){
			#pragma HLS unroll
			acc_tmp[tt] = 0;
		}
		acc_tmp_fifo = fifo.read();
		// First reduce across the FADD_LAT parallel accumulator slots...
		Outer:for(int para = 0; para < FADD_LAT; para++){
			#pragma HLS unroll
			Inner:for (int tt = 0; tt < VEC_SIZE; tt++){
				#pragma HLS unroll
				acc_tmp[tt] += acc_tmp_fifo.data[para].data[tt];
			}
		}
		// ...then across the VEC_SIZE lanes.  Fix: the unroll pragma must
		// sit inside the loop body; in the original it was placed between
		// the for header and the opening brace, where it does not bind to
		// this loop.
		for (int tt = 0; tt < VEC_SIZE; tt++)
		{
			#pragma HLS unroll
			acc += acc_tmp[tt];
		}
		
		// Pack the scalar into the low bits of the output stream word.
		_trans_acc.data(GET_BIT(acc)-1, 0) = acc;
		result_channels.write(_trans_acc);

		// Advance raster counters: one result per loop iteration.
		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1 && h == DATA_SIZE_H - DATA_SIZE_K + 1 - 1)
			++i;

		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1 && h == DATA_SIZE_H - DATA_SIZE_K + 1 - 1)
			h = 0;
		else if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1)
			++h;

		if(w == DATA_SIZE_W - DATA_SIZE_K + 1 - 1)
			w = 0;
		else
			++w;

	}
}

extern "C" {
// Top-level free-running convolution kernel: a three-stage HLS dataflow
// (read/unpack -> multiply-accumulate -> reduce/write) connected by
// internal FIFOs.
void coreConv(
	REFERENCE_STREAM(k2k<vec_type>, 16, weight_channels), 
	REFERENCE_STREAM(k2k<vec_type>, 16, img_channels),
	REFERENCE_STREAM(k2k<DATA_TYPE>, 16, result_channels)
)
{
	#pragma HLS interface ap_ctrl_none port=return // Special pragma for freerunning kernel
	#pragma HLS INTERFACE axis  port = weight_channels depth=16
	#pragma HLS INTERFACE axis  port = img_channels depth=16
	#pragma HLS INTERFACE axis  port = result_channels depth=256
	#pragma HLS dataflow
	// Inter-stage FIFOs: weights and feature-map data flow from the
	// reader into the MAC stage; partial sums flow on to the reducer.
	hls::stream<vec_typer, 16> wt_fifo;
	hls::stream<vec_typer, 16> fmp_fifo;
	hls::stream<vec_typer, 16> psum_fifo;
	funcRd(weight_channels, img_channels, wt_fifo, fmp_fifo);
	funcUp(psum_fifo, wt_fifo, fmp_fifo);
	funcDown(result_channels, psum_fifo);
}
}

