#include "ops.h"
#include <array>
#include "im2col.h"
#include "gemm.h"
#include <fstream>
#include <queue>
#include "cim144.h"
#include "config.h"


/**
 * @brief CPU reference conv2d using im2col + transposed GEMM, then col2im back to NCHW.
 *
 * @param input   activation tensor, layout NCHW (batch assumed 1)
 * @param weight  filter tensor, layout OIHW
 * @param strides {stride_h, stride_w}
 * @param shift   right-shift applied inside gemm_T for int8 requantization
 * @param padding {top, bottom, left, right}
 * @param dilation unused by this path (im2col_cpu_T does not take it)
 * @return output tensor of shape {1, out_channels, out_h, out_w}
 */
Tensor Conv2d_op_T(const Tensor& input, const Tensor& weight, const std::array<int, 2>& strides,int shift, const std::array<int, 4>& padding, const int& dilation)
{
	auto tensor_dims = input.dims();
	auto ksize = weight.dims();
	auto weight_data = weight.data().get();
	auto input_data = input.data().get();
	// Input layout: NCHW
	auto tensor_height = tensor_dims[2];
	auto tensor_width = tensor_dims[3];
	// Weight layout: OIHW
	auto kernel_height = ksize[2];
	auto kernel_width = ksize[3];
	auto input_channel = ksize[1];
	auto output_channel = ksize[0];
	// Strides
	auto stride_height = strides[0];
	auto stride_width = strides[1];
	// Padding: top / bottom / left / right
	auto padding_top = padding[0];
	auto padding_bottom = padding[1];
	auto padding_left = padding[2];
	auto padding_right = padding[3];
	// Output spatial size
	int output_height = (tensor_height + padding_top + padding_bottom - kernel_height) / stride_height + 1;
	int output_width = (tensor_width + padding_left + padding_right - kernel_width) / stride_width + 1;
	auto output_size = output_height * output_width * output_channel;

	// GEMM dimensions: C[m x n] = A[m x k] * B[k x n]
	int m = output_channel;
	int k = kernel_height * kernel_width * input_channel;
	int n = output_height * output_width;

	auto a = weight_data;
	int b_size = k * n;
	int b_w = n;

	// Array specialization so the deleter is delete[] -- the non-array
	// shared_ptr<int8_t> form would run plain delete on new[] storage (UB).
	std::shared_ptr<int8_t[]> b(new int8_t[b_size]{ 0 });
	std::shared_ptr<int8_t[]> output_buf (new int8_t[output_size] {0});
	std::shared_ptr<int8_t[]> output_data(new int8_t[output_size] {0});
	auto c2 = output_buf.get();
	auto c3 = output_data.get();
	// NOTE(review): im2col_cpu_T only receives kernel_height / stride_height /
	// padding_left, so it presumably assumes square kernels, equal strides and
	// symmetric padding -- TODO confirm against its definition.
	im2col_cpu_T(input_data,input_channel,tensor_height,tensor_width,kernel_height,stride_height,padding_left,b.get());
	gemm_T(0, 0, m, n, k, 1, a, k, b.get(), n, 1, c2, n,shift);
	// Rearrange the GEMM result into channel-major NCHW output.
	col2im_T(c2,output_channel,c3,output_channel,b_w);

	return Tensor(output_data, { 1, output_channel, output_height, output_width});
}

/**
 * @brief Conv2d executed on the CIM accelerator through ROCC custom instructions.
 *
 * im2col is done on the CPU (with 8-aligned rows for the array), then each
 * output pixel's column is pushed to the array, one matrix-vector multiply is
 * run, and the int8 result is read back; col2im_T_cim strips hardware padding.
 *
 * @param input   activation tensor, layout NCHW (batch assumed 1)
 * @param weight  filter tensor, layout OIHW (weights assumed pre-loaded into
 *                the array via `ac` -- only its dims are read here)
 * @param ac      array/ROCC configuration (opcodes, padding counts, geometry)
 * @param strides {stride_h, stride_w}
 * @param padding {top, bottom, left, right}
 * @param dilation unused by this path
 * @return output tensor of shape {1, out_channels, out_h, out_w}
 */
Tensor Conv2d_op_cim(const Tensor& input,  const Tensor& weight, 
				     const ArrayConfig& ac,const std::array<int, 2>& strides, 
					 const std::array<int, 4>& padding,const int& dilation)
{
	auto tensor_dims = input.dims();
	auto ksize = weight.dims();
	auto weight_data = weight.data().get();
	auto input_data = input.data().get();
	// Input layout: NCHW
	auto tensor_height = tensor_dims[2];
	auto tensor_width = tensor_dims[3];
	// Weight layout: OIHW
	auto kernel_height = ksize[2];
	auto kernel_width = ksize[3];
	auto input_channel = ksize[1];
	auto output_channel = ksize[0];
	// Strides
	auto stride_height = strides[0];
	auto stride_width = strides[1];
	// Padding: top / bottom / left / right
	auto padding_top = padding[0];
	auto padding_bottom = padding[1];
	auto padding_left = padding[2];
	auto padding_right = padding[3];
	// Output spatial size
	int output_height = (tensor_height + padding_top + padding_bottom - kernel_height) / stride_height + 1;
	int output_width = (tensor_width + padding_left + padding_right - kernel_width) / stride_width + 1;

	int m = output_channel;                               // rows the array produces per MVM
	int k = kernel_height * kernel_width * input_channel; // column length pushed per pixel
	int n = output_height * output_width;                 // number of output pixels

	auto output_size = n * output_channel;
	// Hardware writes out_channels rounded up to a multiple of 8.
	int output_channel_buf = Align8(output_channel);
	auto output_buf_size = n * output_channel_buf;
	int b_size = n * Align8(k);
	int b_w = n;

	// Array specialization so the deleter is delete[] -- the non-array
	// shared_ptr<int8_t> form would run plain delete on new[] storage (UB).
	std::shared_ptr<int8_t[]> b(new int8_t[b_size]{ 0 });
	std::shared_ptr<int8_t[]> output_buf (new int8_t[output_buf_size] {0});
	std::shared_ptr<int8_t[]> output_data(new int8_t[Align8(output_size)] {0});
	auto c2 = output_buf.get();
	auto c3 = output_data.get();
	// im2col with `push_frond` leading pad values expected by the array.
	im2col_cpu_T_cim(input_data,input_channel,tensor_height,tensor_width,kernel_height,stride_height,padding_left,b.get(),ac.pc.push_frond);
	// One MVM per output pixel: push column -> multiply -> save result row.
	ROCC_config(ac.rc.config);
	for(int i = 0; i<n; i++){
        ROCC_push((int64_t)(b.get() + i * k),ac.rc.push_rs2);  // k = k_w*k_h*in_c
        ROCC_mvm(ac.rc.mvm_rs1,ac.rc.mvm_rs2);
        ROCC_save((int64_t)(c2 + i * m),ac.rc.save_rs2);  // m = out_channel
	}
	// Drop the Align8 channel padding and transpose back to NCHW.
	col2im_T_cim(c2,output_channel_buf,c3,output_channel,b_w,ac.pc.save_frond);
	return Tensor(output_data, { 1, output_channel, output_height, output_width});
}

// Fully-connected layer executed on the CIM accelerator via ROCC custom
// instructions: push the whole input vector, run one matrix-vector multiply,
// read the int8 result back.
// NOTE(review): the `weight` argument is never read here -- weights are
// presumably pre-loaded into the array through `ac`; confirm with the caller.
Tensor Connect_cim(const Tensor& input, const Tensor& weight ,const ArrayConfig &ac) {	
	auto input_data = input.data().get();
	// Output length comes from the post-processing config, not from the
	// tensor shapes.
	int output_size = ac.pc.save_num;
	std::shared_ptr<int8_t[]> output_data(new int8_t[output_size] {0});
	int8_t * output_ptr = output_data.get();
	// Configure the array, stream the input in, multiply, save the result.
	ROCC_config(ac.rc.config);
	ROCC_push((int64_t)input_data ,ac.rc.push_rs2);
    ROCC_mvm(ac.rc.mvm_rs1,ac.rc.mvm_rs2);
    ROCC_save((int64_t)(output_ptr),ac.rc.save_rs2);

	// If the hardware prepends `save_frond` padding values, slide the valid
	// window (`mvm_col_num` elements) back to the start of the buffer.
	// The copy runs forward, so the overlapping ranges are handled correctly.
	int save_frond = ac.pc.save_frond;
	if( save_frond>0){
		for(int i = 0;i< ac.mc.mvm_col_num;i++){
			output_ptr[i] = output_ptr[i + save_frond];
		}
	}
	return Tensor(output_data, { 1, output_size, 1, 1});
}

/**
 * @brief Adds a per-channel bias to an NCHW activation tensor.
 *
 * Each output element is input[c][h][w] + bias[c]. The sum is accumulated in
 * int and saturated to [-127, 127] before being stored as int8, matching the
 * clamping convention used by Connect() in this file; the previous plain int8
 * add could silently wrap on overflow.
 *
 * @param input activation tensor, layout NCHW (batch assumed 1)
 * @param bias  bias tensor whose dim[1] spans the channels
 * @return new tensor with the same shape as `input`
 */
Tensor AddBias_op(const Tensor& input, const Tensor& bias)
{
	auto input_dims = input.dims();
	auto channel = input_dims[1];
	auto row = input_dims[2];
	auto col = input_dims[3];
	auto input_data = input.data().get();
	auto bias_data = bias.data().get();

	auto output_size = row * col * channel;
	std::shared_ptr<int8_t[]> output_data(new int8_t[output_size]);
	for (int c = 0; c < channel; c++) {
		for (int h = 0; h < row; h++) {
			for (int w = 0; w < col; w++) {
				auto out_index = c * row * col + h * col + w;
				// Widen to int, then saturate to the int8 range used elsewhere.
				int sum = (int)input_data[out_index] + (int)bias_data[c];
				if (sum > 127) sum = 127;
				if (sum < -127) sum = -127;
				output_data[out_index] = (int8_t)sum;
			}
		}
	}
	return Tensor(output_data, { 1, channel, row, col });
}

/**
 * @brief Returns the k largest elements of `input` as (index, value) pairs,
 *        sorted from largest to smallest.
 *
 * If k exceeds the number of elements, all elements are returned (the original
 * code popped an empty priority_queue in that case, which is undefined
 * behavior).
 *
 * @param input tensor to rank
 * @param k     number of top entries requested
 * @return vector of {flat index, int8 value} pairs, descending by value
 */
std::vector<std::pair<int, int8_t>> TopK(const Tensor& input, int k) {
	auto temp = input.data().get();
	// Max-heap keyed by value; index tags let us recover positions.
	std::priority_queue<std::pair<double, int>> q;
	for (int i = 0; i < input.size(); ++i) {
		q.push(std::pair<double, int>(temp[i], i));
	}
	// Never pop more entries than the heap holds.
	if (k > (int)q.size()) k = (int)q.size();
	std::vector<std::pair<int, int8_t>> ivmap;
	ivmap.reserve(k);
	for (int i = 0; i < k; ++i) {
		int ki = q.top().second;
		ivmap.push_back(std::pair<int, int8_t>(ki, temp[ki]));
		q.pop();
	}
	return ivmap;
}

/**
 * @brief General CPU conv2d via im2col + GEMM, supporting asymmetric strides,
 *        padding and dilation.
 *
 * @param input   activation tensor, layout NCHW (batch assumed 1)
 * @param weight  filter tensor, layout OIHW
 * @param strides {stride_h, stride_w}
 * @param padding {top, bottom, left, right}
 * @param dilation dilation factor applied on both axes
 * @return output tensor of shape {1, out_channels, out_h, out_w}
 */
Tensor Conv2d_op(const Tensor& input, const Tensor& weight, const std::array<int, 2>& strides, const std::array<int, 4>& padding, const int& dilation)
{
	auto tensor_dims = input.dims();
	auto ksize = weight.dims();
	auto weight_data = weight.data().get();
	auto input_data = input.data().get();
	// Input layout: NCHW
	auto tensor_height = tensor_dims[2];
	auto tensor_width = tensor_dims[3];
	// Weight layout: OIHW
	auto kernel_height = ksize[2];
	auto kernel_width = ksize[3];
	auto input_channel = ksize[1];
	auto output_channel = ksize[0];
	// Strides
	auto stride_height = strides[0];
	auto stride_width = strides[1];
	// Padding: top / bottom / left / right
	auto padding_top = padding[0];
	auto padding_bottom = padding[1];
	auto padding_left = padding[2];
	auto padding_right = padding[3];
	// Output spatial size
	int output_height = (tensor_height + padding_top + padding_bottom - kernel_height) / stride_height + 1;
	int output_width = (tensor_width + padding_left + padding_right - kernel_width) / stride_width + 1;
	auto output_size = output_height * output_width * output_channel;
	std::shared_ptr<int8_t[]> output_data(new int8_t[output_size] {0});

	// GEMM dimensions: C[m x n] = A[m x k] * B[k x n]
	int m = output_channel;
	int k = kernel_height * kernel_width * input_channel;
	int n = output_height * output_width;

	auto a = weight_data;
	int b_size = input_channel * kernel_height * kernel_width * output_height * output_width;
	// Array specialization so the deleter is delete[] -- the non-array
	// shared_ptr<int8_t> form would run plain delete on new[] storage (UB).
	std::shared_ptr<int8_t[]> b(new int8_t[b_size]{ 0 });
	auto c = output_data.get();
	im2col_cpu_ext(input_data, input_channel, tensor_height, tensor_width, kernel_height, kernel_width, padding_top, padding_left, stride_height, stride_width, dilation, dilation, b.get());
	gemm(0, 0, m, n, k, 1, a, k, b.get(), n, 1, c, n);
	return Tensor(output_data, { 1, output_channel, output_height, output_width});
}

// Softmax over each batch row of an int8 tensor.
// NOTE(review): `buf` holds int8_t, so std::exp(f - max) -- a double in
// (0, 1] -- is truncated to 0 or 1 on store, and the final division is again
// truncated to int8. The output is therefore effectively a one-hot argmax
// indicator rather than a real probability distribution. This may be the
// intended behavior for the int8 pipeline; confirm before relying on it.
Tensor Softmax(const Tensor& input) {
	auto tensor_dims = input.dims();
	auto batch = tensor_dims[0];
	auto temp = input.data().get();
	int  num = input.size() / batch;
	std::shared_ptr<int8_t[]> buf(new int8_t[input.size()]);
	for (int nn = 0; nn < batch; nn++) {
		// Offset of the per-row maximum, subtracted for exp() stability.
		int id = (int)std::distance(temp + nn * num, std::max_element(temp + nn * num, temp + (nn + 1) * num));
		int8_t max = *(temp + nn * num + id);
		std::transform(temp + nn * num, temp + (nn + 1) * num, buf.get() + nn * num, [&](auto f) { return std::exp(f - max); });
		// Sum of the (truncated) exponentials for this row.
		auto ac = std::accumulate(buf.get() + nn * num, buf.get() + (nn + 1) * num, 0.f);
		std::transform(buf.get() + nn * num, buf.get() + (nn + 1) * num, buf.get() + nn * num, [&](auto f) { return f / ac; });
	}
	return Tensor(buf, input.dims());
}

// Element-wise ReLU: negative activations become 0, everything else passes
// through unchanged. Returns a new tensor with the same {1, C, H, W} shape.
Tensor Relu_ops(const Tensor& input) {
	const auto dims = input.dims();
	const auto channel = dims[1];
	const auto row = dims[2];
	const auto col = dims[3];
	const auto src = input.data().get();

	// The op is purely element-wise, so one flat pass over the buffer is
	// equivalent to iterating channel/row/col separately.
	const auto total = channel * row * col;
	std::shared_ptr<int8_t[]> dst(new int8_t[total]);
	for (int idx = 0; idx < total; ++idx) {
		const int8_t v = src[idx];
		dst[idx] = (v < 0) ? 0 : v;
	}
	return Tensor(dst, { 1, channel, row, col });
}

/**
 * @brief 2D max pooling over an NCHW tensor.
 *
 * Out-of-bounds taps (from padding) contribute -127, so a fully padded window
 * yields -127 rather than a garbage read.
 *
 * @param input       activation tensor, layout NCHW (batch assumed 1)
 * @param kernel_size {kernel_h, kernel_w}
 * @param strides     {stride_h, stride_w}
 * @param padding     {top, bottom, left, right}
 * @return pooled tensor of shape {1, C, out_h, out_w}
 */
Tensor MaxPool(const Tensor& input, const std::array<int, 2>& kernel_size, const std::array<int, 2>& strides, const std::array<int, 4>& padding) {
	auto tensor_dims = input.dims();
	auto input_data = input.data().get();
	// Input layout: NCHW
	auto tensor_height = tensor_dims[2];
	auto tensor_width = tensor_dims[3];
	auto tensor_chann = tensor_dims[1];
	// Pooling window
	auto kernel_height = kernel_size[0];
	auto kernel_width = kernel_size[1];
	// Strides
	auto stride_height = strides[0];
	auto stride_width = strides[1];
	// Padding: top / bottom / left / right
	auto padding_top = padding[0];
	auto padding_bottom = padding[1];
	auto padding_left = padding[2];
	auto padding_right = padding[3];
	// Output spatial size
	int output_height = (tensor_height + padding_top + padding_bottom - kernel_height) / stride_height + 1;
	int output_width = (tensor_width + padding_left + padding_right - kernel_width) / stride_width + 1;
	auto output_size = output_height * output_width * tensor_chann;
	std::shared_ptr<int8_t[]> output_data(new int8_t[output_size] {0});

	// Window origins start at -padding so the first window can hang over the
	// top-left border.
	int w_offset = -padding_left;
	int h_offset = -padding_top;

	int h = output_height;
	int w = output_width;
	int c = tensor_chann;

	for (int k = 0; k < c; ++k) {
		for (int i = 0; i < h; ++i) {
			for (int j = 0; j < w; ++j) {
				int out_index = j + w * (i + h * k);
				int8_t max = -127;
				for (int n = 0; n < kernel_height; ++n) {
					for (int m = 0; m < kernel_width; ++m) {
						int cur_h = h_offset + i * stride_height + n;
						int cur_w = w_offset + j * stride_width + m;
						int index = cur_w + tensor_width * (cur_h + tensor_height * k);
						// Padding taps (outside the input) count as -127.
						int valid = (cur_h >= 0 && cur_h < tensor_height&&
							cur_w >= 0 && cur_w < tensor_width);
						int8_t val = (valid != 0) ? input_data[index] : -127;
						max = (val > max) ? val : max;
					}
				}
				output_data[out_index] = max;
			}
		}
	}
	return Tensor(output_data, { 1, tensor_chann, output_height, output_width });
}

// CPU fully-connected layer for int8 tensors: for every output unit, take the
// dot product of its weight row with the flattened input, right-shift by
// `shift` for requantization, and saturate to [-127, 127].
Tensor Connect(const Tensor& input, const Tensor& weight,int shift) {
	const auto dims = input.dims();
	const auto in_data = input.data().get();
	const auto feat_c = dims[1];
	const auto feat_h = dims[2];
	const auto feat_w = dims[3];

	const auto w_data = weight.data().get();
	// Number of output units = total weights / input length.
	const auto out_len = weight.size() / input.size();

	std::shared_ptr<int8_t[]> out(new int8_t[out_len] {0});

	// Weight row `o` occupies feat_len consecutive entries, laid out in the
	// same c/h/w order as the flattened input, so a single linear dot product
	// matches the original triple loop exactly.
	const auto feat_len = feat_c * feat_h * feat_w;
	for (int o = 0; o < out_len; o++) {
		const int8_t* w_row = w_data + o * feat_len;
		int acc = 0;
		for (int i = 0; i < feat_len; i++) {
			acc += w_row[i] * in_data[i];
		}
		// Requantize, then saturate to the symmetric int8 range.
		acc = acc >> shift;
		if (acc > 127) acc = 127;
		if (acc < -127) acc = -127;
		out[o] = (int8_t)acc;
	}

	return Tensor(out, { 1, out_len, 1, 1});
}

