#include "pch.h"
#include <chrono>  // Timer (std::chrono::steady_clock)
using namespace std;

// Matrix element type used throughout the library.
using VALUE_TYPE = double;

// Task type tags dispatched by worker_routine.
constexpr auto LIB_TASK_GEMV = 1;
constexpr auto LIB_TASK_GETRF = 2;
constexpr auto LIB_TASK_GETRS = 4;

// CBLAS-style triangle selectors (values match CBLAS_UPLO; unused here so far).
constexpr auto BLAS_LOWER = 121;
constexpr auto BLAS_UPPER = 122;

// Tile sizes. Must be even: dnmv_scheduler splits each tile into half-size
// quadrants (BLOCK_SIZE / 2) when building the CPU GEMV task graph.
constexpr auto BLOCK_SIZE_SOLVER = 512;
constexpr auto BLOCK_SIZE_GEMV_R = 512;
constexpr auto BLOCK_SIZE_GEMV_L = 512;

// One asynchronous library request.  Owned by the global `tasks` registry,
// executed on its own worker thread (worker_routine), released by task_wait.
// (C++ style `struct Task` replaces the C-ism `typedef struct { ... } Task`.)
struct Task {
	int type;                       // LIB_TASK_GEMV / LIB_TASK_GETRF / LIB_TASK_GETRS
	int n;                          // problem order
	int A_stride;                   // leading dimension of A (column-major)
	double* A;                      // matrix operand (caller-owned)
	double* x;                      // input vector (GEMV only)
	double* B;                      // output vector (GEMV) or RHS/solution (GETRS)
	int* ipiv;                      // pivot indices (GETRF/GETRS only)
	long long solver_task_handler;  // 1-based index of this task in `tasks`
	int increblas_object_id;        // which blas[] instance services a GEMV
	HANDLE hHandle;                 // worker-thread handle, waited on in task_wait
};

// Simple wall-clock stopwatch: start() ... stop(), then query the elapsed
// interval.  Rewritten on std::chrono::steady_clock (monotonic) instead of
// the Windows-only QueryPerformanceFrequency/QueryPerformanceCounter pair:
// same interface and resolution on MSVC (steady_clock is QPC-backed), but
// portable and with no per-start() frequency query.
class Timer {
	std::chrono::steady_clock::time_point start_time, end_time;
public:
	// Record the starting instant.
	void start() {
		start_time = std::chrono::steady_clock::now();
	}

	// Record the stopping instant.
	void stop() {
		end_time = std::chrono::steady_clock::now();
	}

	// Elapsed time between start() and stop(), in milliseconds.
	double get_time_ms() {
		return std::chrono::duration<double, std::milli>(end_time - start_time).count();
	}

	// Elapsed time between start() and stop(), in seconds.
	double get_time_s() {
		return get_time_ms() / 1000;
	}
};

// tf::Executor* executor;

// Incrementally-growing dense matrix with a hybrid CPU/GPU mat-vec product.
//
// The matrix is stored as BLOCK_SIZE x BLOCK_SIZE column-major tiles in M,
// laid out via Mmap() so that growing the matrix only APPENDS tiles and never
// relocates existing ones.  incmv() first merges the newly-added rows/columns
// of the caller's matrix into tile storage, then computes C = A*B:
//   - cuBLAS handles the leading N_SYNCED x N_SYNCED square of tiles whose
//     data is mirrored on the GPU,
//   - Taskflow CPU tasks handle everything else,
// and the two partial results are summed on the host.  gpu_sync() gradually
// enlarges the GPU-mirrored square when the CPU path is the bottleneck.
class IncreBLAS {
	// One tile.  gpu_data stays NULL until the tile is mirrored by gpu_sync().
	struct Block {
		VALUE_TYPE* data;
		VALUE_TYPE* gpu_data;
	};
	// Rectangular sub-area of a column-major matrix with leading dim `stride`.
	struct AreaDescriptor {
		int start_y;
		int start_x;
		int h;
		int w;
		int stride;
	};
	enum TaskType {
		TASK_COPY,
		TASK_GEMV
	};
	// Tile-sized unit of work, executed by a single Taskflow node.
	struct MicroTask {
		TaskType type;
		Block A;
		Block B;
		Block C;
		AreaDescriptor area_A;
		AreaDescriptor area_B;
		AreaDescriptor area_C;
	};
	// Work over raw (possibly multi-tile) areas; macro_worker splits it into
	// MicroTasks along tile boundaries.
	struct MacroTask {
		TaskType type;
		VALUE_TYPE* A;
		VALUE_TYPE* B;
		VALUE_TYPE* C;
		AreaDescriptor area_A;
		AreaDescriptor area_B;
		AreaDescriptor area_C;
	};

	vector<Block> M;                 // tile storage, addressed through Mmap(I, J)
	VALUE_TYPE* B_dev_comp = NULL;   // device copy of the synced part of the input vector
	VALUE_TYPE* C_dev_comp = NULL;   // device accumulator for the GPU partial product
	VALUE_TYPE* C_host_comp = NULL;  // host staging buffer for the GPU partial product
	const int BLOCK_SIZE;
	int N = 0;                // tiles per side covering `order` (rounded up)
	int N_COMPLETE = 0;       // fully-filled tiles per side
	int N_COMPLETE_LAST = 0;  // N_COMPLETE at the previous gpu_sync decision
	int N_SYNCED = 0;         // leading tiles per side mirrored on the GPU
	int order = 0;            // current matrix order
	int order_last = 0;       // order before the most recent growth

	tf::Taskflow taskflow;
	tf::Executor executor;
	tf::Future<void> gpu_sync_future;
	tf::Future<void> cpu_gemv_future;
	int have_gpu_sync_stream = 0;
	Timer balance_timer;
	double balance_gpu_wait_ms = 0;  // accumulated time the GPU path waited for the CPU tasks
	int big_timestep_n_call = 0;     // incmv() calls since the last gpu_sync decision

	const int n_stream = 2;
	cudaStream_t* streams = NULL;    // only used by the commented-out async variants below
	cublasHandle_t cublas_handle;

	// Map tile coordinates (I, J) to a linear index into M.  Tiles are grouped
	// by layer = max(I, J); layer k occupies indices [k*k, (k+1)*(k+1)), so
	// growing the matrix appends new layers at the end.  Within a layer: the
	// column part (I < J) comes first, then the row part (I > J), and the
	// diagonal tile (I == J) last.
	int Mmap(int I, int J) {
		int layer = max(I, J);
		int start = layer * layer;
		int offset = 0;
		if (I > J) {
			offset = layer;
			offset += layer - (I - J);
		}
		else if (I < J) {
			offset = 0;
			offset += layer - (J - I);
		}
		else {
			offset = 2 * layer;
		}
		return start + offset;
	}

	// C[area_C] += A[area_A] * B[area_B] on the CPU (alpha = beta = 1).
	void GEMV(Block C, const AreaDescriptor area_C, Block A, const AreaDescriptor area_A, Block B, const AreaDescriptor area_B)
	{
		cblas_dgemv(CblasColMajor, CblasNoTrans, area_A.h, area_A.w, 1, A.data + area_A.start_y + area_A.start_x * area_A.stride, area_A.stride, B.data + area_B.start_y,
			1, 1, C.data + area_C.start_y, 1);
	}

	// Execute a single micro task synchronously.  Exits the process on a
	// shape mismatch (programming error in the scheduler).
	void micro_worker(MicroTask task) {
		switch (task.type) {
		case(TASK_COPY):  // copy task.A to task.B
			// printf("[BLAS] MICRO COPY (%d:%d,%d:%d) -> (%d:%d,%d:%d), %d*%d\n",
			// 	task.area_A.start_y, task.area_A.start_y + task.area_A.h,
			// 	task.area_A.start_x, task.area_A.start_x + task.area_A.w,
			// 	task.area_B.start_y, task.area_B.start_y + task.area_B.h,
			// 	task.area_B.start_x, task.area_B.start_x + task.area_B.w,
			// 	task.area_A.h, task.area_A.w
			// 	);
			if (task.area_A.h == task.area_B.h && task.area_A.w == task.area_B.w) {
				// Column-by-column memcpy between the two column-major areas.
				for (int i = 0; i < task.area_A.w; i++) {
					memcpy(task.B.data + task.area_B.start_y + (task.area_B.start_x + i) * task.area_B.stride,
						task.A.data + task.area_A.start_y + (task.area_A.start_x + i) * task.area_A.stride,
						sizeof(VALUE_TYPE) * task.area_A.h);
				}
			}
			else {
				printf("[MICRO WORKER][COPY] Shape error. Exit(-3). \n");
				exit(-3);
			}
			break;

		case(TASK_GEMV): //task.C += task.A * task.B
			// printf("[BLAS] MICRO GEMV (%d:%d) += (%d:%d,%d:%d) * (%d:%d), %d*%d\n",
			// 	task.area_C.start_y, task.area_C.start_y + task.area_C.h,
			// 	task.area_A.start_y, task.area_A.start_y + task.area_A.h,
			// 	task.area_A.start_x, task.area_A.start_x + task.area_A.w,
			// 	task.area_B.start_y, task.area_B.start_y + task.area_B.h,
			// 	task.area_A.h, task.area_A.w
			// 	);
			if (task.area_A.w == task.area_B.h && task.area_A.h == task.area_C.h)
			{
				GEMV(task.C, task.area_C, task.A, task.area_A, task.B, task.area_B);
			}
			else {
				printf("[MICRO WORKER][GEMV] Shape error. Exit(-4). \n");
				exit(-4);
			}
			break;

		}
		return;
	}

	// Split one macro task into micro tasks emplaced on `subflow`.
	//
	// Macro task has pointer A, B or C.  If a pointer is NULL but the task
	// uses this matrix, NULL means "the tiled matrix stored in M".  Only
	// TASK_COPY with B == NULL (copy a raw area of A into tile storage) is
	// implemented; the area is cut along tile boundaries into up to four
	// corner pieces, four edge strips, and the interior tiles.
	void macro_worker(tf::Subflow& subflow, MacroTask task) {
		switch (task.type) {
		case(TASK_COPY):  // copy task.A to task.B
			// printf("[BLAS] MACRO COPY (%d:%d,%d:%d) -> (%d:%d,%d:%d), %d*%d\n",
			// 	task.area_A.start_y, task.area_A.start_y + task.area_A.h,
			// 	task.area_A.start_x, task.area_A.start_x + task.area_A.w,
			// 	task.area_B.start_y, task.area_B.start_y + task.area_B.h,
			// 	task.area_B.start_x, task.area_B.start_x + task.area_B.w,
			// 	task.area_A.h, task.area_A.w
			// );
			if (task.area_A.h == task.area_B.h && task.area_A.w == task.area_B.w) {
				if (task.B == NULL) {
					// Corner blocks c1..c4 of the destination area in tile space:
					// c1 = top-left, c2 = top-right, c3 = bottom-left, c4 = bottom-right.
					const int c1_c2_I = task.area_B.start_y / BLOCK_SIZE;  // tile row shared by corners 1 and 2
					const int c1_c3_J = task.area_B.start_x / BLOCK_SIZE;  // tile column shared by corners 1 and 3
					const int c3_c4_I = (task.area_B.start_y + task.area_B.h - 1) / BLOCK_SIZE;  // tile row shared by corners 3 and 4
					const int c2_c4_J = (task.area_B.start_x + task.area_B.w - 1) / BLOCK_SIZE;  // tile column shared by corners 2 and 4

					//const int area_A_c1_c2_I = task.area_A.start_y / BLOCK_SIZE;  // tile row shared by corners 1 and 2
					//const int area_A_c1_c3_J = task.area_A.start_x / BLOCK_SIZE;  // tile column shared by corners 1 and 3
					//const int area_A_c3_c4_I = (task.area_A.start_y + task.area_A.h) / BLOCK_SIZE;  // tile row shared by corners 3 and 4
					//const int area_A_c2_c4_J = (task.area_A.start_x + task.area_A.w) / BLOCK_SIZE;  // tile column shared by corners 2 and 4

					int areaB_c1_c2_y = task.area_B.start_y - c1_c2_I * BLOCK_SIZE;
					int areaA_c3_c4_y = task.area_A.start_y + c3_c4_I * BLOCK_SIZE - task.area_B.start_y;
					int areaB_c1_c3_x = task.area_B.start_x - c1_c3_J * BLOCK_SIZE;
					int areaA_c2_c4_x = task.area_A.start_x + c2_c4_J * BLOCK_SIZE - task.area_B.start_x;

					// The following extents are identical for area_A and area_B.
					int c1_c2_h;
					int c3_c4_h;
					int c1_c3_w;
					int c2_c4_w;
					if (c1_c2_I == c3_c4_I)
					{
						c1_c2_h = task.area_B.h;
						c3_c4_h = c1_c2_h;
					}
					else
					{
						c1_c2_h = (c1_c2_I + 1) * BLOCK_SIZE - task.area_B.start_y;
						c3_c4_h = task.area_B.h - (c3_c4_I * BLOCK_SIZE - task.area_B.start_y);
					}

					if (c1_c3_J == c2_c4_J)
					{
						c1_c3_w = task.area_B.w;
						c2_c4_w = c1_c3_w;
					}
					else
					{
						c1_c3_w = (c1_c3_J + 1) * BLOCK_SIZE - task.area_B.start_x;
						c2_c4_w = task.area_B.w - (c2_c4_J * BLOCK_SIZE - task.area_B.start_x);
					}
					// Handle non-corner tiles along the top edge.
					for (int J = c1_c3_J + 1; J < c2_c4_J; J++) {
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(c1_c2_I, J)], {},
								{task.area_A.start_y, task.area_A.start_x + (J * BLOCK_SIZE - task.area_B.start_x), c1_c2_h, BLOCK_SIZE, task.area_A.stride},
								{areaB_c1_c2_y, 0, c1_c2_h, BLOCK_SIZE, BLOCK_SIZE}
								});
							});
					}

					// Handle non-corner tiles along the bottom edge.
					for (int J = c1_c3_J + 1; J < c2_c4_J; J++) {
						subflow.emplace([=]() {
							micro_worker({
							TASK_COPY, {task.A}, M[Mmap(c3_c4_I, J)], {},
							{areaA_c3_c4_y, task.area_A.start_x + (J * BLOCK_SIZE - task.area_B.start_x), c3_c4_h, BLOCK_SIZE, task.area_A.stride},
							{0, 0,  c3_c4_h, BLOCK_SIZE, BLOCK_SIZE}
								});
							});
					}

					// Handle non-corner tiles along the left edge.
					for (int I = c1_c2_I + 1; I < c3_c4_I; I++)
					{
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(I, c1_c3_J)], {},
								{task.area_A.start_y + (I * BLOCK_SIZE - task.area_B.start_y), task.area_A.start_x, BLOCK_SIZE, c1_c3_w, task.area_A.stride},
								{0, areaB_c1_c3_x, BLOCK_SIZE,c1_c3_w, BLOCK_SIZE}
								});
							});
					}

					// Handle non-corner tiles along the right edge.
					for (int I = c1_c2_I + 1; I < c3_c4_I; I++)
					{
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(I, c2_c4_J)], {},
								{task.area_A.start_y + (I * BLOCK_SIZE - task.area_B.start_y), areaA_c2_c4_x, BLOCK_SIZE, c2_c4_w, task.area_A.stride},
								{0, 0, BLOCK_SIZE,c2_c4_w, BLOCK_SIZE}
								});
							});
					}

					// Handle the four corner tiles.
					//c1
					subflow.emplace([=]() {
						micro_worker({
							TASK_COPY, {task.A}, M[Mmap(c1_c2_I, c1_c3_J)], {},
							{task.area_A.start_y, task.area_A.start_x, c1_c2_h, c1_c3_w, task.area_A.stride},
							{areaB_c1_c2_y, areaB_c1_c3_x, c1_c2_h, c1_c3_w, BLOCK_SIZE}
							});
						});
					//c2
					if (c2_c4_J > c1_c3_J)
					{
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(c1_c2_I, c2_c4_J)], {},
								{task.area_A.start_y, areaA_c2_c4_x, c1_c2_h, c2_c4_w, task.area_A.stride},
								{areaB_c1_c2_y, 0, c1_c2_h, c2_c4_w, BLOCK_SIZE}
								});
							});
					}
					//c3
					if (c3_c4_I > c1_c2_I)
					{
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(c3_c4_I, c1_c3_J)], {},
								{areaA_c3_c4_y, task.area_A.start_x, c3_c4_h, c1_c3_w, task.area_A.stride},
								{0, areaB_c1_c3_x,c3_c4_h,c1_c3_w, BLOCK_SIZE}
								});
							});
					}
					//c4
					if ((c2_c4_J > c1_c3_J) && (c3_c4_I > c1_c2_I))
					{
						subflow.emplace([=]() {
							micro_worker({
								TASK_COPY, {task.A}, M[Mmap(c3_c4_I, c2_c4_J)], {},
								{areaA_c3_c4_y, areaA_c2_c4_x, c3_c4_h, c2_c4_w, task.area_A.stride},
								{0, 0, c3_c4_h, c2_c4_w, BLOCK_SIZE}
								});
							});
					}

					// Handle the interior tiles (full BLOCK_SIZE x BLOCK_SIZE copies).
					int areaA_starty = task.area_A.start_y + c1_c2_h;
					for (int I = c1_c2_I + 1; I < c3_c4_I; I++)
					{
						int areaA_startx = task.area_A.start_x + c1_c3_w;
						for (int J = c1_c3_J + 1; J < c2_c4_J; J++)
						{
							subflow.emplace([=]() {
								micro_worker({
									TASK_COPY, {task.A}, M[Mmap(I, J)], {},
									{areaA_starty, areaA_startx, BLOCK_SIZE, BLOCK_SIZE, task.area_A.stride},
									{0, 0, BLOCK_SIZE,BLOCK_SIZE, BLOCK_SIZE}
									});
								});
							areaA_startx += BLOCK_SIZE;
						}
						areaA_starty += BLOCK_SIZE;
					}
				}
				else {
					printf("[MACRO WORKER][COPY] Not implemented error. Exit(-1). \n");
					exit(-1);
				}
			}
			else {
				printf("[MACRO WORKER][COPY] Shape error. Exit(-13). \n");
				exit(-13);
			}

			break;
		}
		return;
	}

	// Merge the grown part of the caller's matrix A (order_last -> n) into the
	// tile storage M.  Schedules three independent copy tasks on `taskflow`
	// (caller clears and runs the flow): the new right column strip, the new
	// bottom row strip, and the bottom-right corner.
	void merge_scheduler(VALUE_TYPE* A, int A_stride, int n) {
		// No growth this call: nothing to merge.
		if (n == order) {
			return;
		}
		order_last = order;
		order = n;

		N = ceil((double)order / BLOCK_SIZE);
		N_COMPLETE = order / BLOCK_SIZE;
		// Allocate any newly needed tiles (gpu_data starts out unmirrored).
		while (M.size() < N * N) {
			M.push_back({ (VALUE_TYPE*)malloc(sizeof(VALUE_TYPE) * BLOCK_SIZE * BLOCK_SIZE), NULL });
		}

		auto merge_r = taskflow.emplace([=](tf::Subflow& subflow) {
			macro_worker(subflow, {
				TASK_COPY, A, NULL, NULL,
				{0, order_last, order_last, order - order_last, A_stride},
				{0, order_last, order_last, order - order_last, A_stride},
				{}
				});
			});

		auto merge_b = taskflow.emplace([=](tf::Subflow& subflow) {
			macro_worker(subflow, {
				TASK_COPY, A, NULL, NULL,
				{order_last, 0, order - order_last, order_last, A_stride},
				{order_last, 0, order - order_last, order_last, A_stride},
				{}
				});
			});

		auto merge_c = taskflow.emplace([=](tf::Subflow& subflow) {
			macro_worker(subflow, {
				TASK_COPY, A, NULL, NULL,
				{order_last, order_last, order - order_last, order - order_last, A_stride},
				{order_last, order_last, order - order_last, order - order_last, A_stride},
				{}
				});
			});

	}

	// Build the CPU GEMV task graph on `taskflow`: C += M * B over every tile
	// NOT covered by the GPU (i.e. with I >= N_SYNCED or J >= N_SYNCED).
	// Each complete tile is split into four half-size quadrants; tasks in one
	// tile row are chained left-to-right (upper and lower chains separately)
	// because they accumulate into the same segment of C.
	void dnmv_scheduler(VALUE_TYPE* B, VALUE_TYPE* C) {
		int add = order % BLOCK_SIZE;  // size of the partial trailing tile, 0 if none
		// GEMV over the already-frozen complete tiles.
		// TODO : CPU GPU preemptive work sharing
		// TODO : Reduction GEMV
		for (int I = 0; I < N_COMPLETE; I++)
		{
			tf::Task blocks_in_row_right_upper = taskflow.emplace([]() {});
			tf::Task blocks_in_row_right_lower = taskflow.emplace([]() {});
			tf::Task current_block_task_left_upper, current_block_task_left_lower, current_block_task_right_upper, current_block_task_right_lower;
			for (int J = 0; J < N_COMPLETE; J++)
			{
				if (J >= N_SYNCED || I >= N_SYNCED) {
					current_block_task_left_upper = taskflow.emplace([=]() {
						micro_worker({
							TASK_GEMV, M[Mmap(I, J)], {B,NULL}, {C,NULL},
							{0, 0, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE},
							{J * BLOCK_SIZE, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							{I * BLOCK_SIZE, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							});
						});
					current_block_task_left_lower = taskflow.emplace([=]() {
						micro_worker({
							TASK_GEMV, M[Mmap(I, J)], {B,NULL}, {C,NULL},
							{BLOCK_SIZE / 2, 0, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE},
							{J * BLOCK_SIZE, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							{I * BLOCK_SIZE + BLOCK_SIZE / 2, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							});
						});
					current_block_task_right_upper = taskflow.emplace([=]() {
						micro_worker({
							TASK_GEMV, M[Mmap(I, J)], {B,NULL}, {C,NULL},
							{0, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE},
							{J * BLOCK_SIZE + BLOCK_SIZE / 2, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							{I * BLOCK_SIZE, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							});
						});
					current_block_task_right_lower = taskflow.emplace([=]() {
						micro_worker({
							TASK_GEMV, M[Mmap(I, J)], {B,NULL}, {C,NULL},
							{BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE / 2, BLOCK_SIZE},
							{J * BLOCK_SIZE + BLOCK_SIZE / 2, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							{I * BLOCK_SIZE + BLOCK_SIZE / 2, 0, BLOCK_SIZE / 2, 1, BLOCK_SIZE},
							});
						});
					// Serialize tasks that write the same half-segment of C.
					current_block_task_left_upper.succeed(blocks_in_row_right_upper);
					current_block_task_right_upper.succeed(current_block_task_left_upper);
					blocks_in_row_right_upper = current_block_task_right_upper;

					current_block_task_left_lower.succeed(blocks_in_row_right_lower);
					current_block_task_right_lower.succeed(current_block_task_left_lower);
					blocks_in_row_right_lower = current_block_task_right_lower;

				}
			}


			if (add) {
				// GEMV over the not-yet-full last tile column of this row.
				auto incomplete_block_in_row = taskflow.emplace([=]() {
					micro_worker({
						TASK_GEMV,  M[Mmap(I, N_COMPLETE)], {B,NULL}, {C,NULL},
						{0,0,BLOCK_SIZE, order % BLOCK_SIZE, BLOCK_SIZE},
						{N_COMPLETE * BLOCK_SIZE, 0, order % BLOCK_SIZE, 1, BLOCK_SIZE},
						{I * BLOCK_SIZE, 0, BLOCK_SIZE, 1, BLOCK_SIZE}
						});
					});
				incomplete_block_in_row.succeed(blocks_in_row_right_upper, blocks_in_row_right_lower);
			}
		}

		if (add) {

			tf::Task task_last_row;
			// GEMV over the not-yet-full last diagonal tile.
			task_last_row = taskflow.emplace([=]() {
				micro_worker({
						TASK_GEMV,  M[Mmap(N_COMPLETE, N_COMPLETE)], {B,NULL}, {C,NULL},
						{0,0, order % BLOCK_SIZE,order % BLOCK_SIZE, BLOCK_SIZE},
						{N_COMPLETE * BLOCK_SIZE, 0, order % BLOCK_SIZE, 1, BLOCK_SIZE},
						{N_COMPLETE * BLOCK_SIZE, 0, order % BLOCK_SIZE, 1, BLOCK_SIZE},
					});
				});

			// GEMV over the not-yet-full last tile row (chained: same C segment).
			// TODO : Reduction
			for (int J = 0; J < N_COMPLETE; J++)
			{
				auto task_last_row_item = taskflow.emplace([=]() {
					micro_worker({
						TASK_GEMV,  M[Mmap(N_COMPLETE, J)], {B,NULL}, {C,NULL},
						{0,0, order % BLOCK_SIZE,BLOCK_SIZE, BLOCK_SIZE},
						{J * BLOCK_SIZE, 0, BLOCK_SIZE, 1, BLOCK_SIZE},
						{N_COMPLETE * BLOCK_SIZE, 0, order % BLOCK_SIZE, 1, BLOCK_SIZE}
						});
					});
				task_last_row_item.succeed(task_last_row);
				task_last_row = task_last_row_item;
			}


		}

	}

	//void dnmv_gpu(VALUE_TYPE* B, VALUE_TYPE* C) {
	//	cudaMemcpyAsync(B_dev_comp, B, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE, cudaMemcpyHostToDevice, streams[0]);
	//	cudaMemsetAsync(C_dev_comp, 0, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE, streams[0]);
	//	for (int J = 0; J < N_SYNCED; J++) {
	//		for (int I = 0; I < N_SYNCED; I++) {
	//			double alpha = 1.0, beta = 1.0;
	//			cublasStatus_t status = cublasDgemv(cublas_handle, cublasOperation_t::CUBLAS_OP_N,
	//				BLOCK_SIZE, BLOCK_SIZE,
	//				&alpha,
	//				M[Mmap(I, J)].gpu_data, BLOCK_SIZE,
	//				B_dev_comp + J * BLOCK_SIZE, 1,
	//				&beta,
	//				C_dev_comp + I * BLOCK_SIZE, 1);
	//			if (status != CUBLAS_STATUS_SUCCESS) {
	//				printf("Cublas Error %d\n", status);
	//				exit(-1);
	//			}
	//		}
	//	}
	//	cudaMemcpyAsync(C_host_comp, C_dev_comp, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE, cudaMemcpyDeviceToHost, streams[0]);
	//	cudaStreamSynchronize(streams[0]);
	//}

	// GPU partial product: C_host_comp = M[0:N_SYNCED, 0:N_SYNCED] * B using
	// cuBLAS (synchronous; the stream-based variant is kept above, disabled).
	void dnmv_gpu(VALUE_TYPE* B, VALUE_TYPE* C) {
		cudaMemcpy(B_dev_comp, B, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE, cudaMemcpyHostToDevice);
		cudaMemset(C_dev_comp, 0, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE);
		for (int J = 0; J < N_SYNCED; J++) {
			for (int I = 0; I < N_SYNCED; I++) {
				double alpha = 1.0, beta = 1.0;
				cublasStatus_t status = cublasDgemv(cublas_handle, cublasOperation_t::CUBLAS_OP_N,
					BLOCK_SIZE, BLOCK_SIZE,
					&alpha,
					M[Mmap(I, J)].gpu_data, BLOCK_SIZE,
					B_dev_comp + J * BLOCK_SIZE, 1,
					&beta,
					C_dev_comp + I * BLOCK_SIZE, 1);
				if (status != CUBLAS_STATUS_SUCCESS) {
					printf("Cublas Error %d\n", status);
					exit(-1);
				}
			}
		}
		cudaMemcpy(C_host_comp, C_dev_comp, sizeof(VALUE_TYPE) * N_SYNCED * BLOCK_SIZE, cudaMemcpyDeviceToHost);
	}


	//void gpu_sync() {
	//	if ((N_COMPLETE_LAST == N_COMPLETE && big_timestep_n_call < 1000) || N_SYNCED >= N_COMPLETE ) {
	//		return;
	//	}
	//	N_COMPLETE_LAST = N_COMPLETE;
	//	double avg_balance_gpu_wait_ms = balance_gpu_wait_ms / big_timestep_n_call;
	//	printf("[SYNC] %d %d %d %e\n", N_SYNCED, N_COMPLETE, big_timestep_n_call, avg_balance_gpu_wait_ms);
	//	balance_gpu_wait_ms = 0;
	//	big_timestep_n_call = 0;
	//	if (avg_balance_gpu_wait_ms < 2e-3) {
	//		return;
	//	}
	//	int n_complete_decided = min(N_COMPLETE, N_SYNCED + 1);
	//	if (n_complete_decided == N_SYNCED) {
	//		return;
	//	}
	//	C_host_comp = (VALUE_TYPE*)realloc(C_host_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE);

	//	for (int i = N_SYNCED * N_SYNCED; i < n_complete_decided * n_complete_decided; i++) {
	//		cudaMallocAsync(&(M[i].gpu_data), sizeof(VALUE_TYPE) * BLOCK_SIZE * BLOCK_SIZE, streams[1 % n_stream]);
	//		cudaMemcpyAsync(M[i].gpu_data, M[i].data, sizeof(VALUE_TYPE) * BLOCK_SIZE * BLOCK_SIZE, cudaMemcpyHostToDevice, streams[1 % n_stream]);
	//	}

	//	if (B_dev_comp) {
	//		cudaFreeAsync(B_dev_comp, streams[1 % n_stream]);
	//	}
	//	cudaMallocAsync(&B_dev_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE, streams[1 % n_stream]);

	//	if (C_dev_comp) {
	//		cudaFreeAsync(C_dev_comp, streams[1 % n_stream]);
	//	}
	//	cudaMallocAsync(&C_dev_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE, streams[1 % n_stream]);

	//	have_gpu_sync_stream = 1;
	//	N_SYNCED = n_complete_decided;
	//}

	// Heuristic: grow the GPU-mirrored tile square by one layer when the GPU
	// path has been waiting on the CPU tasks (avg wait >= 2e-3 ms).  Evaluated
	// only when N_COMPLETE grows or every 1000 incmv() calls.  The synchronous
	// variant is used; the stream-based one is kept above, disabled.
	void gpu_sync() {
		if ((N_COMPLETE_LAST == N_COMPLETE && big_timestep_n_call < 1000) || N_SYNCED >= N_COMPLETE) {
			return;
		}
		N_COMPLETE_LAST = N_COMPLETE;
		double avg_balance_gpu_wait_ms = balance_gpu_wait_ms / big_timestep_n_call;
		//printf("[SYNC] %d %d %d %e\n", N_SYNCED, N_COMPLETE, big_timestep_n_call, avg_balance_gpu_wait_ms);
		balance_gpu_wait_ms = 0;
		big_timestep_n_call = 0;
		if (avg_balance_gpu_wait_ms < 2e-3) {
			return;
		}
		int n_complete_decided = min(N_COMPLETE, N_SYNCED + 1);
		if (n_complete_decided == N_SYNCED) {
			return;
		}
		C_host_comp = (VALUE_TYPE*)realloc(C_host_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE);

		// Upload the newly covered tiles (layer indices are contiguous in M
		// thanks to the Mmap layout).
		for (int i = N_SYNCED * N_SYNCED; i < n_complete_decided * n_complete_decided; i++) {
			cudaMalloc(&(M[i].gpu_data), sizeof(VALUE_TYPE) * BLOCK_SIZE * BLOCK_SIZE);
			cudaMemcpy(M[i].gpu_data, M[i].data, sizeof(VALUE_TYPE) * BLOCK_SIZE * BLOCK_SIZE, cudaMemcpyHostToDevice);
		}

		if (B_dev_comp) {
			cudaFree(B_dev_comp);
		}
		cudaMalloc(&B_dev_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE);

		if (C_dev_comp) {
			cudaFree(C_dev_comp);
		}
		cudaMalloc(&C_dev_comp, sizeof(VALUE_TYPE) * n_complete_decided * BLOCK_SIZE);

		N_SYNCED = n_complete_decided;
	}

public:
	// block_size: tile side length; must be even (see dnmv_scheduler).
	IncreBLAS(int block_size) :BLOCK_SIZE(block_size) {
		cublasCreate(&cublas_handle);
		//streams = (cudaStream_t*)malloc(n_stream*sizeof(cudaStream_t));
		//for (int i = 0; i < n_stream; i++) {
		//	cudaError_t err = cudaStreamCreate(&streams[i]);
		//	if (err != 0) {
		//		printf("cudaStreamCreate error %d\n", err);
		//		exit(-1);
		//	}
		//}
		//cublasSetStream(cublas_handle, streams[0]);
	}

	// Fix: release everything the instance owns.  The original class had no
	// destructor and leaked every tile, the device buffers, the host staging
	// buffer, and the cuBLAS handle.
	~IncreBLAS() {
		executor.wait_for_all();  // make sure no task still touches M
		for (auto& blk : M) {
			free(blk.data);
			if (blk.gpu_data) {
				cudaFree(blk.gpu_data);
			}
		}
		if (B_dev_comp) {
			cudaFree(B_dev_comp);
		}
		if (C_dev_comp) {
			cudaFree(C_dev_comp);
		}
		free(C_host_comp);
		cublasDestroy(cublas_handle);
	}

	// gemv: C = A * B, where A is the n x n matrix (leading dim A_stride) and
	// B, C are vectors.  A may have grown since the last call; only the new
	// part is merged into tile storage.  Called from 5 rotating call sites
	// (one IncreBLAS instance each).
	void incmv(VALUE_TYPE* A, int A_stride, VALUE_TYPE* B, VALUE_TYPE* C, int n) {
		// let C = 0
		memset(C, 0, n * sizeof(VALUE_TYPE));

		// Phase 1: merge the grown part of A into M (must finish before GEMV).
		taskflow.clear();
		merge_scheduler(A, A_stride, n);
		executor.run(taskflow).wait();

		// Phase 2: CPU GEMV tasks run concurrently with the cuBLAS partial
		// product; the time spent waiting for the CPU side feeds gpu_sync().
		taskflow.clear();
		dnmv_scheduler(B, C);
		cpu_gemv_future = executor.run(taskflow);
		//if (have_gpu_sync_stream) {
		//	have_gpu_sync_stream = 0;
		//	cudaStreamSynchronize(streams[1 % n_stream]);
		//}
		dnmv_gpu(B, C);
		balance_timer.start();
		cpu_gemv_future.wait();
		balance_timer.stop();
		balance_gpu_wait_ms += balance_timer.get_time_ms();
		big_timestep_n_call++;
		// Combine the GPU partial result into C (CPU tasks wrote the rest).
		for (int i = 0; i < N_SYNCED * BLOCK_SIZE; i++) {
			C[i] += C_host_comp[i];
		}

		gpu_sync();

		return;
	}
};


// Lazily-created GEMV engines; lib_gemv_async rotates through all 5 slots.
IncreBLAS* blas[5] = { NULL };
// Registry of async tasks; a caller's handler is a 1-based index into this vector.
std::vector<Task*> tasks;
// NOTE(review): not referenced anywhere in this file — possibly dead.
int time_step_big_counter = 0;
// Cycles 0..4; selects which blas[] instance serves the next GEMV request.
int gemv_range_counter = 0;

// IncreBLAS Test routine
// TODO : coroutine
// IncreBLAS Test routine
// TODO : coroutine
//
// Thread entry point: executes one Task according to its type.  The Task
// memory stays owned by the `tasks` registry and is released later by
// task_wait(); this routine must not free it.
DWORD WINAPI worker_routine(LPVOID task_lpvoid) {
	Task* task = (Task*)task_lpvoid;
	//double* tmp_B = (double*)malloc(sizeof(double) * task->n);
	//if (!tmp_B) {
	//	printf("Malloc failed\n");
	//	exit(-1);
	//}

	switch (task->type) {
	case(LIB_TASK_GEMV):
		// Disabled self-check: recompute the GEMV naively and compare the
		// relative residual against the IncreBLAS result.
		//memset(tmp_B, 0, sizeof(double) * task->n);
		//if (task->increblas_object_id == 0) {
		//	//cblas_dgemv(CblasColMajor, CblasNoTrans, task->n, task->n, 1.0, task->A, task->A_stride, task->x, 1, 0.0, tmp_B, 1);
		//	for (int i = 0; i < task->n; i++) {
		//		tmp_B[i] = 0;
		//		for (int j = 0; j < task->n; j++) {
		//			tmp_B[i] += task->A[i + j * task->A_stride] * task->x[j];
		//		}
		//	}
		//	blas[task->increblas_object_id]->incmv(task->A, task->A_stride, task->x, task->B, task->n);
		//	double norm_residual = 0.0;
		//	double norm_rhs = 0.0;
		//	for (int i = 0; i < task->n; i++) {
		//		norm_residual += ((tmp_B[i] - task->B[i]) * (tmp_B[i] - task->B[i]));
		//		norm_rhs += (tmp_B[i] * tmp_B[i]);
		//	}
		//	norm_residual = sqrt(norm_residual);
		//	norm_rhs = sqrt(norm_rhs);
		//	
		//	if (norm_residual!=norm_residual || (norm_residual!=0 && norm_rhs==0) || norm_residual/norm_rhs > 1e-14) {
		//		printf("n = %d, residual = %le\n", task->n, norm_residual / norm_rhs);
		//		for (int i = 0; i < task->n; i++) {
		//			printf("{%e,%e,%e}\n", fabs(tmp_B[i] - task->B[i]), tmp_B[i], task->B[i]);
		//		}
		//		printf("\n");
		//		exit(-21);
		//	}
		//}
		//else {
		//	cblas_dgemv(CblasColMajor, CblasNoTrans, task->n, task->n, 1.0, task->A, task->A_stride, task->x, 1, 0.0, task->B, 1);
		//}

		blas[task->increblas_object_id]->incmv(task->A, task->A_stride, task->x, task->B, task->n);


		break;
	case (LIB_TASK_GETRF):
		LAPACKE_dgetrf(LAPACK_COL_MAJOR, task->n, task->n, task->A, task->A_stride, task->ipiv);
		break;
	case(LIB_TASK_GETRS):
		LAPACKE_dgetrs(LAPACK_COL_MAJOR, 'N', task->n, 1, task->A, task->A_stride, task->ipiv, task->B, task->n);
		break;
	default:
		// Fix: report the unknown type before aborting instead of dying silently.
		printf("[WORKER] Unknown task type %d. Exit(-2). \n", task->type);
		exit(-2);
	}
	return 0;
}

void task_wait(long long& handler) {
	if (handler == 0) {
		return;
	}
	WaitForSingleObject(tasks[handler - 1]->hHandle, INFINITE);
	free(tasks[handler - 1]);
	tasks[handler - 1] = NULL;
	handler = 0;
}

// Start an asynchronous GEMV y = A * x[x_start-1 : x_end] on a worker thread.
// All scalar parameters are passed by pointer (Fortran calling convention);
// x_start/x_end of 0 default to the whole vector.  *handler receives the
// 1-based task id to pass to lib_gemv_get.
extern "C" _declspec(dllexport) void lib_gemv_async(int* n, double** A, int* stride, double** x, int* x_start, int* x_end, double** y, long long* handler) {
	//printf("GEMV\n");
	tasks.push_back(NULL);
	*handler = tasks.size();

	// Fortran-style defaults: 0 means "from the beginning" / "to the end".
	if (*x_start == 0) {
		*x_start = 1;
	}
	if (*x_end == 0) {
		*x_end = *n;
	}

	// Lazily create the engine for this rotating slot; the first three slots
	// use the R block size, the remaining two use the L block size.
	if (blas[gemv_range_counter] == NULL) {
		if (gemv_range_counter < 3) {
			blas[gemv_range_counter] = new IncreBLAS(BLOCK_SIZE_GEMV_R);
		}
		else {
			blas[gemv_range_counter] = new IncreBLAS(BLOCK_SIZE_GEMV_L);
		}
	}

	Task* task = (Task*)malloc(sizeof(Task));
	// Fix: the malloc result was used unchecked (NULL deref on failure).
	if (!task) {
		printf("[GEMV ASYNC] Malloc failed. Exit(-1). \n");
		exit(-1);
	}
	task->type = LIB_TASK_GEMV;
	task->n = *n;
	task->A_stride = *stride;
	task->A = *A;
	task->x = *x + (*x_start - 1);  // convert 1-based x_start to a pointer offset
	task->B = *y;
	task->ipiv = NULL;
	task->solver_task_handler = *handler;
	task->increblas_object_id = gemv_range_counter;
	task->hHandle = CreateThread(NULL, 0, worker_routine, task, 0, NULL);
	// Fix: a failed CreateThread would make task_wait wait on a NULL handle.
	if (!task->hHandle) {
		printf("[GEMV ASYNC] CreateThread failed. Exit(-1). \n");
		exit(-1);
	}

	gemv_range_counter = (gemv_range_counter + 1) % 5;
	tasks[*handler - 1] = task;
}

// Only referenced by the commented-out profiling code in lib_getrf_async.
int big_step_counter = 0;
Timer global_timer;

// Start an asynchronous LU factorization (LAPACK dgetrf) of the n x n
// column-major matrix *A on a worker thread.  *info is set to 0 immediately
// (the factorization status is not propagated back); *handler receives the
// 1-based task id to pass to lib_getrf_get.
extern "C" _declspec(dllexport) void lib_getrf_async(int* n, double** A, int** ipiv, int* info, long long* handler) {
	//setvbuf(stdout, NULL, _IONBF, 0);
	//printf("GETRF\n");

	*info = 0;
	tasks.push_back(NULL);
	*handler = tasks.size();

	//if (big_step_counter % 100 ==0) {
	//	global_timer.stop();
	//	printf("counter = %d, time = %lf ms.\n", big_step_counter, global_timer.get_time_ms());
	//	global_timer.start();
	//}
	//big_step_counter++;

	Task* task = (Task*)malloc(sizeof(Task));
	// Fix: the malloc result was used unchecked (NULL deref on failure).
	if (!task) {
		printf("[GETRF ASYNC] Malloc failed. Exit(-1). \n");
		exit(-1);
	}
	task->type = LIB_TASK_GETRF;
	task->n = *n;
	task->A_stride = *n;
	task->A = *A;
	task->x = NULL;
	task->B = NULL;
	task->ipiv = *ipiv;
	task->solver_task_handler = *handler;
	// Fix: this field was left uninitialized (unused for GETRF, but reading
	// an indeterminate value anywhere later would be UB).
	task->increblas_object_id = -1;
	task->hHandle = CreateThread(NULL, 0, worker_routine, task, 0, NULL);

	tasks[*handler - 1] = task;
}

// Start an asynchronous triangular solve (LAPACK dgetrs, no transpose) using
// a factorization produced by lib_getrf_async; *B holds the RHS on entry and
// the solution when the task completes.  *handler receives the 1-based task
// id to pass to lib_getrs_get.
extern "C" _declspec(dllexport) void lib_getrs_async(int* n, double** A, int** ipiv, double** B, long long* handler) {
	//printf("GETRS\n");

	tasks.push_back(NULL);
	*handler = tasks.size();

	Task* task = (Task*)malloc(sizeof(Task));
	// Fix: the malloc result was used unchecked (NULL deref on failure).
	if (!task) {
		printf("[GETRS ASYNC] Malloc failed. Exit(-1). \n");
		exit(-1);
	}
	task->type = LIB_TASK_GETRS;
	task->n = *n;
	task->A_stride = *n;
	task->A = *A;
	task->x = NULL;
	task->B = *B;
	task->ipiv = *ipiv;
	task->solver_task_handler = *handler;
	// Fix: this field was left uninitialized (unused for GETRS).
	task->increblas_object_id = -1;
	task->hHandle = CreateThread(NULL, 0, worker_routine, task, 0, NULL);

	tasks[*handler - 1] = task;
}


// Wait for an asynchronous GEMV to finish and release its task.  Warns (but
// still waits) if the handler belongs to a different task type.
extern "C" _declspec(dllexport) void lib_gemv_get(long long* handler) {
	if (*handler && tasks[*handler - 1] && (tasks[*handler - 1]->type != LIB_TASK_GEMV)) {
		// Fix: typo "recieved" -> "received" in the diagnostic message.
		printf("Getting results for GEMV, but received handler's type is %d.\n", tasks[*handler - 1]->type);
	}
	task_wait(*handler);
}
// Wait for an asynchronous GETRF to finish and release its task.  Warns (but
// still waits) if the handler belongs to a different task type.
extern "C" _declspec(dllexport) void lib_getrf_get(long long* handler) {
	if (*handler && tasks[*handler - 1] && (tasks[*handler - 1]->type != LIB_TASK_GETRF)) {
		// Fix: typo "recieved" -> "received" in the diagnostic message.
		printf("Getting results for GETRF, but received handler's type is %d.\n", tasks[*handler - 1]->type);
	}
	task_wait(*handler);
}
// Wait for an asynchronous GETRS to finish and release its task.  Warns (but
// still waits) if the handler belongs to a different task type.
extern "C" _declspec(dllexport) void lib_getrs_get(long long* handler) {
	if (*handler && tasks[*handler - 1] && (tasks[*handler - 1]->type != LIB_TASK_GETRS)) {
		// Fix: typo "recieved" -> "received" in the diagnostic message.
		printf("Getting results for GETRS, but received handler's type is %d.\n", tasks[*handler - 1]->type);
	}
	task_wait(*handler);
}