#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#pragma warning(disable : 4996)

/************************************************************************/
/*                            辅助函数                                  */
/************************************************************************/
/* Return the absolute value of a. */
float absolute(float a)
{
	return (a < 0) ? -a : a;
}


/* Return the smaller of a and b. */
float min(float a, float b)
{
	return (b < a) ? b : a;
}


/* Approximate float equality: 1 when |a - b| < 1e-8, otherwise 0. */
int equal(float a, float b)
{
	if (fabsf(a - b) < 0.00000001)
		return 1;
	return 0;
}


char* F2S(float f, char* str)
{
	char str1[40];
	int j = 0, k, i;
	float d;

	d = absolute(f);
	i = (int)d;//浮点数的整数部分
	while (i > 0)
	{
		str1[j++] = i % 10 + '0';
		i = i / 10;
	}
	for (k = 0; k < j; k++)
	{
		str[k] = str1[j - 1 - k];//被提取的整数部分正序存放到另一个数组
	}

	if ((int)d == 0)
	{
		str[j++] = '0';
	}

	str[j++] = '.';

	d = d - (int)d;//小数部分提取
				   //printf("小数部分%f\n", d);
	for (i = 0; i < 4; i++)
	{
		d = d * 10;
		str[j++] = (int)d + '0';
		//printf("%c", str[j - 1]);
		d = d - (int)d;

	}
	/*while (str[--j] == '0');*/
	str[j] = '\0';

	/*printf("%c\n", str[0]);*/

	//处理负数
	if (f < 0)
	{
		j = 0;
		while (str[j] != '\0')
		{
			++j;
		}
		str[j + 1] = '\0';
		while (j > 0)
		{
			str[j] = str[j - 1];
			--j;
		}
		str[0] = '-';
	}
	return str;
}
/************************************************************************/
/*                            辅助函数                                  */
/************************************************************************/

/* Dense row-major matrix: element[r][c]; each row is a separately heap-allocated float buffer. */
typedef struct {
	int row, col;
	float **element;
}Mat;

/************************************************************************/
/*                              Mat操作                                  */
/************************************************************************/
/*
 * Allocate storage for a row x col matrix inside 'mat'.
 * Returns mat on success, NULL on allocation failure; any rows already
 * allocated are released before returning NULL.
 */
Mat* MatCreate(Mat* mat, int row, int col)
{
	int r;

#ifdef MAT_LEGAL_CHECKING
	if (row <= 0 || col <= 0) {
		printf("err check, unmatch matrix for MatCreate\n");
		printf("\t\trow:\n\t\t\t");
		printf("%d\n", row);
		printf("\t\tcol:\n\t\t\t");
		printf("%d\n", col);
		return NULL;
	}
#endif

	mat->element = (float**)malloc(row * sizeof(float*));
	if (mat->element == NULL) {
		printf("mat create fail! size:[%d,%d]\n", row, col);
		return NULL;
	}
	for (r = 0; r < row; r++) {
		mat->element[r] = (float*)malloc(col * sizeof(float));
		if (mat->element[r] != NULL)
			continue;
		/* roll back the rows allocated so far */
		printf("mat create fail! size:[%d,%d],spot:[%d]\n", row, col, r);
		while (--r >= 0)
			free(mat->element[r]);
		free(mat->element);
		return NULL;
	}

	mat->row = row;
	mat->col = col;
	return mat;
}


/*
 * Release all row buffers and the row-pointer array of 'mat'.
 * Fix: the original left mat->element dangling, so a second MatDelete
 * (or any later use) was a double-free / use-after-free. The pointer is
 * now reset to NULL, and an already-deleted matrix is a safe no-op.
 */
void MatDelete(Mat* mat)
{
	int i;

	if (mat == NULL || mat->element == NULL)
		return;
	for (i = 0; i < mat->row; i++)
		free(mat->element[i]);
	free(mat->element);
	mat->element = NULL;	/* defend against double delete */
}


/* Fill mat from a row-major flat array 'val' (length row*col). Returns mat. */
Mat* MatSetVal(Mat* mat, float* val)
{
	int r, c;

	for (r = 0; r < mat->row; r++)
		for (c = 0; c < mat->col; c++)
			mat->element[r][c] = val[r * mat->col + c];
	return mat;
}


/* Print the dimensions of mat as "Mat RxC:". */
void MatShape(const Mat* mat)
{
#ifdef MAT_LEGAL_CHECKING
	if (mat == NULL) {
		printf("err check for MatShape\n");
		return;
	}
#endif
	printf("Mat %dx%d:\n", mat->row, mat->col);
}


/*
 * Pretty-print mat with 4 fractional digits per entry (via F2S).
 * Negative entries get one leading space, non-negative get two, so the
 * columns stay aligned regardless of sign.
 * Fix: the original converted every element twice (one F2S call to test
 * the sign, another inside printf); convert once and reuse the result.
 */
void MatDump(const Mat* mat)
{
	int row, col;
	char str[40];
	char * data;

#ifdef MAT_LEGAL_CHECKING
	if (mat == NULL) {
		printf("err check for MatDump\n");
		return;
	}
#endif

	printf("Mat %dx%d:\n", mat->row, mat->col);
	for (row = 0; row < mat->row; row++) {
		for (col = 0; col < mat->col; col++) {
			data = F2S((mat->element[row])[col], str);
			if (data[0] == '-') {
				printf(" %s\t", data);
			}
			else {
				printf("  %s\t", data);
			}
		}
		printf("\n");
	}
	printf("\n");
}

/* Set every entry of mat to 0.0f; returns mat. */
Mat* MatZeros(Mat* mat)
{
	int r, c;

	for (r = 0; r < mat->row; r++)
		for (c = 0; c < mat->col; c++)
			mat->element[r][c] = 0.0f;
	return mat;
}



/* Set every entry of mat to 1.0f; returns mat. */
Mat* MatOnes(Mat* mat)
{
	int r, c;

	for (r = 0; r < mat->row; r++)
		for (c = 0; c < mat->col; c++)
			mat->element[r][c] = 1.0f;
	return mat;
}


/*
 * Identity pattern: zero the matrix, then put 1.0 on the main diagonal
 * up to min(row, col). Returns mat.
 * Fix: the original used C++ function-style casts min(float(row), float(col)),
 * which is not valid C; an integer comparison is both valid and exact.
 */
Mat* MatEye(Mat* mat)
{
	int i;
	int n = (mat->row < mat->col) ? mat->row : mat->col;

	MatZeros(mat);
	for (i = 0; i < n; i++) {
		(mat->element[i])[i] = 1.0f;
	}

	return mat;
}


/* dst = src1 + src2, element-wise; all three must share the same shape. */
Mat* MatAdd(Mat* src1, Mat* src2, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (!(src1->row == src2->row && src2->row == dst->row && src1->col == src2->col && src2->col == dst->col)) {
		printf("\t\terr check, unmatch matrix for MatAdd\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src1->row; r++)
		for (c = 0; c < src1->col; c++)
			dst->element[r][c] = src1->element[r][c] + src2->element[r][c];
	return dst;
}


/* dst = src1 - src2, element-wise; all three must share the same shape. */
Mat* MatSub(Mat* src1, Mat* src2, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (!(src1->row == src2->row && src2->row == dst->row && src1->col == src2->col && src2->col == dst->col)) {
		printf("\t\terr check, unmatch matrix for MatSub\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src1->row; r++)
		for (c = 0; c < src1->col; c++)
			dst->element[r][c] = src1->element[r][c] - src2->element[r][c];
	return dst;
}


/*
 * dst = src1 x src2, skipping (near-)zero factors so sparse operands
 * cost less. dst is cleared first, then products are accumulated.
 */
Mat* MatMul(Mat* src1, Mat* src2, Mat* dst)
{
	int r, k, c;
	float a, b;

#ifdef MAT_LEGAL_CHECKING
	if (src1->col != src2->row || src1->row != dst->row || src2->col != dst->col) {
		printf("\t\terr check, unmatch matrix for MatMul\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif
	MatZeros(dst);
	for (r = 0; r < src1->row; r++) {
		for (k = 0; k < src1->col; k++) {
			a = src1->element[r][k];
			if (equal(a, 0))
				continue;	/* whole inner pass would add zero */
			for (c = 0; c < src2->col; c++) {
				b = src2->element[k][c];
				if (!equal(b, 0))
					dst->element[r][c] += a * b;
			}
		}
	}

	return dst;
}


/*
 * dst = src1 x src2, plain triple-loop multiply.
 * Fix: the original accumulated with "+=" without ever clearing dst, so
 * the result included whatever dst previously held (MatMul, by contrast,
 * calls MatZeros first). Each cell is now computed into a local
 * accumulator and stored, which also avoids re-reading dst in the loop.
 */
Mat* MatMul2(Mat* src1, Mat* src2, Mat* dst)
{
	int row, col;
	int i;
	float acc;

#ifdef MAT_LEGAL_CHECKING
	if (src1->col != src2->row || src1->row != dst->row || src2->col != dst->col) {
		printf("\t\terr check, unmatch matrix for MatMul\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (row = 0; row < dst->row; row++) {
		for (col = 0; col < dst->col; col++) {
			acc = 0.0f;
			for (i = 0; i < src1->col; i++) {
				acc += (src1->element[row])[i] * (src2->element[i])[col];
			}
			(dst->element[row])[col] = acc;
		}
	}

	return dst;
}
////2.1 子矩阵乘法 C=A'*B
//void SMblock_MultCAOB(Mat *rawCAOB, Mat rawA, Mat rawB, int si, int sj, int sk, int subm, int subn, int subp, int T, int S) {
//	int i, j, k;
//	for (j = 0; j < subn; j++) { //列号
//		for (i = 0; i < subm; i++) { //行号
//			for (k = 0; k < subp; k++) { //并行
//										 //printf("子块乘：C[%d][%d]+=A[%d][%d]*B[%d][%d] \n",sj * T + j,sk * S + k,si*S + i,sj * T + j,si * T + i,sk*S + k);
//										 //C[j * p + k]+= A[i*m + j] * B[i * p + k];  //参考
//				(rawCAOB->element[sj * T + j])[sk * T + k] += (rawA.element[si*S + i])[sj * T + j] * (rawB.element[si*S + i])[sk * T + k];
//
//			}
//		}
//	}
//}
//
//
///* dst = src1 x src2 缓存优化*/
//Mat* MatMul2(Mat* src1, Mat* src2, Mat* dst)
//{
//
//#ifdef MAT_LEGAL_CHECKING
//	if (src1->col != src2->row || src1->row != dst->row || src2->col != dst->col) {
//		printf("\t\terr check, unmatch matrix for MatMul\n");
//		printf("\t\tsrcMatShape:\n\t\t\t");
//		MatShape(src1);
//		printf("\t\t\t");
//		MatShape(src2);
//		printf("\t\tdstMatShape:\n\t\t\t");
//		MatShape(dst);
//		return NULL;
//	}
//#endif
//
//	int rawm = src1->row;
//	int rawn = src2->col;
//	int rawp = src2->col;
//
//	// 稍后计算
//	int S = 1;  //块矩阵的行
//	int T = 2;  //块矩阵的列
//
//	//分块后子矩阵的个数h*l,A矩阵分为S*T的子矩阵，B矩阵分为T*S的子矩阵
//	static int col_M = 1;
//	static int row_N = 1;
//
//	//矩阵A、B分块后，不能全分块时，最后一行和最后一列的子矩阵的大小
//	static int col_last = 1;
//	static int row_last = 1;
//
//	//====================================================================================
//	//将矩阵rawA[rawm][rawn]分为C_M*R_N个大小为S*T的子块，ceil(x)函数返回不小于x的最小整数
//	if (rawm % S == 0) {
//		col_M = rawm / S;
//	}
//	else {
//		col_M = rawm / S + 1;
//	}
//	//AC_M = ceil((double) rawm / (double) S); //矩阵A分块后的行数
//	row_N = ceil((double)rawn / (double)T); //矩阵A分块后的列数，即矩阵B分块后的行数
//	col_last = rawm - (col_M - 1) * S;//最后一行
//	row_last = rawn - (row_N - 1) * T;//最后一列
//	printf("%d\n", row_N);
//	printf("%d\n", col_M);
//	printf("%d\n", row_last);
//	printf("%d\n", col_last);
//	//====================================================================================
//	//MatDump(dst);
//	int i, j, k;
//	int count = 0;//循环计数
//				  //循环 的顺序可根据需要更换，不会影响计算的结果
//	for (j = 0; j < row_N; j++) {
//		for (i = 0; i < col_M; i++) {
//			for (k = 0; k < row_N; k++) {
//								printf("\t 第%d层循环：  ",++count);
//				//				printf(" 分块乘法：C[%d][%d]+=A[%d][%d]*B[%d][%d] \n",j,k,i,j,i,k);
//				int mblk = S, nblk = T, pblk = T;//默认当前参与运算的两个子矩阵块的大小，必须每次循环重新赋初值
//												 //计算当前子块的大小为mblk*nblk
//				if ((i == col_M - 1)) {
//					mblk = col_last;
//				}
//				if (j == row_N - 1) {
//					nblk = row_last;
//				}
//				if (k == row_N - 1) {
//					pblk = row_last;
//				}
//
//				//分块矩阵乘法C=A'*B
//				//SMblock_MultCAOB(i, j, k, mblk, nblk, mblk);
//				SMblock_MultCAOB(dst, *src1, *src2, i, j, k, mblk, nblk, pblk, T, S);
//			}
//		}
//	}
//
//	return dst;
//}


/* dst = src1 * src2, element-wise (Hadamard product); shapes must match. */
Mat* MatProduct(Mat* src1, Mat* src2, Mat* dst)
{
	int row, col;

#ifdef MAT_LEGAL_CHECKING
	if (!(src1->row == src2->row && src2->row == dst->row && src1->col == src2->col && src2->col == dst->col)) {
		/* fix: the message previously said "MatAdd" (copy-paste error) */
		printf("\t\terr check, unmatch matrix for MatProduct\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (row = 0; row < src1->row; row++) {
		for (col = 0; col < src1->col; col++) {
			(dst->element[row])[col] = (src1->element[row])[col] * (src2->element[row])[col];
		}
	}

	return dst;
}


/*
 * dst = src1 / src2, element-wise; shapes must match.
 * NOTE(review): no zero-divisor check here (unlike MatVectorDiv) — confirm
 * callers guarantee src2 has no zeros.
 */
Mat* MatDiv(Mat* src1, Mat* src2, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (!(src1->row == src2->row && src2->row == dst->row && src1->col == src2->col && src2->col == dst->col)) {
		printf("\t\terr check, unmatch matrix for MatDiv\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src1);
		printf("\t\t\t");
		MatShape(src2);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src1->row; r++)
		for (c = 0; c < src1->col; c++)
			dst->element[r][c] = src1->element[r][c] / src2->element[r][c];
	return dst;
}


/* dst = num * src (scalar multiply); returns dst, NULL on shape mismatch. */
Mat* MatNumMul(float num, Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatNumMul\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r][c] = num * src->element[r][c];
	return dst;
}


/* dst = num + src (scalar add); returns dst, NULL on shape mismatch. */
Mat* MatNumAdd(float num, Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatNumAdd\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r][c] = num + src->element[r][c];
	return dst;
}


/* dst = src^T; dst must already be col x row. */
Mat* MatTrans(Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->col || src->col != dst->row) {
		printf("err check, unmatch matrix for MatTranspose\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < dst->row; r++)
		for (c = 0; c < dst->col; c++)
			dst->element[r][c] = src->element[c][r];
	return dst;
}






/* dst(r,0) = sum over the columns of row r of src; dst must be row x 1. */
Mat* MatRowSum(Mat* src, Mat* dst)
{
	int r, c;
	float acc;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || dst->col != 1) {
		printf("err check, unmatch matrix for MatSum\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++) {
		acc = 0;
		for (c = 0; c < src->col; c++)
			acc += src->element[r][c];
		dst->element[r][0] = acc;
	}
	return dst;
}


/* dst(r,0) = maximum of row r of src; dst must be row x 1. */
Mat* MatRowMax(Mat* src, Mat* dst)
{
	int r, c;
	float best;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || dst->col != 1) {
		printf("err check, unmatch matrix for MatMax\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++) {
		best = src->element[r][0];
		for (c = 1; c < src->col; c++)
			if (src->element[r][c] > best)
				best = src->element[r][c];
		dst->element[r][0] = best;
	}
	return dst;
}

/* dst = src squared element-wise (Hadamard product of src with itself). */
Mat* MatSquare(Mat* src, Mat* dst)
{
	int r, c;
	float v;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("err check, unmatch matrix for MatSquare\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++) {
		for (c = 0; c < src->col; c++) {
			v = src->element[r][c];
			dst->element[r][c] = v * v;
		}
	}
	return dst;
}


/*
 * dst = element-wise square root of src; shapes must match.
 * Fix: the original wrote sqrt(float(...)) — a C++ function-style cast
 * that does not compile as C. Use a standard C cast; sqrt() takes and
 * returns double, so cast the result back to float explicitly.
 */
Mat* MatSqrt(Mat* src, Mat* dst)
{
	int row, col;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("err check, unmatch matrix for MatSqrt\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (row = 0; row < src->row; row++) {
		for (col = 0; col < src->col; col++) {
			(dst->element[row])[col] = (float)sqrt((double)(src->element[row])[col]);
		}
	}

	return dst;
}


/*
 * dst = element-wise exp(src); shapes must match.
 * Fix: the original wrote exp(float(...)) — a C++ function-style cast
 * that does not compile as C. Use standard C casts instead.
 */
Mat* MatExp(Mat* src, Mat* dst)
{
	int row, col;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("err check, unmatch matrix for MatExp\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (row = 0; row < src->row; row++) {
		for (col = 0; col < src->col; col++) {
			(dst->element[row])[col] = (float)exp((double)(src->element[row])[col]);
		}
	}

	return dst;
}




/*
 * dst = src - vector, broadcast per row: vector must be (src->row x 1),
 * and vector[r][0] is subtracted from every entry of row r.
 */
Mat* MatVectorSub(Mat* src, Mat* vector, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col || vector->row != src->row || vector->col != 1) {
		printf("err check, unmatch matrix for MatVectorSub\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tvectorShape:\n\t\t\t");
		MatShape(vector);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; r++) {
		float bias = vector->element[r][0];
		for (c = 0; c < src->col; c++)
			dst->element[r][c] = src->element[r][c] - bias;
	}
	return dst;
}


/*
 * dst = src / vector, broadcast per row: vector must be (src->row x 1),
 * and every entry of row r is divided by vector[r][0].
 * Fix: the shape-mismatch message previously said "MatVectorSub"
 * (copy-paste error). Note the zero-divisor scan only runs when
 * MAT_LEGAL_CHECKING is defined, matching the original behavior.
 */
Mat* MatVectorDiv(Mat* src, Mat* vector, Mat *dst)
{
	int row, col;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col || vector->row != src->row || vector->col != 1) {
		printf("err check, unmatch matrix for MatVectorDiv\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tvectorShape:\n\t\t\t");
		MatShape(vector);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}

	/* reject (near-)zero divisors before touching dst */
	for (row = 0; row < src->row; row++) {
		if (absolute((vector->element[row])[0]) < 0.000001) {
			printf("err check, Divisor vector has zero.\n");
			return NULL;
		}
	}
#endif

	for (row = 0; row < src->row; row++) {
		for (col = 0; col < src->col; col++) {
			(dst->element[row])[col] = (src->element[row])[col] / (vector->element[row])[0];
		}
	}

	return dst;
}


/* Copy every element of src into dst; shapes must already match. */
void MatCopy(Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatCopy\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return;
	}
#endif

	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r][c] = src->element[r][c];
}

//--------------------------------Matplus--------------------------------//

/* dst = [1 | src]: column 0 of dst is set to 1, src fills columns 1..col. */
void MatPlusCol(Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || (src->col) + 1 != dst->col) {
		printf("\t\terr check, unmathed matrix for MatPlus\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return;
	}
#endif
	for (r = 0; r < dst->row; r++)
		dst->element[r][0] = 1;
	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r][c + 1] = src->element[r][c];
}



/* dst = [0-row ; src]: row 0 of dst is zeroed, src fills rows 1..row. */
void MatPlusRow(Mat* src, Mat* dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row + 1 != dst->row || (src->col) != dst->col) {
		printf("\t\terr check, unmathed matrix for MatPlus\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return;
	}
#endif
	for (c = 0; c < dst->col; c++)
		dst->element[0][c] = 0.f;
	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r + 1][c] = src->element[r][c];
}

/* dst = [cst-row ; src]: row 0 of dst is filled with cst, src fills rows 1..row. */
void MatPlusRowConstant(Mat* src,Mat* dst,float cst)
{
	int r, c;

	#ifdef MAT_LEGAL_CHECKING
	if (src->row + 1 != dst->row || (src->col) != dst->col) {
		printf("\t\terr check, unmathed matrix for MatPlus\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return;
	}
#endif
	for (c = 0; c < dst->col; c++)
		dst->element[0][c] = cst;
	for (r = 0; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r + 1][c] = src->element[r][c];
}

/* Copy src without its first row: dst gets rows 1..row-1 of src. */
void MatReduceRow(Mat* src,Mat* dst)
{
	int r, c;

	#ifdef MAT_LEGAL_CHECKING
	if (src->row - 1 != dst->row || (src->col) != dst->col) {
		printf("\t\terr check, unmathed matrix for MatPlus\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return;
	}
#endif
	for (r = 1; r < src->row; r++)
		for (c = 0; c < src->col; c++)
			dst->element[r - 1][c] = src->element[r][c];
}

/* Element-wise (Hadamard) product across an array of hgt equally-shaped matrices. */
Mat* MatplusProduct(Mat* src1, Mat* src2, Mat* dst, int hgt)
{
	int m, r, c;

	for (m = 0; m < hgt; ++m)
		for (r = 0; r < dst[0].row; ++r)
			for (c = 0; c < dst[0].col; ++c)
				dst[m].element[r][c] = src1[m].element[r][c] * src2[m].element[r][c];
	return dst;
}

/* Print the shape of a matrix array as "Mat HxRxC:". */
void MatplusShape(Mat* mat, int mat_h)
{
	if (!mat) {
		printf("err check for MatplusShape\n");
		return;
	}
	printf("Mat %dx%dx%d:\n", mat_h, mat->row, mat->col);
}

/* Zero every matrix in an array of des_h matrices; returns des. */
Mat* MatplusZeros(Mat* des, int des_h)
{
	int k;

	for (k = 0; k < des_h; ++k)
		MatZeros(des + k);
	return des;
}


//--------------------------------Matplus--------------------------------//


/************************************************************************/
/*                              Mat操作                                  */
/************************************************************************/

float *XVal = NULL;		// flattened sample features; allocated and filled by DR_loadDS
float *YVal = NULL;		// flattened sample labels; allocated and filled by DR_loadDS

/* User-supplied configuration describing the dataset and network topology. */
typedef struct {
	// int OnehotOptions;			// 0 for non-onehot,1 for onehot

	int CompleteSampleNum;		// number of complete samples [int]
	int TrainSampleNum;			// number of training samples [int]
	int TestSampleNum;			// number of test samples [int]
	int Height;					// height of each sample [int]
	int Width;					// width of each sample [int]
	int Channel;				// channel of each sample [int]
	int ClassificationNum;		// number of categories classified [int]
	int LabelNum;				// label number of each sample [int]

	// int *Optimization;			// Optimization Method chosen and its parameters[int*]
	int HiddenLayerNum;			// number of hidden layer [int]
	int *LayerType;				// type of correspond layer: 0 for fc,1 for c,2 for dec,3 for max-p,4 for aver-p, (HiddenLayerNum+2) in total, the first[0] is non-sense
	int *LayerParameters;		// Parameters of each layer: fc with 1 para, cp with 8 paras in order of padding,k1,k2,stride[2],outchannels
	int ParametersTotal;		// Number of Parameters of all Net
	int WeightInitWayNum;       // weight initialization mode [int]
								//float *XValArray;			// samples features value [float*]
								//float *YValArray;			// samples labels value [float*]
	int *ActiFuncNumArray;		// activate functions of every layers [int*]
	int LossFuncNum;			// loss function [int]
	int BatchSize;				// batch size for optimization

	int OptFuncNum;				// Optimization Method chosen:0 for BGD, 1 for Adam
	int *OptPara;				// Optimization Parameters
}Predefine;

/************************************************************************/
/*                          数据集读取操作                               */
/************************************************************************/
/*
 * Return 256^a for a >= 0.
 * NOTE(review): a 32-bit int overflows for a > 3 — confirm callers only
 * pass small exponents (byte positions).
 */
int DR_pow256(int a)
{
	int result = 1;
	int k;

	for (k = 0; k < a; k++)
		result *= 256;
	return result;
}

// float DR_mean(Predefine *pdf)
// {
// 	int i;
// 	double mean = 0.0;
// 	for (i = 0; i < pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel; ++i)
// 	{
// 		mean += (double)XVal[i] / (pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel);
// 	}
// 	return (float)mean;
// }

// float DR_std(Predefine *pdf, float mean)
// {
// 	int i;
// 	double squrt = 0.0;
// 	for (i = 0; i < pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel; ++i)
// 	{
// 		squrt += (double)((XVal[i] - mean)*(XVal[i] - mean)) /
// 			(pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel);
// 	}
// 	squrt = sqrt(squrt);
// 	return (float)squrt;
// }

// void DR_Normalize(Predefine *pdf)
// {
// 	float mean = DR_mean(pdf);
// 	float std = DR_std(pdf, mean);
// 	//printf("%f %f \n", mean, std);
// 	int i;
// 	for (i = 0; i < pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel; ++i)
// 	{
// 		XVal[i] = (XVal[i] - mean) / std;
// 	}
// }

// float DR_intMean(int NHWC)
// {
// 	int i;
// 	double mean = 0.0;
// 	for (i = 0; i < NHWC; ++i)
// 	{
// 		mean += (double)XVal[i] / (NHWC);
// 	}
// 	return (float)mean;
// }

// float DR_intStd(int NHWC, float mean)
// {
// 	int i;
// 	double squrt = 0.0;
// 	for (i = 0; i < NHWC; ++i)
// 	{
// 		squrt += (double)((XVal[i] - mean)*(XVal[i] - mean)) /
// 			(NHWC);
// 	}
// 	squrt = sqrt(squrt);
// 	return (float)squrt;
// }

/*
 * Standardize the first NHWC entries of the global XVal buffer in place:
 * x <- (x - mean) / std, with mean and std accumulated in double.
 * Fixes: removed the unused buf1/buf2 locals and dead debug comments,
 * made the double->float narrowing explicit, and guarded the degenerate
 * cases NHWC <= 0 and std == 0 (constant data) which previously divided
 * by zero.
 */
void DR_intNormalize(int NHWC)
{
	double mean = 0.0;
	double std = 0.0;
	int i;

	if (NHWC <= 0)
		return;
	for (i = 0; i < NHWC; ++i)
	{
		mean += (double)XVal[i] / (NHWC);
	}
	for (i = 0; i < NHWC; ++i)
	{
		std += ((double)XVal[i] - mean) * ((double)XVal[i] - mean) / (NHWC);
	}
	std = sqrt(std);
	if (std == 0.0)		/* constant data: leave values centered, avoid /0 */
		std = 1.0;
	for (i = 0; i < NHWC; ++i)
	{
		XVal[i] = (float)((XVal[i] - mean) / std);
	}
}

/*
 * Load train/test feature and label text files into the global XVal/YVal
 * buffers and record the dataset geometry in *pdf, then normalize XVal.
 * Returns 0 on success, 1-4 when the corresponding file cannot be opened,
 * 5 on allocation failure.
 * Fixes vs. original:
 *  - files opened before a later fopen failed were leaked; all paths now
 *    go through a single cleanup that closes whatever was opened.
 *  - malloc results are checked before use.
 *  - the test-label loop started at TrainSampleNum instead of
 *    LabelNum*TrainSampleNum, so for LabelNum > 1 it overwrote training
 *    labels and left the test-label region unset. It now mirrors the
 *    training-label loop. (NOTE(review): confirm the label files hold
 *    LabelNum values per sample, as the training loop assumes.)
 */
int DR_loadDS(int TrainSampleNum, int TestSampleNum, int Height, int Width, int Channel, int clsn, Predefine *pdf)
{
	int rc = 0;
	int i;
	int tmp;

	FILE *fx1 = fopen(".\\dataset\\train_data.txt", "r");
	FILE *fx2 = fopen(".\\dataset\\test_data.txt", "r");
	FILE *fy1 = fopen(".\\dataset\\train_label.txt", "r");
	FILE *fy2 = fopen(".\\dataset\\test_label.txt", "r");
	if (fx1 == NULL) { rc = 1; goto cleanup; }
	if (fx2 == NULL) { rc = 2; goto cleanup; }
	if (fy1 == NULL) { rc = 3; goto cleanup; }
	if (fy2 == NULL) { rc = 4; goto cleanup; }

	pdf->TrainSampleNum = TrainSampleNum;
	pdf->TestSampleNum = TestSampleNum;
	pdf->Height = Height;
	pdf->Width = Width;
	pdf->Channel = Channel;
	pdf->ClassificationNum = clsn;
	pdf->CompleteSampleNum = pdf->TrainSampleNum + pdf->TestSampleNum;
	pdf->LabelNum = pdf->ClassificationNum;		/* one-hot width == class count */

	XVal = (float *)malloc(pdf->CompleteSampleNum * pdf->Height
		* pdf->Width * pdf->Channel * sizeof(float));
	YVal = (float*)malloc(pdf->LabelNum * pdf->CompleteSampleNum * sizeof(float));
	if (XVal == NULL || YVal == NULL) { rc = 5; goto cleanup; }

	/* training features */
	for (i = 0; i < pdf->TrainSampleNum * pdf->Height * pdf->Width * pdf->Channel; ++i)
	{
		fscanf(fx1, "%d", &tmp);
		XVal[i] = (float)tmp;
	}
	/* test features, appended after the training block */
	for (i = pdf->TrainSampleNum * pdf->Height * pdf->Width * pdf->Channel;
		i < pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel; ++i)
	{
		fscanf(fx2, "%d", &tmp);
		XVal[i] = (float)tmp;
	}
	/* training labels: LabelNum values per sample */
	for (i = 0; i < pdf->LabelNum * pdf->TrainSampleNum; ++i)
	{
		fscanf(fy1, "%d", &tmp);
		YVal[i] = (float)tmp;
	}
	/* test labels, appended after ALL training labels (bug fix) */
	for (i = pdf->LabelNum * pdf->TrainSampleNum;
		i < pdf->LabelNum * pdf->CompleteSampleNum; ++i)
	{
		fscanf(fy2, "%d", &tmp);
		YVal[i] = (float)tmp;
	}

	DR_intNormalize(pdf->CompleteSampleNum * pdf->Height * pdf->Width * pdf->Channel);

cleanup:
	if (fx1) fclose(fx1);
	if (fx2) fclose(fx2);
	if (fy1) fclose(fy1);
	if (fy2) fclose(fy2);
	return rc;
}

/************************************************************************/
/*                          数据集读取操作                                */
/************************************************************************/

/* Working copies of the dataset split into train/test and mini-batches. */
typedef struct {
	Mat CompleteFeatureDataSet;		// complete feature Mat for FCNN
	Mat CompleteLabelDataSet;		// complete label Mat
	Mat CompleteTrainFeature;		// complete feature Mat for FCNN training
	Mat CompleteTrainLabel;			// complete label Mat
	Mat *BatchTrainFeature;			// batch feature Mat for FCNN training [4 dimensions]
	Mat *BatchTrainLabel;			// batch label Mat [4 dimensions]
	Mat TestFeature;				// feature Mat for FCNN test
	Mat TestLabel;					// label Mat without onehot

	int CompleteSampleNum;			// number of all samples [int]
	int TrainSampleNum;				// number of training samples [int]
	int TestSampleNum;				// number of test samples [int]

	int SampleChannels;				// number of samples' channels
	int SampleRow;					// number of samples' row
	int SampleCol;					// number of samples' column
	int ClassificationNum;			// Number of categories classified [int]
	int LabelNum;					// Number of label [int]
	int BatchSize;					// batch size for dataset from pdf
	int BatchNum;					// number of batch
	int remainder;					// the last batch size
}DataSet;

/* One network layer: its configuration plus the Mats used by forward/backward passes. */
typedef struct {
	int LayerType;				// Layer Type:0 for full-connection, 1 for convolution, 2 for deconvolution, 3 for max-pool, 4 for average-pool [int]
	int PaddingMode;			// Padding Mode:0 for VALID ,1 for SAME [int]
	int ActiFuncNum;			// Active Function for Conv: 1 for sigmoid, 2 for tanh, 3 for relu, 4 for l-relu, 5 for softmax [int]
	int KernelSize[2];			// Size of Kernel [k1,k2] [int[2]]
	int Stride[2];				// Operation Stride [weight,height] [int[2]] // could be 4
	Mat* OriginMat;				// Result of Padding of the Input [Mat*] with RC
	//Mat* ReConstruct;			// Reconstruct Mat for Operation (with bias for convolution) [Mat*]
	Mat KernelMat;				// Operation Kernel of this layer [Mat]
	Mat WeightMat;				// Kernel with bias for Convolution [Mat]
	Mat* SumMat;				// Sum Mat of this Layer [Mat*]
	Mat* ActiMat;				// Activated Sum Mat of Convolution [Mat*]
	Mat* DeriMat;				// Derivative of Activation Mat of Convolution [Mat*]
	Mat* DeltaMat;				// Error Delivery Mat [Mat*]
	Mat NablaMat;				// Error on Weight Mat [Mat*]
	//int OutSize[3];				// Size of output [channel,height,weight] [int[4]]
	int OutH;					// Height of ActiMat
	int OutW;					// Width of ActiMat
	int OutC;					// Channel of ActiMat
}Layer;

/* The whole network: the Layer array plus global training settings. */
typedef struct {
	int sampleCapacity;         // number of current samples [int]
	int SampleRow;				// row(features) of sample [int]
	int SampleCol;				// column(features) of sample [int]
	int SampleCha;				// channels(features) of sample [int]
	int HiddenLayerNum;         // number of hidden layer [int]
	int WeightInitWayNum;       // weight initialization mode [int]
	//int *LayerType;				// layer type of each layers,  [int*]
	//int ParameterTotal;			// number of Parameters in this network [int]
	//int *LayerParameters;		// Parameters of each layer: fc with 1 para, cp with 8 paras in order of padding,k1,k2,outchannels,stride[4]
	/*int BatchNum;
	int BatchSize;
	int reminder;*/

	Layer *Layers;				// layers of FCNN
	Mat OnehotMat;              // onehot code Matrix

	int OptFuncNum;				// Optimization Method chosen:0 for BGD, 1 for Adam
	int ClassificationNum;      // Number of categories classified [int]
	int LossFuncNum;            // loss function [int]
}CNN;

/************************************************************************/
/*                            初始化操作                                 */
/************************************************************************/

/* Reset every configured Predefine field to its "unset" sentinel (-1 / NULL). */
void InitPredefine(Predefine *pdf)
{
	/* dataset geometry */
	pdf->CompleteSampleNum = -1;
	pdf->TrainSampleNum = -1;
	pdf->TestSampleNum = -1;
	pdf->Width = -1;
	pdf->Height = -1;
	pdf->Channel = -1;
	pdf->ClassificationNum = -1;
	pdf->LabelNum = -1;
	/* network topology and training settings */
	pdf->HiddenLayerNum = -1;
	pdf->LayerType = NULL;
	pdf->LayerParameters = NULL;
	pdf->ParametersTotal = -1;
	pdf->WeightInitWayNum = -1;
	pdf->LossFuncNum = -1;
	pdf->BatchSize = -1;
	pdf->OptFuncNum = -1;
	/* NOTE(review): ActiFuncNumArray and OptPara are not reset here — confirm intentional */
}

/* Reset a DataSet: all Mat storage pointers NULL, all counters -1. */
void InitDataSet(DataSet *ds)
{
	/* matrix storage */
	ds->CompleteFeatureDataSet.element = NULL;
	ds->CompleteLabelDataSet.element = NULL;
	ds->CompleteTrainFeature.element = NULL;
	ds->CompleteTrainLabel.element = NULL;
	ds->TestFeature.element = NULL;
	ds->TestLabel.element = NULL;
	ds->BatchTrainFeature = NULL;
	ds->BatchTrainLabel = NULL;
	/* counters and shape */
	ds->CompleteSampleNum = -1;
	ds->TrainSampleNum = -1;
	ds->TestSampleNum = -1;
	ds->SampleChannels = -1;
	ds->SampleRow = -1;
	ds->SampleCol = -1;
	ds->ClassificationNum = -1;
	ds->LabelNum = -1;
	ds->BatchSize = -1;
	ds->BatchNum = -1;
	ds->remainder = -1;
}

/* Mark one Layer as "not yet configured": -1 for every scalar, NULL for
 * every matrix pointer. Only the first two KernelSize/Stride slots are used.
 * NOTE(review): DeltaMat and NablaMat are not reset here — confirm every
 * SpaceCreate* path assigns them before they are read. */
void InitLayer(Layer *ncp)
{
	int k;

	ncp->LayerType = -1;
	ncp->PaddingMode = -1;
	ncp->ActiFuncNum = -1;
	for (k = 0; k < 2; ++k)
	{
		ncp->KernelSize[k] = -1;
		ncp->Stride[k] = -1;
	}
	ncp->OriginMat = NULL;
	ncp->KernelMat.element = NULL;
	ncp->WeightMat.element = NULL;
	ncp->SumMat = NULL;
	ncp->ActiMat = NULL;
	ncp->DeriMat = NULL;
	ncp->OutH = -1;
	ncp->OutW = -1;
	ncp->OutC = -1;
}

/* Mark every CNN field as "not yet configured" (-1 / NULL).
 * Fix: ClassificationNum and LossFuncNum were previously never reset here,
 * even though DumpCNN compares both against the -1 sentinel — a dump on a
 * freshly-initialized CNN read uninitialized memory. */
void InitCNN(CNN *cnn)
{
	cnn->sampleCapacity = -1;
	cnn->SampleCol = -1;
	cnn->SampleRow = -1;
	cnn->SampleCha = -1;
	cnn->HiddenLayerNum = -1;
	cnn->WeightInitWayNum = -1;
	cnn->Layers = NULL;
	cnn->OnehotMat.element = NULL;
	cnn->OptFuncNum = -1;
	cnn->ClassificationNum = -1;	/* was missing; DumpCNN checks this */
	cnn->LossFuncNum = -1;			/* was missing; DumpCNN checks this */
}

/************************************************************************/
/*                            初始化操作                                 */
/************************************************************************/

/************************************************************************/
/*                             dump操作                                  */
/************************************************************************/

/* Print n ints, tab-separated, followed by a newline. */
void DumpIntArray(int* array, int n) {
	int idx = 0;
	while (idx < n) {
		printf("%d\t", array[idx]);
		++idx;
	}
	printf("\n");
}

/* Print n floats in scientific notation, tab-separated, then a newline.
 * Fix: removed the unused local buffer `char str[40]`, a leftover from the
 * commented-out F2S formatting path. */
void DumpFloatArray(float* array, int n) {
	int i;
	for (i = 0; i < n; ++i) {
		printf("%e", array[i]);	/* float promotes to double for %e */
		printf("\t");
	}
	printf("\n");
}

/* Pretty-print every Predefine field, validating each against the sentinel
 * written by InitPredefine (-1 for scalars, NULL for pointers).
 * Returns 0 on success; prints a diagnostic and returns -1 at the first
 * field that is still uninitialized. */
int DumpPredefine(Predefine pdf)
{
	printf("============================================================== Predefine Dump =================================================================\n");
	if (pdf.CompleteSampleNum == -1) {
		printf("\t\t\tPredefine parameter 'CompleteSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteSampleNum:\t%d\n", pdf.CompleteSampleNum);

	if (pdf.TrainSampleNum == -1) {
		printf("\t\t\tPredefine parameter 'TrainSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("TrainSampleNum:\t\t%d\n", pdf.TrainSampleNum);

	if (pdf.TestSampleNum == -1) {
		printf("\t\t\tPredefine parameter 'TestSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("TestSampleNum:\t\t%d\n", pdf.TestSampleNum);

	if (pdf.Height == -1) {
		printf("\t\t\tPredefine parameter 'Height' uninitialized!!!\n");
		return -1;
	}
	printf("Height:\t\t\t%d\n", pdf.Height);

	if (pdf.Width == -1) {
		printf("\t\t\tPredefine parameter 'Width' uninitialized!!!\n");
		return -1;
	}
	printf("Width:\t\t\t%d\n", pdf.Width);

	if (pdf.Channel == -1) {
		printf("\t\t\tPredefine parameter 'Channel' uninitialized!!!\n");
		return -1;
	}
	printf("Channel:\t\t%d\n", pdf.Channel);

	if (pdf.ClassificationNum == -1) {
		printf("\t\t\tPredefine parameter 'ClassificationNum' uninitialized!!!\n");
		return -1;
	}
	printf("ClassificationNum:\t%d\n", pdf.ClassificationNum);

	// if (pdf.OnehotOptions == -1) {
	// 	printf("\t\t\tPredefine parameter 'OnehotOptions' uninitialized!!!\n");
	// 	return -1;
	// }
	// printf("OnehotOptions:\t\t%d\n", pdf.OnehotOptions);

	if (pdf.LabelNum == -1) {
		printf("\t\t\tPredefine parameter 'LabelNum' uninitialized!!!\n");
		return -1;
	}
	printf("LabelNum:\t\t%d\n", pdf.LabelNum);

	if (pdf.HiddenLayerNum == -1) {
		printf("\t\t\tPredefine parameter 'HiddenLayerNum' uninitialized!!!\n");
		return -1;
	}
	printf("HiddenLayerNum:\t\t%d\n", pdf.HiddenLayerNum);

	if (pdf.WeightInitWayNum == -1) {
		printf("\t\t\tPredefine parameter 'WeightInitWayNum' uninitialized!!!\n");
		return -1;
	}
	printf("WeightInitWayNum:\t%d\n", pdf.WeightInitWayNum);

	if (pdf.LossFuncNum == -1) {
		printf("\t\t\tPredefine parameter 'LossFuncNum' uninitialized!!!\n");
		return -1;
	}
	printf("LossFuncNum:\t\t%d\n", pdf.LossFuncNum);

	if (pdf.BatchSize == -1) {
		printf("\t\t\tPredefine parameter 'BatchSize' uninitialized!!!\n");
		return -1;
	}
	printf("BatchSize:\t\t%d\n", pdf.BatchSize);

	if (pdf.LayerType == NULL) {
		printf("\t\t\tPredefine parameter 'LayerType' uninitialized!!!\n");
		return -1;
	}
	printf("LayerType:\n");
	/* one type id per layer: input + HiddenLayerNum hidden + output */
	DumpIntArray(pdf.LayerType, pdf.HiddenLayerNum + 2);

	if (pdf.ParametersTotal == -1) {
		printf("\t\t\tPredefine parameter 'ParametersTotal' uninitialized!!!\n");
		return -1;
	}
	printf("ParametersTotal:\t%d\n", pdf.ParametersTotal);

	if (pdf.LayerParameters == NULL) {
		printf("\t\t\tPredefine parameter 'LayerParameters' uninitialized!!!\n");
		return -1;
	}
	printf("LayerParameters:\n");
	/* flat list consumed cursor-style by LoadinPara2Layers */
	DumpIntArray(pdf.LayerParameters, pdf.ParametersTotal);

	// if (pdf.ActiFuncNumArray == NULL) {
	// 	printf("\t\t\tPredefine parameter 'ActiFuncNumArray' uninitialized!!!\n");
	// 	return -1;
	// }
	// printf("ActiFuncNumArray:\n");
	// DumpIntArray(pdf.ActiFuncNumArray, pdf.HiddenLayerNum + 2);

	printf("========================================================== Predefine Dump Finished ============================================================\n");


	return 0;
}

/* Pretty-print every DataSet field, validating each against the sentinel
 * written by InitDataSet (-1 for scalars, NULL for matrix storage).
 * Matrix fields are reported by their [row,col] shape only.
 * Returns 0 on success, -1 at the first uninitialized field. */
int DumpDataSet(DataSet dst)
{
	printf("=============================================================== DataSet Dump ==================================================================\n");
	if (dst.CompleteSampleNum == -1) {
		printf("\t\t\tDataSet parameter 'CompleteSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteSampleNum:\t%d\n", dst.CompleteSampleNum);

	if (dst.TrainSampleNum == -1) {
		printf("\t\t\tDataSet parameter 'TrainSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("TrainSampleNum:\t\t%d\n", dst.TrainSampleNum);

	if (dst.TestSampleNum == -1) {
		printf("\t\t\tDataSet parameter 'TestSampleNum' uninitialized!!!\n");
		return -1;
	}
	printf("TestSampleNum:\t\t%d\n", dst.TestSampleNum);

	if (dst.SampleRow == -1) {
		printf("\t\t\tDataSet parameter 'SampleRow' uninitialized!!!\n");
		return -1;
	}
	printf("SampleRow:\t\t%d\n", dst.SampleRow);

	if (dst.SampleCol == -1) {
		printf("\t\t\tDataSet parameter 'SampleCol' uninitialized!!!\n");
		return -1;
	}
	printf("SampleCol:\t\t%d\n", dst.SampleCol);

	if (dst.SampleChannels == -1) {
		printf("\t\t\tDataSet parameter 'SampleChannels' uninitialized!!!\n");
		return -1;
	}
	printf("SampleChannels:\t\t%d\n", dst.SampleChannels);

	if (dst.ClassificationNum == -1) {
		printf("\t\t\tDataSet parameter 'ClassificationNum' uninitialized!!!\n");
		return -1;
	}
	printf("ClassificationNum:\t%d\n", dst.ClassificationNum);

	if (dst.LabelNum == -1)
	{
		printf("\t\t\tDataSet parameter 'LabelNum' uninitialized!!!\n");
		return -1;
	}
	printf("LabelNum:\t\t%d\n", dst.LabelNum);

	if (dst.BatchNum == -1)
	{
		printf("\t\t\tDataSet parameter 'BatchNum' uninitialized!!!\n");
		return -1;
	}
	printf("BatchNum:\t\t%d\n", dst.BatchNum);

	if (dst.BatchSize == -1)
	{
		printf("\t\t\tDataSet parameter 'BatchSize' uninitialized!!!\n");
		return -1;
	}
	printf("BatchSize:\t\t%d\n", dst.BatchSize);

	if (dst.remainder == -1)
	{
		printf("\t\t\tDataSet parameter 'remainder' uninitialized!!!\n");
		return -1;
	}
	printf("remainder:\t\t%d\n", dst.remainder);

	if (dst.CompleteFeatureDataSet.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'CompleteFeatureDataSet' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteFeatureDataSet:\t[%d,%d]\n"
		, dst.CompleteFeatureDataSet.row, dst.CompleteFeatureDataSet.col);

	if (dst.CompleteLabelDataSet.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'CompleteLabelDataSet' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteLabelDataSet:\t[%d,%d]\n"
		, dst.CompleteLabelDataSet.row, dst.CompleteLabelDataSet.col);

	if (dst.CompleteTrainFeature.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'CompleteTrainFeature' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteTrainFeature:\t[%d,%d]\n"
		, dst.CompleteTrainFeature.row, dst.CompleteTrainFeature.col);

	if (dst.CompleteTrainLabel.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'CompleteTrainLabel' uninitialized!!!\n");
		return -1;
	}
	printf("CompleteTrainLabel:\t[%d,%d]\n"
		, dst.CompleteTrainLabel.row, dst.CompleteTrainLabel.col);

	if (dst.TestFeature.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'TestFeature' uninitialized!!!\n");
		return -1;
	}
	printf("TestFeature:\t\t[%d,%d]\n"
		, dst.TestFeature.row, dst.TestFeature.col);

	if (dst.TestLabel.element == NULL)
	{
		printf("\t\t\tDataSet parameter 'TestLabel' uninitialized!!!\n");
		return -1;
	}
	printf("TestLabel:\t\t[%d,%d]\n"
		, dst.TestLabel.row, dst.TestLabel.col);

	printf("========================================================== DataSet Dump Finished ==============================================================\n");

	return 0;
}

/* Dump the input layer: its [H,W,C] output size and the shape of one
 * activation slice (ActiMat is an array of OutH mats of [OutW,OutC],
 * see SpaceCreateInputLayer). Always returns 0. */
int DumpInputLayer(Layer in)
{
	printf("\tInput:\t\t[%d,%d,%d]\n", in.OutH, in.OutW, in.OutC);
	printf("ActiMat[%d,%d]\n", in.ActiMat[0].row, in.ActiMat[0].col);
	return 0;
}

/* Dump a fully-connected layer: neuron count (OutC), activation-function id,
 * and the shapes of all its matrices (see SpaceCreateFullConnLayer for the
 * +1 bias row/column layout). Always returns 0. */
int DumpFullConnLayer(Layer fc)
{
	printf("Layers:\t NeuronNum \t ActiNum\n");
	printf("   FcLayer\t %d \t\t %d \n", fc.OutC, fc.ActiFuncNum);
	printf("OriginMat[%d,%d]\n", fc.OriginMat[0].row, fc.OriginMat[0].col);
	printf("KernelMat[%d,%d]\tWeightMat[%d,%d]\tNablaMat[%d,%d]\n",
		fc.KernelMat.row, fc.KernelMat.col, fc.WeightMat.row, fc.WeightMat.col,
		fc.NablaMat.row, fc.NablaMat.col);
	printf("SumMat[%d,%d]\tActiMat[%d,%d]\tDeriMat[%d,%d]\tDeltaMat[%d,%d]\n",
		fc.SumMat[0].row, fc.SumMat[0].col, fc.ActiMat[0].row, fc.ActiMat[0].col,
		fc.DeriMat[0].row, fc.DeriMat[0].col, fc.DeltaMat[0].row, fc.DeltaMat[0].col);
	return 0;
}

/* Dump a convolution layer: padding mode, activation id, kernel size,
 * stride, output [H,W,C], and the shapes of all its matrices
 * (only slice [0] of the per-row matrix arrays is shown; all OutH slices
 * share one shape, see SpaceCreateConvLayer). Always returns 0. */
int DumpConvLayer(Layer cv)
{
	printf("Layers:\t Pad \t ActiNum \t Kernel \t Stride \t OutSize \n");
	printf("    CLayer\t %d \t %d \t\t [%d,%d] \t\t [%d,%d] \t\t [%d,%d,%d]\n"
		, cv.PaddingMode, cv.ActiFuncNum, cv.KernelSize[0], cv.KernelSize[1]
		, cv.Stride[0], cv.Stride[1], cv.OutH, cv.OutW, cv.OutC);
	printf("KernelMat[%d,%d]\tWeightMat[%d,%d]\tNablaMat[%d,%d]\n",
		cv.KernelMat.row, cv.KernelMat.col, cv.WeightMat.row, cv.WeightMat.col,
		cv.NablaMat.row, cv.NablaMat.col);
	printf("OriginMat[%d,%d]\n", cv.OriginMat[0].row, cv.OriginMat[0].col);
	printf("SumMat[%d,%d]\tActiMat[%d,%d]\tDeriMat[%d,%d]\tDeltaMat[%d,%d]\n",
		cv.SumMat[0].row, cv.SumMat[0].col, cv.ActiMat[0].row, cv.ActiMat[0].col,
		cv.DeriMat[0].row, cv.DeriMat[0].col, cv.DeltaMat[0].row, cv.DeltaMat[0].col);
	return 0;
}

/* Dump a deconvolution (transposed-convolution) layer. Unlike the conv
 * layer there is no WeightMat to report (see SpaceCreateDeConvLayer).
 * Always returns 0. */
int DumpDeConvLayer(Layer dec)
{
	printf("Layers:\t Pad \t ActiNum \t Kernel \t Stride \t OutSize \n");
	printf("  deCLayer\t %d \t %d \t\t [%d,%d] \t\t [%d,%d] \t\t [%d,%d,%d]\n"
		, dec.PaddingMode, dec.ActiFuncNum
		, dec.KernelSize[0], dec.KernelSize[1]
		, dec.Stride[0], dec.Stride[1]
		, dec.OutH, dec.OutW, dec.OutC);
	printf("KernelMat[%d,%d]\tNablaMat[%d,%d]\n",
		dec.KernelMat.row, dec.KernelMat.col, dec.NablaMat.row, dec.NablaMat.col);
	printf("OriginMat[%d,%d]\n", dec.OriginMat[0].row, dec.OriginMat[0].col);
	printf("SumMat[%d,%d]\tActiMat[%d,%d]\tDeriMat[%d,%d]\tDeltaMat[%d,%d]\n",
		dec.SumMat[0].row, dec.SumMat[0].col, dec.ActiMat[0].row, dec.ActiMat[0].col,
		dec.DeriMat[0].row, dec.DeriMat[0].col, dec.DeltaMat[0].row, dec.DeltaMat[0].col);
	return 0;
}

/* Dump a max-pooling layer. Pooling layers have no activation function
 * and no weights; KernelMat here is the per-output bookkeeping matrix
 * allocated in SpaceCreateMaxPoolLayer. Always returns 0. */
int DumpMaxPoolLayer(Layer mp)
{
	printf("Layers:\t Pad \t Kernel \t Stride \t OutSize \n");
	printf("   MPLayer\t %d \t [%d,%d] \t\t [%d,%d] \t\t [%d,%d,%d]\n"
		, mp.PaddingMode, mp.KernelSize[0], mp.KernelSize[1]
		, mp.Stride[0], mp.Stride[1], mp.OutH, mp.OutW, mp.OutC);
	printf("KernelMat[%d,%d]\n",mp.KernelMat.row, mp.KernelMat.col);
	printf("OriginMat[%d,%d]\n", mp.OriginMat[0].row, mp.OriginMat[0].col);
	printf("ActiMat[%d,%d]\tDeltaMat[%d,%d]\n",
		mp.ActiMat[0].row, mp.ActiMat[0].col,mp.DeltaMat[0].row, mp.DeltaMat[0].col);
	return 0;
}

/* Dump an average-pooling layer; mirrors DumpMaxPoolLayer.
 * Always returns 0. */
int DumpAvePoolLayer(Layer ap)
{
	printf("Layers:\t Pad \t Kernel \t Stride \t OutSize \n");
	printf("   APLayer\t %d \t [%d,%d] \t\t [%d,%d] \t\t [%d,%d,%d]\n"
		, ap.PaddingMode, ap.KernelSize[0], ap.KernelSize[1]
		, ap.Stride[0], ap.Stride[1], ap.OutH, ap.OutW, ap.OutC);
	printf("KernelMat[%d,%d]\n", ap.KernelMat.row, ap.KernelMat.col);
	printf("OriginMat[%d,%d]\n", ap.OriginMat[0].row, ap.OriginMat[0].col);
	printf("ActiMat[%d,%d]\tDeltaMat[%d,%d]\n",
		ap.ActiMat[0].row, ap.ActiMat[0].col, ap.DeltaMat[0].row, ap.DeltaMat[0].col);
	return 0;
}

/* Dump every layer of the network: the input layer first, then layers
 * 1..HiddenLayerNum+1 via the printer matching their LayerType
 * (0 = fully-connected, 1 = conv, 2 = deconv, 3 = max-pool, 4 = avg-pool).
 * Layers with an unknown type are silently skipped. Always returns 0. */
int DumpLayersofCNN(CNN cnn)
{
	int i;

	printf("-----------------------Layers---------------------\n");
	DumpInputLayer(cnn.Layers[0]);
	for (i = 1; i < cnn.HiddenLayerNum + 2; i++)
	{
		switch (cnn.Layers[i].LayerType)
		{
		case 0:
			printf("%d ", i);
			DumpFullConnLayer(cnn.Layers[i]);
			break;
		case 1:
			printf("%d ", i);
			DumpConvLayer(cnn.Layers[i]);
			break;
		case 2:
			printf("%d ", i);
			DumpDeConvLayer(cnn.Layers[i]);
			break;
		case 3:
			printf("%d ", i);
			DumpMaxPoolLayer(cnn.Layers[i]);
			break;
		case 4:
			printf("%d ", i);
			DumpAvePoolLayer(cnn.Layers[i]);
			break;
		default:
			break;
		}
	}
	printf("--------------------------------------------------\n");
	return 0;
}

/* Pretty-print every CNN field, validating each against the sentinel
 * written by InitCNN (-1 / NULL), then dump all layers.
 * Returns 0 on success, -1 at the first uninitialized field.
 * Fix: the error messages previously said "DataSet parameter" for every
 * field — a copy-paste from DumpDataSet; they now correctly say
 * "CNN parameter", consistent with DumpPredefine/DumpDataSet naming. */
int DumpCNN(CNN cnn)
{
	printf("================================================================= CNN Dump ====================================================================\n");
	if (cnn.sampleCapacity == -1) {
		printf("\t\t\tCNN parameter 'sampleCapacity' uninitialized!!!\n");
		return -1;
	}
	printf("sampleCapacity:\t\t%d\n", cnn.sampleCapacity);

	if (cnn.SampleRow == -1) {
		printf("\t\t\tCNN parameter 'SampleRow' uninitialized!!!\n");
		return -1;
	}
	printf("SampleRow:\t\t%d\n", cnn.SampleRow);

	if (cnn.SampleCol == -1) {
		printf("\t\t\tCNN parameter 'SampleCol' uninitialized!!!\n");
		return -1;
	}
	printf("SampleCol:\t\t%d\n", cnn.SampleCol);

	if (cnn.SampleCha == -1) {
		printf("\t\t\tCNN parameter 'SampleCha' uninitialized!!!\n");
		return -1;
	}
	printf("SampleCha:\t\t%d\n", cnn.SampleCha);

	if (cnn.HiddenLayerNum == -1) {
		printf("\t\t\tCNN parameter 'HiddenLayerNum' uninitialized!!!\n");
		return -1;
	}
	printf("HiddenLayerNum:\t\t%d\n", cnn.HiddenLayerNum);

	if (cnn.WeightInitWayNum == -1) {
		printf("\t\t\tCNN parameter 'WeightInitWayNum' uninitialized!!!\n");
		return -1;
	}
	printf("WeightInitWayNum:\t%d\n", cnn.WeightInitWayNum);

	if (cnn.ClassificationNum == -1) {
		printf("\t\t\tCNN parameter 'ClassificationNum' uninitialized!!!\n");
		return -1;
	}
	printf("ClassificationNum:\t%d\n", cnn.ClassificationNum);

	if (cnn.LossFuncNum == -1) {
		printf("\t\t\tCNN parameter 'LossFuncNum' uninitialized!!!\n");
		return -1;
	}
	printf("LossFuncNum:\t\t%d\n", cnn.LossFuncNum);

	if (cnn.OnehotMat.element == NULL) {
		printf("\t\t\tCNN parameter 'OnehotMat' uninitialized!!!\n");
		return -1;
	}
	printf("OnehotMat:\t\t[%d,%d]\n", cnn.OnehotMat.row, cnn.OnehotMat.col);

	if (cnn.Layers == NULL) {
		printf("\t\t\tCNN parameter 'Layers' uninitialized!!!\n");
		return -1;
	}
	DumpLayersofCNN(cnn);

	printf("============================================================ CNN Dump Finished ================================================================\n");

	return 0;
}

/* Print the full KernelMat contents of every non-input layer
 * (layers 1..HiddenLayerNum+1). The WeightMat dump for trainable layers
 * (types 0 and 1) is currently disabled. Always returns 0. */
int DumpKWofCNN(CNN cnn)
{
	printf("--------Dump KernelMat and WeightMat of CNN--------\n");
	int i;
	for (i = 1; i < cnn.HiddenLayerNum + 2; ++i)
	{
		printf("%d Layers:\n", i);
		MatDump(&cnn.Layers[i].KernelMat);
		if (cnn.Layers[i].LayerType == 0 || cnn.Layers[i].LayerType == 1)
		{
			// MatDump(&cnn.Layers[i].WeightMat);
		}
	}
	printf("----------------------------------------------------\n");
	return 0;
}

/* Print an H-array of [W,C] matrices channel by channel: for each channel i,
 * an hgh x W grid of values. matp is an array of `hgh` Mats sharing one
 * shape [row = W, col = C]; matp[j].element[k][i] is the value at
 * (h = j, w = k, c = i).
 * NOTE(review): MatplusShape presumably prints the overall [H,W,C] shape
 * header — confirm against its definition elsewhere in the file.
 * Always returns 0. */
int DumpMatplusHWCC(Mat* matp, int hgh)
{
	MatplusShape(matp, hgh);
	int i, j, k;
	char str[40];	/* scratch buffer for F2S fixed-point formatting */
	for (i = 0; i < matp[0].col; ++i)
	{
		printf("Channel %d:\n", i);
		for (j = 0; j < hgh; ++j)
		{
			for (k = 0; k < matp[0].row; ++k)
			{
				printf("%s ", F2S((matp[j].element[k])[i], str));
			}
			printf("\n");
		}
	}
	return 0;
}

/* Print an H-array of [W,C] matrices slice by slice: for each height index i,
 * the full [row x col] matrix mp[i] is printed row-major. Complements
 * DumpMatplusHWCC, which iterates channel-first over the same layout.
 * Always returns 0. */
int DumpMatplusHWCH(Mat* mp, int hgh)
{
	int i, j, k;
	char str[40];	/* scratch buffer for F2S fixed-point formatting */
	MatplusShape(mp, hgh);
	for (i = 0; i < hgh; ++i)
	{
		printf("Height %d :\n", i);
		for (j = 0; j < mp[0].row; ++j)
		{
			for (k = 0; k < mp[0].col; ++k)
			{
				printf("%s ", F2S((mp[i].element[j])[k], str));
			}
			printf("\n");
		}
	}
	printf("\n");
	return 0;
}

//int DumpCNNMat(CNN cnn)
//{
//	int i;
//	DumpMatplusHWCC(cnn.Layers[0].ActiMat, cnn.Layers[0].OutH);
//	for (i = 1; i < cnn.HiddenLayerNum + 2; ++i)
//	{
//		if (cnn.Layers[i].LayerType == 0)
//		{
//			DumpMatplusHWCC(cnn.Layers[i].OriginMat, 1);
//
//		}
//	}
//}

/************************************************************************/
/*                             dump操作                                  */
/************************************************************************/

/* Copy user-facing Predefine settings into the runtime DataSet and CNN
 * structs and derive the batching layout:
 *   BatchNum  = ceil(TrainSampleNum / BatchSize)   via (n - 1) / b + 1
 *   remainder = TrainSampleNum % BatchSize         (row count of the last
 *               batch when nonzero; 0 means all batches are full) */
void LoadParaFromPredefine(Predefine pdf, DataSet *dst, CNN *cnn)
{
	dst->CompleteSampleNum = pdf.CompleteSampleNum;
	dst->TrainSampleNum = pdf.TrainSampleNum;
	dst->TestSampleNum = pdf.TestSampleNum;
	dst->SampleRow = pdf.Height;
	dst->SampleCol = pdf.Width;
	dst->SampleChannels = pdf.Channel;
	dst->ClassificationNum = pdf.ClassificationNum;
	dst->LabelNum = pdf.LabelNum;
	dst->BatchSize = pdf.BatchSize;
	dst->BatchNum = (dst->TrainSampleNum - 1) / dst->BatchSize + 1;	// ceiling division
	dst->remainder = dst->TrainSampleNum % dst->BatchSize;

	cnn->sampleCapacity = pdf.CompleteSampleNum;
	cnn->SampleRow = pdf.Height;
	cnn->SampleCol = pdf.Width;
	cnn->SampleCha = pdf.Channel;
	cnn->HiddenLayerNum = pdf.HiddenLayerNum;
	cnn->WeightInitWayNum = pdf.WeightInitWayNum;
	//cnn->Optimization = pdf.Optimization;
	cnn->ClassificationNum = pdf.ClassificationNum;
	cnn->LossFuncNum = pdf.LossFuncNum;
	cnn->OptFuncNum = pdf.OptFuncNum;
	//cnn->LayerType = pdf.LayerType;
	//cnn->ParameterTotal = pdf.ParametersTotal;
	//cnn->LayerParameters = pdf.LayerParameters;
	/*cnn->BatchNum = dst->BatchNum;
	cnn->BatchSize = dst->BatchSize;
	cnn->reminder = dst->remainder;*/
}

/************************************************************************/
/*                            DataSet创建                                */
/************************************************************************/

/* Allocate and zero every matrix owned by the DataSet:
 * - complete feature/label matrices (one row per sample, features
 *   flattened to SampleRow*SampleCol*SampleChannels values),
 * - per-batch training feature/label matrices (the last batch holds only
 *   `remainder` rows when TrainSampleNum is not a multiple of BatchSize),
 * - test feature/label matrices.
 * Fix: on malloc failure the original printed a message but fell through
 * and dereferenced the NULL batch array in the loop below; now it
 * returns early instead. */
void CreateDataSetSpace(DataSet *dst)
{
	int i;

	MatCreate(&dst->CompleteFeatureDataSet, dst->CompleteSampleNum
		, dst->SampleRow*dst->SampleCol*dst->SampleChannels);
	MatZeros(&dst->CompleteFeatureDataSet);
	MatCreate(&dst->CompleteLabelDataSet, dst->CompleteSampleNum, dst->LabelNum);
	MatZeros(&dst->CompleteLabelDataSet);
	MatCreate(&dst->CompleteTrainFeature, dst->TrainSampleNum
		, dst->SampleRow*dst->SampleCol*dst->SampleChannels);
	MatZeros(&dst->CompleteTrainFeature);
	MatCreate(&dst->CompleteTrainLabel, dst->TrainSampleNum, dst->LabelNum);
	MatZeros(&dst->CompleteTrainLabel);

	dst->BatchTrainFeature = (Mat*)malloc(dst->BatchNum * sizeof(Mat));
	dst->BatchTrainLabel = (Mat*)malloc(dst->BatchNum * sizeof(Mat));

	if (dst->BatchTrainFeature == NULL)
	{
		printf("BatchTrainFeature create fail!\n");
		return;	/* was: fell through and dereferenced NULL below */
	}
	if (dst->BatchTrainLabel == NULL)
	{
		printf("BatchTrainLabel create fail!\n");
		return;	/* was: fell through and dereferenced NULL below */
	}

	for (i = 0; i < dst->BatchNum; ++i)
	{
		/* the trailing batch only holds `remainder` samples */
		if (i == dst->BatchNum - 1 && dst->remainder != 0)
		{
			MatCreate(&(dst->BatchTrainFeature)[i], dst->remainder
				, dst->SampleRow*dst->SampleCol*dst->SampleChannels);
			MatZeros(&(dst->BatchTrainFeature)[i]);
			MatCreate(&(dst->BatchTrainLabel)[i], dst->remainder, dst->LabelNum);
			MatZeros(&(dst->BatchTrainLabel)[i]);
		}
		else
		{
			MatCreate(&(dst->BatchTrainFeature)[i], dst->BatchSize
				, dst->SampleRow*dst->SampleCol*dst->SampleChannels);
			MatZeros(&(dst->BatchTrainFeature)[i]);
			MatCreate(&(dst->BatchTrainLabel)[i], dst->BatchSize, dst->LabelNum);
			MatZeros(&(dst->BatchTrainLabel)[i]);
		}
	}

	MatCreate(&dst->TestFeature, dst->TestSampleNum
		, dst->SampleRow*dst->SampleCol*dst->SampleChannels);
	MatZeros(&dst->TestFeature);
	MatCreate(&dst->TestLabel, dst->TestSampleNum, dst->LabelNum);
	MatZeros(&dst->TestLabel);
}

/* Copy the raw feature values into every feature matrix of the DataSet.
 * NOTE(review): XVal appears to be a file-scope float array holding all
 * samples' features contiguously, sample-major with
 * SampleRow*SampleCol*SampleChannels floats per sample, training samples
 * before test samples — confirm against its definition elsewhere. */
void FeatureLoading(DataSet *dst)
{
	//printf("CT\n");
	MatSetVal(&dst->CompleteFeatureDataSet, XVal);

	//printf("TT\n");
	MatSetVal(&dst->CompleteTrainFeature, XVal);

	int i;
	//printf("BT\n");
	for (i = 0; i < dst->BatchNum; ++i)
	{
		//printf("%d %d %d \n", i, dst->BatchNum, dst->remainder);
		/* batch i starts BatchSize*H*W*C floats into XVal */
		MatSetVal(&dst->BatchTrainFeature[i]
			, XVal + i*dst->BatchSize*dst->SampleRow*dst->SampleCol*dst->SampleChannels);
	}

	//printf("ST\n");
	/* test features follow the TrainSampleNum training samples */
	MatSetVal(&dst->TestFeature
		, XVal + dst->TrainSampleNum*dst->SampleRow*dst->SampleCol*dst->SampleChannels);
}

/* Copy scalar (non-one-hot) labels into every label matrix; used when
 * LabelNum == 1, see DataConstruct.
 * NOTE(review): YVal appears to be a file-scope float array with one label
 * per sample, training labels before test labels — confirm against its
 * definition elsewhere. */
void LabelLoading(DataSet *dst)
{
	MatSetVal(&dst->CompleteLabelDataSet, YVal);
	MatSetVal(&dst->CompleteTrainLabel, YVal);
	int i;
	for (i = 0; i < dst->BatchNum; ++i)
	{
		/* batch i starts BatchSize labels into YVal */
		MatSetVal(&dst->BatchTrainLabel[i]
			, YVal + i*dst->BatchSize);
	}
	/* test labels follow the TrainSampleNum training labels */
	MatSetVal(&dst->TestLabel
		, YVal + dst->TrainSampleNum);
}

/* One-hot encode the labels in place: for sample i, set column (int)YVal[i]
 * of each label matrix to 1. The matrices were zeroed in
 * CreateDataSetSpace, so all other columns stay 0.
 * NOTE(review): assumes 0 <= (int)YVal[i] < LabelNum for every sample —
 * confirm the label source guarantees this, otherwise these writes go out
 * of bounds. */
void LabelLoadingOnehot(DataSet *dst)
{
	int i;
	for (i = 0; i < dst->CompleteSampleNum; ++i)
	{
		(dst->CompleteLabelDataSet.element[i])[(int)YVal[i]] = 1.0f;
	}
	for (i = 0; i < dst->TrainSampleNum; ++i)
	{
		(dst->CompleteTrainLabel.element[i])[(int)YVal[i]] = 1.0f;
		/* batch index = i / BatchSize, row within the batch = i % BatchSize */
		(dst->BatchTrainLabel[i / dst->BatchSize].element[i%dst->BatchSize])[(int)YVal[i]] = 1.0f;
	}

	for (i = 0; i < dst->TestSampleNum; ++i)
	{
		/* test labels follow the TrainSampleNum training labels in YVal */
		(dst->TestLabel.element[i])[(int)YVal[i + dst->TrainSampleNum]] = 1.0f;
	}
}

/* Build the whole DataSet: allocate all matrix storage, load the features,
 * then load the labels — scalar form when LabelNum == 1, one-hot
 * otherwise. */
void DataConstruct(DataSet *dst)
{
	CreateDataSetSpace(dst);
	FeatureLoading(dst);
	/* pick the label loader matching the label encoding */
	(dst->LabelNum == 1 ? LabelLoading : LabelLoadingOnehot)(dst);
}

/************************************************************************/
/*                            DataSet创建                                */
/************************************************************************/

/************************************************************************/
/*                              CNN创建                                  */
/************************************************************************/

/* Allocate the CNN's Layer array (input + HiddenLayerNum hidden + output =
 * HiddenLayerNum + 2 slots) and reset every slot via InitLayer.
 * Returns the array, or NULL on allocation failure.
 * Fix: the original dereferenced an unchecked malloc in the init loop. */
Layer* SpaceCreateLayers(CNN *cnn)
{
	int i;
	cnn->Layers = (Layer*)malloc((cnn->HiddenLayerNum + 2) * sizeof(Layer));
	if (cnn->Layers == NULL)
	{
		printf("Layers create fail!\n");
		return NULL;
	}
	for (i = 0; i < cnn->HiddenLayerNum + 2; i++)
	{
		InitLayer(&cnn->Layers[i]);
	}
	return cnn->Layers;
}

/* Decode pdf.LayerParameters into the Layer array. The flat parameter list
 * is consumed cursor-style via `cnt`; each layer type pulls a fixed number
 * of values (fc: OutC, ActiNum; conv/deconv: Pad, K0, K1, S0, S1, OutC,
 * ActiNum; pooling: Pad, K0, K1, S0, S1). Output sizes are derived from the
 * previous layer:
 *   valid padding (PaddingMode 0): Out = (In + S - K) / S  == floor((In-K)/S)+1
 *   same padding  (otherwise):     Out = (In + S - 1) / S  == ceil(In/S)
 *   deconv:                        Out = (In - 1) * S + K
 * Layer 0 takes the sample geometry. Returns 0 on success, -1 on an
 * unknown layer type (layers decoded so far keep their values). */
int LoadinPara2Layers(CNN *cnn, Predefine pdf)
{
	//Input
	cnn->Layers[0].OutH = cnn->SampleRow;
	cnn->Layers[0].OutW = cnn->SampleCol;
	cnn->Layers[0].OutC = cnn->SampleCha;
	int cnt = 0;	/* cursor into pdf.LayerParameters */
	int i;
	for (i = 1; i < cnn->HiddenLayerNum + 2; i++)
	{
		if (pdf.LayerType[i] == 0)			//F:oa — fully connected: OutC, ActiNum
		{
			cnn->Layers[i].LayerType = 0;
			cnn->Layers[i].OutC = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].ActiFuncNum = pdf.LayerParameters[cnt]; cnt = cnt + 1;

			/* fc output is a 1x1xOutC "volume" */
			cnn->Layers[i].OutH = 1;
			cnn->Layers[i].OutW = 1;
		}
		else if (pdf.LayerType[i] == 1)		//C:PKSOA — conv: Pad,K0,K1,S0,S1,OutC,ActiNum
		{
			cnn->Layers[i].LayerType = 1;
			cnn->Layers[i].PaddingMode = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].OutC = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].ActiFuncNum = pdf.LayerParameters[cnt]; cnt = cnt + 1;

			if (cnn->Layers[i].PaddingMode == 0)
			{
				/* valid padding: floor((In - K) / S) + 1 */
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH + cnn->Layers[i].Stride[0]
					- cnn->Layers[i].KernelSize[0]) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW + cnn->Layers[i].Stride[1]
					- cnn->Layers[i].KernelSize[1]) / cnn->Layers[i].Stride[1];
			}
			else {
				/* same padding: ceil(In / S) */
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH +
					cnn->Layers[i].Stride[0] - 1) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW +
					cnn->Layers[i].Stride[1] - 1) / cnn->Layers[i].Stride[1];
			}
		}
		else if (pdf.LayerType[i] == 2)		//deC:PKSOA — deconv: same parameter layout as conv
		{
			cnn->Layers[i].LayerType = 2;
			cnn->Layers[i].PaddingMode = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].OutC = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].ActiFuncNum = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			// WILL deC after FC?
			/* transposed convolution: Out = (In - 1) * S + K */
			cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH - 1)*
				cnn->Layers[i].Stride[0] + cnn->Layers[i].KernelSize[0];
			cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW - 1)*
				cnn->Layers[i].Stride[1] + cnn->Layers[i].KernelSize[1];
		}
		else if (pdf.LayerType[i] == 3)		//mp:PKS — max pool: Pad,K0,K1,S0,S1
		{
			cnn->Layers[i].LayerType = 3;
			cnn->Layers[i].PaddingMode = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;

			/* pooling keeps the channel count */
			cnn->Layers[i].OutC = cnn->Layers[i - 1].OutC;
			if (cnn->Layers[i].PaddingMode == 0)
			{
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH + cnn->Layers[i].Stride[0]
					- cnn->Layers[i].KernelSize[0]) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW + cnn->Layers[i].Stride[1]
					- cnn->Layers[i].KernelSize[1]) / cnn->Layers[i].Stride[1];
			}
			else {
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH +
					cnn->Layers[i].Stride[0] - 1) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW +
					cnn->Layers[i].Stride[1] - 1) / cnn->Layers[i].Stride[1];
			}
		}
		else if (pdf.LayerType[i] == 4)		//ap — average pool: Pad,K0,K1,S0,S1
		{
			cnn->Layers[i].LayerType = 4;
			cnn->Layers[i].PaddingMode = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].KernelSize[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[0] = pdf.LayerParameters[cnt]; cnt = cnt + 1;
			cnn->Layers[i].Stride[1] = pdf.LayerParameters[cnt]; cnt = cnt + 1;

			cnn->Layers[i].OutC = cnn->Layers[i - 1].OutC;
			if (cnn->Layers[i].PaddingMode == 0)
			{
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH + cnn->Layers[i].Stride[0]
					- cnn->Layers[i].KernelSize[0]) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW + cnn->Layers[i].Stride[1]
					- cnn->Layers[i].KernelSize[1]) / cnn->Layers[i].Stride[1];
			}
			else {
				cnn->Layers[i].OutH = (cnn->Layers[i - 1].OutH +
					cnn->Layers[i].Stride[0] - 1) / cnn->Layers[i].Stride[0];
				cnn->Layers[i].OutW = (cnn->Layers[i - 1].OutW +
					cnn->Layers[i].Stride[1] - 1) / cnn->Layers[i].Stride[1];
			}
		}
		else {
			return -1;	/* unknown layer type */
		}
	}
	return 0;
}

/* Allocate the input layer's activation storage: OutH matrices of
 * [OutW, OutC] (one per image row). Returns the layer, or NULL on
 * allocation failure.
 * Fix: the original dereferenced an unchecked malloc in the loop. */
Layer* SpaceCreateInputLayer(Layer *input)
{
	int i;
	input->ActiMat = (Mat*)malloc(input->OutH * sizeof(Mat));
	if (input->ActiMat == NULL)
	{
		printf("Input ActiMat create fail!\n");
		return NULL;
	}
	for (i = 0; i < input->OutH; ++i)
	{
		MatCreate(&input->ActiMat[i], input->OutW, input->OutC);
	}
	return input;
}

/* Allocate all matrices of a fully-connected layer fed by the flattened
 * [InH, InW, InC] output of the previous layer. The "+1" slots carry the
 * bias term:
 *   OriginMat[0]:        1 x (InDim+1)   flattened input plus bias slot
 *   KernelMat:           InDim x OutC    weights without bias
 *   WeightMat, NablaMat: (InDim+1) x OutC  weights-with-bias / gradient
 *   SumMat/ActiMat/DeriMat/DeltaMat[0]: 1 x OutC forward/backward buffers
 * Only NablaMat is zeroed (it is accumulated into); the rest are filled
 * elsewhere. Returns fc. */
Layer* SpaceCreateFullConnLayer(Layer *fc, int InH, int InW, int InC)
{
	int InDim = InH * InW * InC;	// flattened input length
	fc->OriginMat = (Mat*)malloc(sizeof(Mat));
	MatCreate(&fc->OriginMat[0], 1, InDim + 1);
	MatCreate(&fc->KernelMat, InDim, fc->OutC);
	MatCreate(&fc->WeightMat, InDim + 1, fc->OutC);
	MatCreate(&fc->NablaMat, InDim + 1, fc->OutC);
	MatZeros(&fc->NablaMat);
	fc->SumMat = (Mat*)malloc(sizeof(Mat));
	MatCreate(&fc->SumMat[0], 1, fc->OutC);
	fc->ActiMat = (Mat*)malloc(sizeof(Mat));
	MatCreate(&fc->ActiMat[0], 1, fc->OutC);
	fc->DeriMat = (Mat*)malloc(sizeof(Mat));
	MatCreate(&fc->DeriMat[0], 1, fc->OutC);
	fc->DeltaMat = (Mat*)malloc(sizeof(Mat));
	MatCreate(&fc->DeltaMat[0], 1, fc->OutC);
	// MatZeros(&fc->DeltaMat[0]);
	return fc;
}

/* Allocate all matrices of a convolution layer. Each OriginMat[i] row holds
 * one receptive field flattened to K0*K1*InC values plus a bias slot
 * (appears to be an im2col-style layout — confirm against the forward
 * pass), so the convolution becomes a matrix product per output row:
 *   KernelMat:           (K0*K1*InC) x OutC     filters without bias
 *   WeightMat, NablaMat: (K0*K1*InC + 1) x OutC filters-with-bias / gradient
 *   OriginMat:           OutH mats of [OutW, K0*K1*InC + 1]
 *   SumMat/ActiMat/DeriMat/DeltaMat: OutH mats of [OutW, OutC]
 * Only NablaMat is zeroed. Returns cv. */
Layer* SpaceCreateConvLayer(Layer *cv, int InH, int InW, int InC)
{
	int i;
	MatCreate(&cv->KernelMat, cv->KernelSize[0] * cv->KernelSize[1] * InC, cv->OutC);
	MatCreate(&cv->WeightMat, cv->KernelSize[0] * cv->KernelSize[1] * InC + 1, cv->OutC);
	MatCreate(&cv->NablaMat, cv->KernelSize[0] * cv->KernelSize[1] * InC + 1, cv->OutC);
	MatZeros(&cv->NablaMat);
	cv->OriginMat = (Mat*)malloc(cv->OutH * sizeof(Mat));
	cv->SumMat = (Mat*)malloc(cv->OutH * sizeof(Mat));
	cv->ActiMat = (Mat*)malloc(cv->OutH * sizeof(Mat));
	cv->DeriMat = (Mat*)malloc(cv->OutH * sizeof(Mat));
	cv->DeltaMat = (Mat*)malloc(cv->OutH * sizeof(Mat));
	for (i = 0; i < cv->OutH; ++i)
	{
		MatCreate(&cv->OriginMat[i], cv->OutW, cv->KernelSize[0] * cv->KernelSize[1] * InC + 1);
		MatCreate(&cv->SumMat[i], cv->OutW, cv->OutC);
		MatCreate(&cv->ActiMat[i], cv->OutW, cv->OutC);
		MatCreate(&cv->DeriMat[i], cv->OutW, cv->OutC);
		MatCreate(&cv->DeltaMat[i], cv->OutW, cv->OutC);
		// MatZeros(&cv->DeltaMat[i]);
	}
	return cv;
}

/* Allocate all matrices of a deconvolution (transposed-convolution) layer.
 * Unlike the conv layer there is no WeightMat and no bias slot:
 *   KernelMat, NablaMat: InC x (K0*K1*OutC)
 *   OriginMat: OutH mats of [InW, K0*K1*OutC]  (note: input width, not OutW)
 *   SumMat/ActiMat/DeriMat/DeltaMat: OutH mats of [OutW, OutC]
 * Only NablaMat is zeroed. InH is currently unused. Returns dec. */
Layer* SpaceCreateDeConvLayer(Layer *dec, int InH, int InW, int InC)
{
	int i;
	MatCreate(&dec->KernelMat, InC, dec->KernelSize[0] * dec->KernelSize[1] * dec->OutC);
	MatCreate(&dec->NablaMat, InC, dec->KernelSize[0] * dec->KernelSize[1] * dec->OutC);
	dec->OriginMat = (Mat*)malloc(dec->OutH * sizeof(Mat));
	dec->SumMat = (Mat*)malloc(dec->OutH * sizeof(Mat));
	dec->ActiMat = (Mat*)malloc(dec->OutH * sizeof(Mat));
	dec->DeriMat = (Mat*)malloc(dec->OutH * sizeof(Mat));
	dec->DeltaMat = (Mat*)malloc(dec->OutH * sizeof(Mat));
	MatZeros(&dec->NablaMat);
	for (i = 0; i < dec->OutH; ++i)
	{
		MatCreate(&dec->OriginMat[i], InW, dec->KernelSize[0] * dec->KernelSize[1] * dec->OutC);
		MatCreate(&dec->SumMat[i], dec->OutW, dec->OutC);
		MatCreate(&dec->ActiMat[i], dec->OutW, dec->OutC);
		MatCreate(&dec->DeriMat[i], dec->OutW, dec->OutC);
		MatCreate(&dec->DeltaMat[i], dec->OutW, dec->OutC);
		// MatZeros(&dec->DeltaMat[i]);
	}
	return dec;
}

/* Allocate all matrices of a max-pooling layer. No weights, sums, or
 * derivative buffers are needed:
 *   KernelMat: (OutH*OutW) x OutC — one slot per output cell/channel;
 *              presumably records the winning input position for backprop —
 *              confirm against the pooling forward/backward pass.
 *   OriginMat: OutH mats of [OutW, K0*K1*InC]  (flattened pooling windows)
 *   ActiMat/DeltaMat: OutH mats of [OutW, OutC]
 * InH is currently unused. Returns mp. */
Layer* SpaceCreateMaxPoolLayer(Layer *mp, int InH, int InW, int InC)
{
	int i;
	MatCreate(&mp->KernelMat, mp->OutH * mp->OutW, mp->OutC);
	mp->OriginMat = (Mat*)malloc(mp->OutH * sizeof(Mat));
	mp->ActiMat = (Mat*)malloc(mp->OutH * sizeof(Mat));
	mp->DeltaMat = (Mat*)malloc(mp->OutH * sizeof(Mat));
	for (i = 0; i < mp->OutH; ++i)
	{
		MatCreate(&mp->OriginMat[i], mp->OutW, mp->KernelSize[0] * mp->KernelSize[1] * InC);
		MatCreate(&mp->ActiMat[i], mp->OutW, mp->OutC);
		MatCreate(&mp->DeltaMat[i], mp->OutW, mp->OutC);
	}
	return mp;
}

/* Allocate all matrices of an average-pooling layer; mirrors
 * SpaceCreateMaxPoolLayer except KernelMat is shaped (K0*K1*InC) x OutC
 * like a conv kernel — presumably holding the fixed 1/(K0*K1) averaging
 * weights; confirm against the pooling forward pass.
 *   OriginMat: OutH mats of [OutW, K0*K1*InC]  (flattened pooling windows)
 *   ActiMat/DeltaMat: OutH mats of [OutW, OutC]
 * InH is currently unused. Returns ap. */
Layer* SpaceCreateAvePoolLayer(Layer *ap, int InH, int InW, int InC)
{
	int i;
	MatCreate(&ap->KernelMat, ap->KernelSize[0] * ap->KernelSize[1] * InC, ap->OutC);
	ap->OriginMat = (Mat*)malloc(ap->OutH * sizeof(Mat));
	ap->ActiMat = (Mat*)malloc(ap->OutH * sizeof(Mat));
	ap->DeltaMat = (Mat*)malloc(ap->OutH * sizeof(Mat));
	for (i = 0; i < ap->OutH; ++i)
	{
		MatCreate(&ap->OriginMat[i], ap->OutW, ap->KernelSize[0] * ap->KernelSize[1] * InC);
		MatCreate(&ap->ActiMat[i], ap->OutW, ap->OutC);
		MatCreate(&ap->DeltaMat[i], ap->OutW, ap->OutC);
	}
	return ap;
}

/* Allocate the matrix storage of every layer, dispatching on LayerType
 * (0 = fully-connected, 1 = conv, 2 = deconv, 3 = max-pool, 4 = avg-pool).
 * Each allocator receives the previous layer's [H,W,C] output size.
 * Unknown types are skipped. Always returns 0. */
int CreateNNMatSpace(CNN *cnn)
{
	int i;

	SpaceCreateInputLayer(&cnn->Layers[0]);
	for (i = 1; i < cnn->HiddenLayerNum + 2; ++i)
	{
		Layer *prev = &cnn->Layers[i - 1];
		Layer *cur = &cnn->Layers[i];

		switch (cur->LayerType)
		{
		case 0:
			SpaceCreateFullConnLayer(cur, prev->OutH, prev->OutW, prev->OutC);
			break;
		case 1:
			SpaceCreateConvLayer(cur, prev->OutH, prev->OutW, prev->OutC);
			break;
		case 2:
			SpaceCreateDeConvLayer(cur, prev->OutH, prev->OutW, prev->OutC);
			break;
		case 3:
			SpaceCreateMaxPoolLayer(cur, prev->OutH, prev->OutW, prev->OutC);
			break;
		case 4:
			SpaceCreateAvePoolLayer(cur, prev->OutH, prev->OutW, prev->OutC);
			break;
		default:
			break;
		}
	}
	return 0;
}

/* Full network setup: allocate the layer array, create the 1 x LabelNum
 * one-hot target matrix, load the predefined parameters into the layers,
 * then allocate every layer's matrix storage.  Always returns 0. */
int CreateNNSpaceAndLoadinPara2Layer(CNN *cnn, Predefine pdf)
{
	cnn->Layers = SpaceCreateLayers(cnn);
	MatCreate(&cnn->OnehotMat, 1, pdf.LabelNum);
	LoadinPara2Layers(cnn, pdf);
	CreateNNMatSpace(cnn);
	return 0;
}

/************************************************************************/
/*                              CNN创建                                  */
/************************************************************************/

/************************************************************************/
/*                           权值初始化函数操作                         */
/************************************************************************/
float gaussrand_NORMAL() {
	float V1 = 0., V2 = 0., S = 0.;
	int phase = 0;
	int count = 0;
	float X;


	if (phase == 0) {
		while (count == 0 || (S >= 1 || S == 0)) {
			float U1 = (float)(rand() % 10000) / 10000.f;
			float U2 = (float)(rand() % 10000) / 10000.f;


			V1 = 2 * U1 - 1;
			V2 = 2 * U2 - 1;
			S = V1 * V1 + V2 * V2;
			count += 1;
		};

		float temp_S_1 = (float)log(S);
		X = V1 * (float)sqrt(-2 * temp_S_1 / S);
	}
	else
	{
		float temp_S_2 = (float)log(S);
		X = V2 * (float)sqrt(-2 * temp_S_2 / S);
	}



	phase = 1 - phase;


	return X;
}

/* Shift/scale a standard-normal sample to N(mean, stdc^2). */
float gaussrand(float mean, float stdc) {
	float z = gaussrand_NORMAL();
	return mean + z * stdc;
}

// 全0初始化
// Zero-fill initialization: delegates to MatZeros, returns src for chaining.
Mat* MatInitZero(Mat *src)
{
	MatZeros(src);
	return src;
}

/* 初始化成0均值 0.01方差的标准化数据*/
/* Fill src with Gaussian noise, mean 0, std 0.1.
 * Fix: seed the PRNG only once per process.  The original called
 * srand(time(NULL)) on every invocation, so all matrices initialized
 * within the same second were re-seeded identically and received the
 * same "random" values. */
Mat* MatInitRandomNormalization(Mat *src)
{
	static int seeded = 0;
	if (!seeded) {
		srand((unsigned int)time(NULL));
		seeded = 1;
	}

	int row, col;
	for (row = 0; row < src->row; ++row) {
		for (col = 0; col < src->col; ++col) {
			(src->element[row])[col] = gaussrand(0.f, 0.1f);  // mean, std
		}
	}
	return src;
}

/* Xavier-style initialization: N(0, 0.1) samples scaled by sqrt(1/fan_in),
 * with fan_in = src->row.
 * Fix: seed the PRNG only once per process; re-seeding with time(NULL) on
 * every call gave identical values to all matrices initialized within the
 * same second. */
Mat* MatInitXavier(Mat *src)
{
	static int seeded = 0;
	if (!seeded) {
		srand((unsigned int)time(NULL));
		seeded = 1;
	}

	int row, col;
	for (row = 0; row < src->row; ++row) {
		for (col = 0; col < src->col; ++col) {
			(src->element[row])[col] = gaussrand(0.f, 0.1f) * (float)sqrt(1.f / src->row);
		}
	}
	return src;
}

/* He-style initialization: N(0, 0.9) samples scaled by sqrt(2/fan_in),
 * with fan_in = src->row.
 * Fix: seed the PRNG only once per process; re-seeding with time(NULL) on
 * every call gave identical values to all matrices initialized within the
 * same second. */
Mat* MatInitHe(Mat *src)
{
	static int seeded = 0;
	if (!seeded) {
		srand((unsigned int)time(NULL));
		seeded = 1;
	}

	int row, col;
	for (row = 0; row < src->row; ++row) {
		for (col = 0; col < src->col; ++col) {
			(src->element[row])[col] = gaussrand(0.f, 0.9f) * (float)sqrt(2.f / src->row);
		}
	}
	return src;
}

// mat plus 初始化

/* Xavier-style initialization over a stack of src_h matrices.
 * Fix: seed the PRNG only once per process (see MatInitXavier: per-call
 * re-seeding with time(NULL) repeats the same sequence within a second).
 * NOTE(review): `1.f / src_h * src[0].row * src[0].col` parses as
 * (1/src_h)*row*col, not 1/(src_h*row*col); the expression is preserved
 * as written -- confirm the intended fan-in scaling. */
Mat* MatplusInitXavier(Mat *src, int src_h)
{
	static int seeded = 0;
	if (!seeded) {
		srand((unsigned int)time(NULL));
		seeded = 1;
	}

	int i, j, k;
	for (i = 0; i < src_h; ++i)
	{
		for (j = 0; j < src[0].row; ++j) {
			for (k = 0; k < src[0].col; ++k) {
				(src[i].element[j])[k] = gaussrand(0.f, 0.1f) * (float)sqrt(1.f / src_h * src[0].row * src[0].col);
			}
		}
	}

	return src;
}

/* He-style initialization over a stack of src_h matrices.
 * Fix: seed once per process instead of on every call -- re-seeding with
 * the same fixed constant made every initialized stack identical.  The
 * fixed seed (rather than time(NULL)) keeps runs reproducible.
 * NOTE(review): `2.f / src_h * src[0].row * src[0].col` parses as
 * (2/src_h)*row*col; He scaling normally uses 2/fan_in -- confirm. */
Mat* MatplusInitHe(Mat *src, int src_h)
{
	static int seeded = 0;
	if (!seeded) {
		srand(19950826);
		seeded = 1;
	}

	int i, j, k;
	for (i = 0; i < src_h; ++i)
	{
		for (j = 0; j < src[0].row; ++j)
		{
			for (k = 0; k < src[0].col; ++k)
			{
				(src[i].element[j])[k]= gaussrand(0.f, 0.9f) * (float)sqrt(2.f / src_h * src[0].row * src[0].col);
			}
		}
	}
	return src;
}

/************************************************************************/
/*                           权值初始化函数操作                         */
/************************************************************************/


/************************************************************************/
/*                             CNN初始化                                 */
/************************************************************************/
/* Dispatch weight initialization by style id:
 * 0 = zeros, 1 = N(0, 0.1), 2 = Xavier, 3 = He; anything else is an error. */
void WeightInit_ChooseWay(Mat *Weight, int Style_initWeight) {
	switch (Style_initWeight) {
	case 0:
		MatInitZero(Weight);
		break;
	case 1:
		MatInitRandomNormalization(Weight);
		break;
	case 2:
		MatInitXavier(Weight);
		break;
	case 3:
		MatInitHe(Weight);
		break;
	default:
		printf("error for WeightInit_ChooseWay, please check Style_initWeight variable!\n");
		break;
	}
}

/* Average-pooling "kernel": every weight is 1/(kh*kw) so the matrix product
 * with a flattened window computes the window mean. */
void WeightInit_AverPool(Mat* KernelMat, int kers[2])
{
	int area = kers[0] * kers[1];
	float w = (float)(1.0 / (float)area);
	int r, c;
	for (r = 0; r < KernelMat->row; ++r)
	{
		for (c = 0; c < KernelMat->col; ++c)
		{
			(KernelMat->element[r])[c] = w;
		}
	}
}

/* Initialize the kernels of every non-input layer.
 * FC (0) and conv (1): random-init KernelMat, then build WeightMat from it
 * by adding the bias row (MatPlusRow).  Average pooling (4): fixed averaging
 * weights.  Everything else: plain random init of KernelMat. */
int NNWeightInit(CNN *cnn)
{
	int i;
	for (i = 1; i < cnn->HiddenLayerNum + 2; ++i)
	{
		switch (cnn->Layers[i].LayerType)
		{
		case 0:
		case 1:
			WeightInit_ChooseWay(&cnn->Layers[i].KernelMat, cnn->WeightInitWayNum);
			MatPlusRow(&cnn->Layers[i].KernelMat, &cnn->Layers[i].WeightMat);
			break;
		case 4:
			WeightInit_AverPool(&cnn->Layers[i].KernelMat, cnn->Layers[i].KernelSize);
			break;
		default:
			WeightInit_ChooseWay(&cnn->Layers[i].KernelMat, cnn->WeightInitWayNum);
			break;
		}
	}
	return 0;
}

/************************************************************************/
/*                             CNN初始化                                 */
/************************************************************************/

/************************************************************************/
/*                           激活函数操作                               */
/************************************************************************/
float sigmoid(float z)
{
	return (float)(1 / (1 + exp(-z)));
}

//直接调用 math.h 里函数
//float tanh(float z){
//	return (exp(z) - exp(-z)) / (exp(z) + exp(-z));
//}
//

/* Rectified linear unit: z for positive input, 0 otherwise. */
float relu(float z)
{
	if (z > 0)
	{
		return z;
	}
	return 0;
}

/* Leaky ReLU: z for z >= 0, a*z for z < 0 (a is the leak slope).
 * Bug fix: the original returned the constant `a` itself for negative
 * inputs instead of a*z.  The derivative helper MatDerivationLeakyRelu
 * (1 for positive input, a otherwise) confirms the intended activation
 * is a*z. */
float leakyRelu(float z, float a)
{
	return z < 0 ? a * z : z;
}

/* Row-wise softmax of src into dst (shapes must match).
 * Subtracts each row's maximum before exponentiating for numerical
 * stability, then normalizes each row by its sum.  Returns dst, or NULL
 * on a shape mismatch when checking is enabled. */
Mat* MatSoftmax(Mat *src, Mat *dst)
{
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatSofmax\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	Mat tempV;
	MatCreate(&tempV, src->row, 1);   // scratch column vector: holds row max, then row sum

	MatRowMax(src, &tempV);   // per-row maximum

	MatVectorSub(src, &tempV, dst);   // subtract each row's max (stability shift)

	MatExp(dst, dst);   // element-wise exponential (original comment said "log"; the call is MatExp)

	MatRowSum(dst, &tempV);   // per-row sum of exponentials

	MatVectorDiv(dst, &tempV, dst);   // normalize each row by its sum

	MatDelete(&tempV);

	return dst;
}

/* Identity "activation": element-wise copy of src into dst. */
Mat* MatNoneActi(Mat *src, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatNoneActi\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = src->element[r][c];
		}
	}

	return dst;
}

/* Element-wise sigmoid: dst[r][c] = sigmoid(src[r][c]); shapes must match. */
Mat* MatSigmoid(Mat *src, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatSigmoid\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = sigmoid(src->element[r][c]);
		}
	}

	return dst;
}

/* Element-wise hyperbolic tangent: dst[r][c] = tanh(src[r][c]). */
Mat* MatTanh(Mat *src, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatTanh\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = (float)tanh(src->element[r][c]);
		}
	}

	return dst;
}

/* Element-wise ReLU: dst[r][c] = max(src[r][c], 0). */
Mat* MatRelu(Mat *src, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatRelu\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = relu(src->element[r][c]);
		}
	}

	return dst;
}

/* Element-wise leaky ReLU with leak slope a: dst[r][c] = leakyRelu(src[r][c], a). */
Mat* MatLeakyRelu(float a, Mat *src, Mat *dst)
{
	int r, c;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatLeakyRelu\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = leakyRelu(src->element[r][c], a);
		}
	}

	return dst;
}

//Derivative
/* Softmax derivative, summed over each Jacobian row:
 * dst[r][c] = S_c*(1-S_c) + sum_{i != c} (-S_i*S_c), where S = softmax(src row).
 * WARNING: overwrites src in place with softmax(src) before differentiating.
 * Returns dst, or NULL on a shape mismatch when checking is enabled. */
Mat* MatDerivationSoftmax(Mat *src, Mat *dst)
{
	int row, col, i;
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationSofmax\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	MatSoftmax(src, src);   // src now holds the softmax activations
	MatZeros(dst);

	for (row = 0; row < src->row; row++) {
		for (col = 0; col < src->col; col++) {
			for (i = 0; i < src->col; i++) {
				if (i == col) {
					// diagonal Jacobian term: S_c * (1 - S_c)
					(dst->element[row])[col] += (src->element[row])[i] * (1 - (src->element[row])[col]);
				}
				else {
					// off-diagonal term: -S_i * S_c
					(dst->element[row])[col] += -(src->element[row])[i] * (src->element[row])[col];
				}
			}
		}
	}


	return dst;
}

/* The identity activation has derivative 1 everywhere: fill dst with 1. */
Mat* MatDerivationNoneActi(Mat *src, Mat *dst)
{
	int r, c;
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationNoneActi\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = 1.f;
		}
	}

	return dst;
}

/* Sigmoid derivative built from matrix helpers: s = sigmoid(src),
 * dst = s .* (1 - s), computed as temp1 = sigmoid(src),
 * temp2 = 1 + (-1 * temp1), dst = temp1 .* temp2 (element-wise product).
 * NOTE(review): callers in this file (via MatplusDerivate) pass the
 * already-activated matrix, making this sigma(a)*(1-sigma(a)) rather than
 * a*(1-a) -- confirm which convention is intended. */
Mat* MatDerivationSigmoid(Mat *src, Mat *dst)
{
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationSigmoid\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif
	Mat temp1Mat;
	Mat temp2Mat;
	MatCreate(&temp1Mat, src->row, src->col);
	MatCreate(&temp2Mat, src->row, src->col);

	MatSigmoid(src, &temp1Mat);
	// temp2 = -temp1
	MatNumMul(-1.f, &temp1Mat, &temp2Mat);
	// temp2 = 1 - temp1
	MatNumAdd(1.f, &temp2Mat, &temp2Mat);
	// dst = temp1 .* temp2 = s * (1 - s)
	MatProduct(&temp1Mat, &temp2Mat, dst);
	MatDelete(&temp1Mat);
	MatDelete(&temp2Mat);
	return dst;
}

/* Tanh derivative built from matrix helpers: dst = 1 - tanh(src)^2.
 * NOTE(review): callers in this file (via MatplusDerivate) pass the
 * already-activated matrix, making this 1 - tanh(a)^2 rather than
 * 1 - a^2 -- confirm which convention is intended. */
Mat* MatDerivationTanh(Mat *src, Mat *dst)
{
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationTanh\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	Mat tempMat;
	MatCreate(&tempMat, src->row, src->col);

	MatTanh(src, &tempMat);
	// tempMat = tanh(src)^2
	MatSquare(&tempMat, &tempMat);
	// tempMat = -tanh(src)^2
	MatNumMul(-1.f, &tempMat, &tempMat);
	// dst = 1 - tanh(src)^2
	MatNumAdd(1.f, &tempMat, dst);

	MatDelete(&tempMat);
	return dst;
}

/* ReLU derivative: 1 where the input is positive, 0 otherwise. */
Mat* MatDerivationRelu(Mat *src, Mat *dst)
{
	int r, c;
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationRelu\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif
	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = (src->element[r][c] > 0) ? 1.f : 0.f;
		}
	}

	return dst;
}

/* Leaky-ReLU derivative: 1 where the input is positive, the leak slope a otherwise. */
Mat* MatDerivationLeakyRelu(float a, Mat *src, Mat *dst)
{
	int r, c;
#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for MatDerivationLeakyRelu\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif
	for (r = 0; r < src->row; ++r) {
		for (c = 0; c < src->col; ++c) {
			dst->element[r][c] = (src->element[r][c] > 0) ? 1.f : a;
		}
	}

	return dst;
}
/************************************************************************/
/*                           激活函数操作                               */
/************************************************************************/

/************************************************************************/
/*                           损失函数操作                               */
/************************************************************************/
/* Expand a column of class indices (src, rows x 1) into a one-hot matrix
 * dst (rows x k): dst[r][(int)src[r][0]] = 1, everything else 0.
 * Portability fix: the original used the C++ function-style cast
 * `int(...)`, which does not compile as C; replaced with a C-style cast
 * (valid in both languages). */
Mat* OneHot(Mat *src, int k, Mat *dst)
{
	int row;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != 1 || dst->col != k) {
		printf("\t\terr check, unmathed matrix for Onehot\t\t\n");
		printf("\t\tsrcMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tThe number of class:\n\t\t\t");
		printf("%d\n", k);
		printf("\t\tdstMatShape:\n\t\t\t");
		MatShape(dst);
		return NULL;
	}
#endif

	MatZeros(dst);

	for (row = 0; row < dst->row; row++) {
		(dst->element[row])[(int)(src->element[row])[0]] = 1.0f;
	}

	return dst;
}

/*均方误差*/
/* Mean squared error between predictions (src) and one-hot targets (dst):
 * loss = (1/N) * sum over rows of ||src_row - dst_row||^2.
 * Returns -1 when shape checking is enabled and fails.
 * Portability fix: the original used the C++ function-style cast
 * `float(...)`, which does not compile as C; replaced with a C-style cast. */
float MSE(Mat *src, Mat *dst)
{
	int row;
	float loss = 0.f;
	Mat sub_square_mat;   // (src - dst)^2, element-wise
	Mat sum_row_mat;      // per-row sums of the squared differences

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for Loss Function MSE\t\t\n");
		printf("\t\tPredictoinMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tOneHotMatShape:\n\t\t\t");
		MatShape(dst);
		return -1.;     // shape check failed
	}
#endif

	MatCreate(&sub_square_mat, src->row, src->col);
	MatCreate(&sum_row_mat, src->row, 1);

	MatSub(src, dst, &sub_square_mat);
	MatSquare(&sub_square_mat, &sub_square_mat);
	MatRowSum(&sub_square_mat, &sum_row_mat);

	for (row = 0; row < src->row; ++row) {
		loss = loss + (sum_row_mat.element[row])[0];
	}
	loss = loss / (float)(src->row);

	MatDelete(&sub_square_mat);
	MatDelete(&sum_row_mat);
	return loss;
}


/* Mean cross-entropy loss: -(1/N) * sum of y*log(p), where src holds the
 * predicted probabilities and dst the one-hot targets.
 * Returns -1 when shape checking is enabled and fails. */
float CrossEntropy(Mat *src, Mat *dst)
{
	int r, c;
	float loss = 0.f;

#ifdef MAT_LEGAL_CHECKING
	if (src->row != dst->row || src->col != dst->col) {
		printf("\t\terr check, unmathed matrix for Loss Function CrossEntropy\t\t\n");
		printf("\t\tPredictoinMatShape:\n\t\t\t");
		MatShape(src);
		printf("\t\tOneHotMatShape:\n\t\t\t");
		MatShape(dst);
		return -1.;     // shape check failed
	}
#endif

	for (r = 0; r < src->row; r++) {
		for (c = 0; c < src->col; c++) {
			loss += (float)(-1 * (dst->element[r])[c] * log((src->element[r])[c]));
		}
	}

	loss = loss / (src->row);

	return loss;
}


//对激活值求导数
/* Gradient of MSE w.r.t. the output activations: dst = ActiMat - One_hotMat
 * (the usual (z - y) term; NOTE(review): the 1/N and factor-2 scaling are
 * presumably folded in elsewhere -- confirm against the training loop).
 * One_hotMat is passed by value (struct copy), but its element pointer
 * still refers to the caller's storage. */
Mat * MSEDerivative(Mat *ActiMat, Mat *DerivativeActiMat, Mat One_hotMat) {

#ifdef MAT_LEGAL_CHECKING
	if (ActiMat->row != DerivativeActiMat->row || ActiMat->col != DerivativeActiMat->col || ActiMat->row != One_hotMat.row || ActiMat->col != One_hotMat.col) {
		printf("\t\terr check, unmathed matrix for Loss Function MSEDerivative\t\t\n");
		printf("\t\tActiMatShape:\n\t\t\t");
		MatShape(ActiMat);
		printf("\t\tDerivativeActiMatShape:\n\t\t\t");
		MatShape(DerivativeActiMat);
		printf("\t\tOne_hotMatShape:\n\t\t\t");
		MatShape(&One_hotMat);
		return NULL;     // shape check failed
	}
#endif

	//partial L /partial z = z - y
	return MatSub(ActiMat, &One_hotMat, DerivativeActiMat);

}


/* Gradient of cross-entropy w.r.t. the output activations:
 * dL/dz = -(y / z); a division by (near-)zero activation is clamped to a
 * huge finite slope instead of producing infinity. */
Mat * CrossEntropyDerivative(Mat *ActiMat, Mat *DerivativeActiMat, Mat One_hotMat) {
	int r, c;
#ifdef MAT_LEGAL_CHECKING
	if (ActiMat->row != DerivativeActiMat->row || ActiMat->col != DerivativeActiMat->col || ActiMat->row != One_hotMat.row || ActiMat->col != One_hotMat.col) {
		printf("\t\terr check, unmathed matrix for Loss Function CrossEntropyDerivative\t\t\n");
		printf("\t\tActiMatShape:\n\t\t\t");
		MatShape(ActiMat);
		printf("\t\tDerivativeActiMatShape:\n\t\t\t");
		MatShape(DerivativeActiMat);
		printf("\t\tOne_hotMatShape:\n\t\t\t");
		MatShape(&One_hotMat);
		return NULL;     // shape check failed
	}
#endif

	for (r = 0; r < ActiMat->row; r++) {
		for (c = 0; c < ActiMat->col; c++) {
			float z = (ActiMat->element[r])[c];
			float y = (One_hotMat.element[r])[c];
			if (equal(z, 0.f) == 1) {
				(DerivativeActiMat->element[r])[c] = -y * 10000000000;
			}
			else {
				(DerivativeActiMat->element[r])[c] = -y / z;
			}
		}
	}

	return DerivativeActiMat;

}
/************************************************************************/
/*                           损失函数操作                               */
/************************************************************************/

/************************************************************************/
/*                             传播辅助函数                               */
/************************************************************************/

/* im2col-style patch extraction WITH a leading bias column.
 * src: stack of src_h row matrices (src[0].row = input width, src[0].col =
 * input channels).  des: stack of des_h matrices whose rows are
 * [1.0, flattened kers[0] x kers[1] x channels patch].  strd is the stride;
 * pad selects 0 = valid, 1 = padded ("same"-style, split evenly with pdh/2
 * in front); any other pad value returns NULL.  Returns NULL on mismatch. */
Mat* MatPadReConstructBias(Mat *des, int des_h, Mat *src, int src_h, int kers[2], int strd[2], int pad)
{
	// Dimensions are primarily guaranteed at space-creation time; the
	// explicit check below is a safety net.
	int pdh, pdw;
	if (pad == 0)
	{
		pdh = 0;
		pdw = 0;
	}
	else if(pad == 1)
	{
		// total padding along height/width
		// NOTE(review): kers - size % strd assumes size % strd != 0 -- confirm
		if (strd[0] == 1) pdh = kers[0] - 1;
		else pdh = kers[0] - src_h%strd[0];
		if (strd[1] == 1) pdw = kers[1] - 1;
		else pdw = kers[1] - src[0].row%strd[1];
	}
	else {
		return NULL;
	}
	if (
		(!pad && ((des_h != (src_h + strd[0] - kers[0]) / strd[0]) || (des[0].row != (src[0].row + strd[1] - kers[1]) / strd[1]))) ||
		(pad && ((des_h != (src_h + strd[0] - 1) / strd[0]) || (des[0].row != (src[0].row + strd[1] - 1) / strd[1]))) ||
		des[0].col != kers[0] * kers[1] * src[0].col + 1
		)
	{
		printf("\t\terr check, unmathed matrix for MatPadReConstruct\t\t\n");
		printf("\t\tDesMatShape:\n\t\t\t");
		MatplusShape(des, des_h);
		printf("\t\tSrcMatShape:\n\t\t\t");
		MatplusShape(src, src_h);
		return NULL;     // shape check failed
	}
	int i, j, k, m, n;
	for (i = 0; i < des_h; ++i)
	{
		for (j = 0; j < des[0].row; ++j)
		{
			(des[i].element[j])[0] = 1.0;   // bias input
			for (k = 0; k < src[i].col; ++k)   // channel
			{
				for (m = 0; m < kers[0]; ++m)   // kernel row
				{
					for (n = 0; n < kers[1]; ++n)   // kernel col
					{
						// positions that fall in the padded border read as 0
						if (i*strd[0] + m < pdh / 2 ||
							j*strd[1] + n < pdw / 2 ||
							i*strd[0] + m >= src_h + pdh / 2 ||
							j*strd[1] + n >= src[0].row + pdw / 2)
						{
							(des[i].element[j])[k*kers[0] * kers[1] + m*kers[1] + n + 1] = 0;
						}
						else {
							(des[i].element[j])[k*kers[0] * kers[1] + m*kers[1] + n + 1]
								= (src[i*strd[0] + m - pdh / 2].element[j*strd[1] + n - pdw / 2])[k];

						}
					}
				}
			}
		}
	}
	return des;
}

/* im2col-style patch extraction WITHOUT a bias column: identical to
 * MatPadReConstructBias except each des row is just the flattened
 * kers[0] x kers[1] x channels patch (no leading 1.0, no +1 offset).
 * pad: 0 = valid, 1 = padded; any other value returns NULL.
 * Returns NULL on shape mismatch. */
Mat* MatPadReConstruct(Mat *des, int des_h, Mat *src, int src_h, int kers[2], int strd[2], int pad)
{
	int pdh, pdw;
	if (pad == 0)
	{
		pdh = 0;
		pdw = 0;
	}
	else if (pad == 1)
	{
		// total padding along height/width
		// NOTE(review): kers - size % strd assumes size % strd != 0 -- confirm
		if (strd[0] == 1) pdh = kers[0] - 1;
		else pdh = kers[0] - src_h%strd[0];
		if (strd[1] == 1) pdw = kers[1] - 1;
		else pdw = kers[1] - src[0].row%strd[1];
	}
	else {
		return NULL;
	}
	if (
		(!pad && ((des_h != (src_h + strd[0] - kers[0]) / strd[0]) || (des[0].row != (src[0].row + strd[1] - kers[1]) / strd[1]))) ||
		(pad && ((des_h != (src_h + strd[0] - 1) / strd[0]) || (des[0].row != (src[0].row + strd[1] - 1) / strd[1]))) ||
		des[0].col != kers[0] * kers[1] * src[0].col
		)
	{
		printf("\t\terr check, unmathed matrix for MatPadReConstruct\t\t\n");
		printf("\t\tDesMatShape:\n\t\t\t");
		MatplusShape(des, des_h);
		printf("\t\tSrcMatShape:\n\t\t\t");
		MatplusShape(src, src_h);
		return NULL;     // shape check failed
	}
	int i, j, k, m, n;
	for (i = 0; i < des_h; ++i)
	{
		for (j = 0; j < des[0].row; ++j)
		{
			for (k = 0; k < src[i].col; ++k)   // channel
			{
				for (m = 0; m < kers[0]; ++m)   // kernel row
				{
					for (n = 0; n < kers[1]; ++n)   // kernel col
					{
						// positions that fall in the padded border read as 0
						if (i*strd[0] + m < pdh / 2 ||
							j*strd[1] + n < pdw / 2 ||
							i*strd[0] + m >= src_h + pdh / 2 ||
							j*strd[1] + n >= src[0].row + pdw / 2)
						{
							(des[i].element[j])[k*kers[0] * kers[1] + m*kers[1] + n] = 0;
						}
						else {
							(des[i].element[j])[k*kers[0] * kers[1] + m*kers[1] + n]
								= (src[i*strd[0] + m - pdh / 2].element[j*strd[1] + n - pdw / 2])[k];

						}
					}
				}
			}
		}
	}
	return des;
}

/* Plane-wise matrix product: sm[i] = om[i] * wm for every plane i.
 * Shapes: om is sm_h planes of (row x K), wm is K x col, sm is sm_h planes
 * of (row x col).  Returns NULL on shape mismatch. */
Mat* MatplusMulMat(Mat *sm, int sm_h, Mat *om, int om_h, Mat wm)
{
	if (om_h!=sm_h||om[0].row!=sm[0].row||wm.row!=om[0].col||wm.col!=sm[0].col) {
		printf("\t\terr check, unmathed matrix for MatplusMulMat\t\t\n");
		printf("\t\tOriginMatShape:\n\t\t\t");
		MatplusShape(om, sm_h);
		printf("\t\tWeightMatShape:\n\t\t\t");
		MatShape(&wm);
		printf("\t\tSumMatShape:\n\t\t\t");
		MatplusShape(sm, sm_h);
		return NULL;     // shape check failed
	}
	int p, r, c, t;
	MatplusZeros(sm, sm_h);
	for (p = 0; p < sm_h; ++p)
	{
		for (r = 0; r < sm[0].row; ++r)
		{
			for (c = 0; c < sm[0].col; ++c)
			{
				float acc = 0.0;
				for (t = 0; t < om[0].col; ++t)
				{
					acc = acc + (om[p].element[r])[t] * (wm.element[t])[c];
				}
				(sm[p].element[r])[c] = acc;
			}
		}
	}
	return sm;
}

/* dst = src * km^T with the result re-flowed into dst's (possibly different)
 * 3-D shape: products are produced in (plane, row, kernel-row) order and
 * written sequentially through the cnt1/cnt2/cnt3 cursor, which wraps at
 * dst[0].col and dst[0].row.  Only the total element counts must match. */
Mat* MatplusMulMat_T_unmatch(Mat *dst, int dst_h, Mat *src, int src_h, Mat km)
{
	if (src_h*src[0].row*km.row!=dst_h*dst[0].row*dst[0].col||src[0].col!=km.col) {
		printf("\t\terr check, unmathed matrix for MatplusMulMat_T_unmatch\t\t\n");
		printf("\t\tDstShape:\n\t\t\t");
		MatplusShape(dst, dst_h);
		printf("\t\tKernelMatShape:\n\t\t\t");
		MatShape(&km);
		printf("\t\tSrcShape:\n\t\t\t");
		MatplusShape(src, src_h);
		return NULL;     // shape check failed
	}
	int cnt1 = 0, cnt2 = 0, cnt3 = 0;   // write cursor into dst: plane, row, col
	int i, j, k, l;
	for (i = 0; i < src_h; ++i)
	{
		for (j = 0; j < src[0].row; ++j)
		{
			for (k = 0; k < km.row; ++k)
			{
				// dot product of src row (i, j) with kernel row k
				float tmp = 0.0;
				for (l = 0; l < km.col; ++l)
				{
					tmp = tmp + (src[i].element[j])[l] * (km.element[k])[l];
				}
				(dst[cnt1].element[cnt2])[cnt3] = tmp;
				// advance the flattened write cursor with carry
				++cnt3;
				if (cnt3 >= dst[0].col) {
					++cnt2;
					cnt3 = 0;
					if (cnt2 >= dst[0].row) {
						++cnt1;
						cnt2 = 0;
					}
				}
			}
		}
	}
	return dst;
}

/* Plane-wise product with a transposed kernel: dst[p] = src[p] * km^T,
 * i.e. dst element (r, c) is the dot product of src row (p, r) with
 * kernel row c.  Returns NULL on shape mismatch. */
Mat* MatplusMulMat_T(Mat *dst, int dst_h, Mat *src, int src_h, Mat km)
{
	if (src_h != dst_h || src[0].row != dst[0].row || km.row != dst[0].col || src[0].col != km.col) {
		printf("\t\terr check, unmathed matrix for MatplusMulMat_T\t\t\n");
		printf("\t\tDstShape:\n\t\t\t");
		MatplusShape(dst, dst_h);
		printf("\t\tKernelMatShape:\n\t\t\t");
		MatShape(&km);
		printf("\t\tSrcShape:\n\t\t\t");
		MatplusShape(src, src_h);
		return NULL;     // shape check failed
	}
	int p, r, c, t;

	for (p = 0; p < dst_h; ++p)
	{
		for (r = 0; r < dst[0].row; ++r)
		{
			for (c = 0; c < dst[0].col; ++c)
			{
				float acc = 0.0;
				for (t = 0; t < src[0].col; ++t)
				{
					acc = acc + (src[p].element[r])[t] * (km.element[c])[t];
				}
				(dst[p].element[r])[c] = acc;
			}
		}
	}
	return dst;
}

/* Inverse of the im2col reconstruction (col2im): scatter-add each flattened
 * patch element of src back to its spatial position in des.  Overlapping
 * patches accumulate.  des must have exactly the "valid" deconstructed
 * dimensions; the pad parameter is accepted but unused here (see
 * MatDeConstructfwd for the padded variant).  Returns NULL on mismatch. */
Mat* MatDeConstruct(Mat *des, int des_h, Mat *src, int src_h, int kers[2], int strd[2], int pad)
{
	if (des_h != (src_h - 1)*strd[0] + kers[0] || des[0].row != (src[0].row - 1)*strd[1] + kers[1] || kers[0] * kers[1] * des[0].col != src[0].col)
	{
		printf("\t\terr check, unmathed matrix for MatDeConstruct\t\t\n");
		printf("\t\tDstShape:\n\t\t\t");
		MatplusShape(des, des_h);
		printf("\t\tSrcShape:\n\t\t\t");
		MatplusShape(src, src_h);
		return NULL;     // shape check failed
	}
	MatplusZeros(des, des_h);
	int i, j, k;
	for (i = 0; i < src_h; ++i)
	{
		for (j = 0; j < src[0].row; ++j)
		{
			for (k = 0; k < src[i].col; ++k)
			{
				// decode flat index k into (kernel row, kernel col, channel)
				int d1, d2, d3;
				d1 = i*strd[0] + (k / kers[1]) % kers[0]; // (k % (kers[0] * kers[1])) / kers[1]
				d2 = j*strd[1] + k%kers[1]; // (k % (kers[0] * kers[1])) % kers[1]
				d3 = k / (kers[0] * kers[1]);
				// accumulate contributions from overlapping patches
				(des[d1].element[d2])[d3] = (des[d1].element[d2])[d3] + (src[i].element[j])[k];
			}
		}
	}
	return des;
}

/* col2im scatter-add like MatDeConstruct, but padding-aware: contributions
 * that fall inside the padded border are dropped rather than written.
 * pad: 0 = valid, 1 = padded (same pdh/pdw split as MatPadReConstruct);
 * any other value returns NULL.  The dimension check is currently disabled
 * (commented out below). */
Mat* MatDeConstructfwd(Mat *des, int des_h, Mat *src, int src_h, int kers[2], int strd[2], int pad)
{
	int pdh, pdw;
	if (pad == 0)
	{
		pdh = 0;
		pdw = 0;
	}
	else if (pad == 1)
	{
		// total padding along height/width
		if (strd[0] == 1) pdh = kers[0] - 1;
		else pdh = kers[0] - src_h%strd[0];
		if (strd[1] == 1) pdw = kers[1] - 1;
		else pdw = kers[1] - src[0].row%strd[1];
	}
	else {
		return NULL;
	}
	// if (des_h != (src_h - 1)*strd[0] + kers[0] - pdh ||
	// 	des[0].row != (src[0].row - 1)*strd[1] + kers[1] - pdw ||
	// 	kers[0] * kers[1] * des[0].col != src[0].col)
	// {
	// 	printf("\t\terr check, unmathed matrix for MatDeConstruct\t\t\n");
	// 	printf("\t\tDstShape:\n\t\t\t");
	// 	MatplusShape(des, des_h);
	// 	printf("\t\tSrcShape:\n\t\t\t");
	// 	MatplusShape(src, src_h);
	// 	return NULL;
	// }
	MatplusZeros(des, des_h);
	int i, j, k;
	for (i = 0; i < src_h; ++i)
	{
		for (j = 0; j < src[0].row; ++j)
		{
			for (k = 0; k < src[i].col; ++k)
			{
				// decode flat index k into (kernel row, kernel col, channel)
				int d1, d2, d3;
				d1 = i*strd[0] + (k / kers[1]) % kers[0]; // (k % (kers[0] * kers[1])) / kers[1]
				d2 = j*strd[1] + k%kers[1]; // (k % (kers[0] * kers[1])) % kers[1]
				d3 = k / (kers[0] * kers[1]);
				// only positions inside the unpadded region are written back
				if ((d1 > pdh / 2 && d1 <= src_h + pdh / 2) && (d2 > pdw / 2 && d2 <= src[0].row + pdw / 2))
					(des[d1 - pdh / 2 - 1].element[d2 - pdw / 2 - 1])[d3] =
					(des[d1 - pdh / 2 - 1].element[d2 - pdw / 2 - 1])[d3] +
					(src[i].element[j])[k];
			}
		}
	}
	return des;
}

// des<2d> = flat12(src1<3d>)^T * flat12(src2<3d>)
/* des<2d> = flat12(src1<3d>)^T * flat12(src2<3d>): the (plane, row)
 * dimensions of both stacks are flattened and the transposed product taken.
 * Bug fix: the inner product used src1 for BOTH factors
 * ((src1...)[i] * (src1...)[j]), ignoring src2 entirely and always
 * producing src1^T * src1.  The sibling Matplus_TMulMatplusSumDes (whose
 * "src2 miswrite" note records the same mistake) shows the intended
 * second factor is src2. */
Mat* Matplus_TMulMatplus(Mat *des, Mat* src1, int src1_h, Mat *src2, int src2_h)
{
	if (src1_h != src2_h || src1[0].row != src2[0].row || des->row != src1[0].col || des->col != src2[0].col) {
		printf("\t\terr check, unmathed matrix for Matplus_TMulMatplus\t\t\n");
		return NULL;     // shape check failed
	}
	int i, j, m, n;
	float tmp = 0.0;
	for (i = 0; i < des->row; ++i)
	{
		for (j = 0; j < des->col; ++j)
		{
			for (m = 0; m < src1_h; ++m)
			{
				for (n = 0; n < src1[0].row; ++n)
				{
					tmp = tmp + (src1[m].element[n])[i] * (src2[m].element[n])[j];
				}
			}
			(des->element[i])[j] = tmp;
			tmp = 0.0;
		}
	}
	return des;
}

/* Accumulating variant of Matplus_TMulMatplus:
 * des += flat12(src1)^T * flat12(src2).  Each inner product is accumulated
 * in double precision before being added to des as float. */
Mat* Matplus_TMulMatplusSumDes(Mat *des, Mat* src1, int src1_h, Mat *src2, int src2_h)
{

	if (src1_h != src2_h || src1[0].row != src2[0].row || des->row != src1[0].col || des->col != src2[0].col) {
		printf("\t\terr check, unmathed matrix for Matplus_TMulMatplus\t\t\n");
		return NULL;     // shape check failed
	}

	int r, c, p, q;
	for (r = 0; r < des->row; ++r)
	{
		for (c = 0; c < des->col; ++c)
		{
			double acc = 0.0;
			for (p = 0; p < src1_h; ++p)
			{
				for (q = 0; q < src1[0].row; ++q)
				{
					acc = acc + (src1[p].element[q])[r] * (src2[p].element[q])[c];
				}
			}
			(des->element[r])[c] = (des->element[r])[c] + (float)(acc);
		}
	}
	return des;
}

/* Max pooling along the column-group axis: om packs ka = om[0].col/am[0].col
 * candidate values per output element; the maximum of each group is written
 * to am and the winning offset (stored as a float) is recorded in km for
 * use by DeMaxChoose.
 * NOTE(review): km is indexed by row i*am[0].row + j, which assumes km has
 * am_h*am[0].row rows; only am/om shapes are checked here -- confirm km's. */
Mat* MaxChoose(Mat *am, int am_h, Mat *km, Mat* om, int om_h)
{
	if (am_h != om_h || am[0].row != om[0].row) {
		printf("\t\terr check, unmathed matrix for MaxChoose\t\t\n");
		printf("\t\tActiMatShape:\n\t\t\t");
		MatplusShape(am, am_h);
		printf("\t\tOriginActiMatShape:\n\t\t\t");
		MatplusShape(om, om_h);
		printf("\t\tKernelMatShape:\n\t\t\t");
		MatShape(km);
		return NULL;     // shape check failed
	}
	int ka = om[0].col / am[0].col;   // candidates per output element
	int i, j, k, l;
	for (i = 0; i < am_h; ++i)
	{
		for (j = 0; j < am[0].row; ++j)
		{
			for (k = 0; k < am[0].col; ++k)
			{
				// start with candidate 0, then scan the rest of the group
				(am[i].element[j])[k] = (om[i].element[j])[k*ka];
				(km->element[i*am[0].row + j])[k] = 0;
				for (l = 1; l < ka; ++l)
				{
					if ((am[i].element[j])[k] < (om[i].element[j])[k*ka + l])
					{
						(am[i].element[j])[k] = (om[i].element[j])[k*ka + l];
						(km->element[i*am[0].row + j])[k] = (float)l;
					}
				}
			}
		}
	}
	return am;
}

/* Backprop through max pooling: route each later-layer delta (dml) to the
 * position that won the max (the argmax recorded in km by MaxChoose);
 * every other position in the ka-wide group receives zero. */
Mat* DeMaxChoose(Mat* dm, int dm_h, Mat* km, Mat* dml, int dml_h)
{
//#ifdef MAT_LEGAL_CHECKING
	if (dm_h != dml_h || dm[0].row != dml[0].row || dml[0].col != km->col) {
		printf("\t\terr check, unmathed matrix for DeMaxChoose\t\t\n");
		printf("\t\tDeltaMatShape:\n\t\t\t");
		MatplusShape(dm, dm_h);
		printf("\t\tLaterDeltaShape:\n\t\t\t");
		MatplusShape(dml, dml_h);
		printf("\t\tKernelShape:\n\t\t\t");
		MatShape(km);
		return NULL;     // shape check failed
	}
//#endif
	int ka = dm[0].col / dml[0].col;   // group size used by the forward pool
	int i, j, k, l;
	for (i = 0; i < dm_h; ++i)
	{
		for (j = 0; j < dm[0].row; ++j)
		{
			for (k = 0; k < km->col; ++k)
			{
				for (l = 0; l < ka; ++l)
				{
					// km stores the winning offset for group (i, j, k)
					if ((int)(km->element[i*dm[0].row + j])[k] == l)  // km->row correct to dm[0].row
					{
						(dm[i].element[j])[k*ka + l] = (dml[i].element[j])[k];
					}
					else {
						(dm[i].element[j])[k*ka + l] = 0.0;
					}
				}
			}
		}
	}
	return dm;
}

/* Flatten a 3-D activation stack (am, am_h planes) into the single
 * 1 x (H*W*C + 1) row of a fully connected layer's origin matrix om;
 * element 0 is the constant bias input 1.0. */
Mat* FCload(Mat* om, Mat* am, int am_h)
{
	if (om[0].col != am_h * am[0].row * am[0].col + 1) {
		printf("\t\terr check, unmathed matrix for FCload\t\t\n");
		printf("\t\tActiMatShape:\n\t\t\t");
		MatplusShape(am, am_h);
		printf("\t\tOriginMatShape:\n\t\t\t");
		MatplusShape(om, 1);
		return NULL;     // shape check failed
	}
	int h, r, c;
	int pos = 1;
	(om[0].element[0])[0] = 1.0;   /* bias term */
	for (h = 0; h < am_h; ++h)
	{
		for (r = 0; r < am[0].row; ++r)
		{
			for (c = 0; c < am[0].col; ++c)
			{
				(om[0].element[0])[pos] = (am[h].element[r])[c];
				++pos;
			}
		}
	}
	return om;
}

/* Apply the activation selected by `way` to every plane of a stack:
 * 0 none, 1 sigmoid, 2 tanh, 3 relu, 4 leaky relu (leak 0.2), 5 softmax. */
Mat* MatplusActivate(Mat *src, Mat *dst, int hgt, int way)
{
	int i;
	switch (way) {
	case 0:
		for (i = 0; i < hgt; ++i) MatNoneActi(&src[i], &dst[i]);
		break;
	case 1:
		for (i = 0; i < hgt; ++i) MatSigmoid(&src[i], &dst[i]);
		break;
	case 2:
		for (i = 0; i < hgt; ++i) MatTanh(&src[i], &dst[i]);
		break;
	case 3:
		for (i = 0; i < hgt; ++i) MatRelu(&src[i], &dst[i]);
		break;
	case 4:
		for (i = 0; i < hgt; ++i) MatLeakyRelu(0.2f, &src[i], &dst[i]);  // leak = 0.2
		break;
	case 5:
		for (i = 0; i < hgt; ++i) MatSoftmax(&src[i], &dst[i]);
		break;
	default:
		printf("error for MatActivate, please check ActiFsHidden  variable!\n");
		break;
	}
	return dst;
}

/* Apply the derivative of the activation selected by `way` to every plane:
 * 0 none, 1 sigmoid, 2 tanh, 3 relu, 4 leaky relu (leak 0.2), 5 softmax.
 * Fix: the fallback error message named MatActivate (copy-pasted from
 * MatplusActivate); it now reports the correct function. */
Mat* MatplusDerivate(Mat *src, Mat *dst, int hgt, int way)
{
	int i;
	switch (way) {
	case 0:
		for (i = 0; i < hgt; ++i) MatDerivationNoneActi(&src[i], &dst[i]);
		break;
	case 1:
		for (i = 0; i < hgt; ++i) MatDerivationSigmoid(&src[i], &dst[i]);
		break;
	case 2:
		for (i = 0; i < hgt; ++i) MatDerivationTanh(&src[i], &dst[i]);
		break;
	case 3:
		for (i = 0; i < hgt; ++i) MatDerivationRelu(&src[i], &dst[i]);
		break;
	case 4:
		for (i = 0; i < hgt; ++i) MatDerivationLeakyRelu(0.2f, &src[i], &dst[i]);  // leak = 0.2
		break;
	case 5:
		for (i = 0; i < hgt; ++i) MatDerivationSoftmax(&src[i], &dst[i]);
		break;
	default:
		printf("error for MatplusDerivate, please check ActiFsHidden variable!\n");
		break;
	}
	return dst;
}

/************************************************************************/
/*                             传播辅助函数                               */
/************************************************************************/

/************************************************************************/
/*                              CNN传播                                  */
/************************************************************************/

//--------------------------------前向传播--------------------------------//

// may need change
// Compute the scalar loss between prediction src and target dst.
// Nstr_LossF selects the loss: 0 = mean squared error, 1 = cross-entropy.
// Returns -1 (with a diagnostic) for an unknown selector.
float LossFunction(Mat *src, Mat *dst, int Nstr_LossF) {
	switch (Nstr_LossF) {
	case 0:
		return MSE(src, dst);
	case 1:
		return CrossEntropy(src, dst);
	default:
		printf("error for Nstr_LossF, please check loss function variable!\n");
		return -1;
	}
}

// am struct and verified data struct
// Copy one flat sample buffer into the input layer's activation matrices.
// Traversal is column index -> matrix index -> row index; this must match
// the layout the data loader produced -- TODO confirm against the loader.
void inputCNN(float* data, CNN* cnn)
{
	Layer *ipt = &cnn->Layers[0];
	int c, h, r, cnt = 0;
	for (c = 0; c < ipt->ActiMat[0].col; ++c)
	{
		for (h = 0; h < ipt->OutH; ++h)
		{
			for (r = 0; r < ipt->ActiMat[0].row; ++r)
			{
				(ipt->ActiMat[h].element[r])[c] = data[cnt];
				++cnt;
			}
		}
	}
}

// Load a label vector (one value per class) into row 0 of OnehotMat.
void inputCNNLabel(float *data, CNN* cnn)
{
	int c;
	float *row = cnn->OnehotMat.element[0];
	for (c = 0; c < cnn->OnehotMat.col; ++c)
	{
		row[c] = data[c];
	}
}

// Forward pass for a fully-connected layer: flatten the previous layer's
// activations, multiply by the weights, activate, and cache the activation
// derivative for backprop.
void NNFCforward(Layer* FCLayer, Mat* AM, int am_h)
{
	// Gather the am_h incoming activation matrices into OriginMat
	// (FCload presumably also appends the bias slot -- TODO confirm).
	FCLayer->OriginMat = FCload(FCLayer->OriginMat, AM, am_h);
	// printf("OM:\n"); DumpMatplusHWCH(FCLayer->OriginMat, 1);

	// Pre-activation: SumMat = OriginMat x WeightMat.
	FCLayer->SumMat = MatplusMulMat(FCLayer->SumMat, FCLayer->OutH, FCLayer->OriginMat, FCLayer->OutH, FCLayer->WeightMat);
	//printf("SM:\n"); DumpMatplusHWCH(FCLayer->SumMat, 1);

	// Activation selected by ActiFuncNum (see MatplusActivate codes).
	// NOTE(review): height 1 is passed here but OutH below -- presumably
	// OutH == 1 for FC layers; confirm.
	FCLayer->ActiMat = MatplusActivate(FCLayer->SumMat, FCLayer->ActiMat, 1, FCLayer->ActiFuncNum);
	//printf("AM:\n"); DumpMatplusHWCH(FCLayer->ActiMat, 1);

	// Derivative is evaluated on the activations and cached for backprop.
	FCLayer->DeriMat = MatplusDerivate(FCLayer->ActiMat, FCLayer->DeriMat, FCLayer->OutH, FCLayer->ActiFuncNum);
	//printf("DM:\n"); DumpMatplusHWCH(FCLayer->DeriMat, 1);
}

// Forward pass for a convolution layer: reconstruct padded input windows
// (im2col-style, with a bias slot), multiply by the weights, activate, and
// cache the activation derivative.
void NNCVforward(Layer* CVLayer, Mat* AM, int am_h)
{
	// Rebuild input patches according to kernel size / stride / padding mode.
	CVLayer->OriginMat = MatPadReConstructBias(CVLayer->OriginMat, CVLayer->OutH, AM, am_h, CVLayer->KernelSize, CVLayer->Stride, CVLayer->PaddingMode);
	//printf("OM:\n"); DumpMatplusHWCC(CVLayer->OriginMat, CVLayer->OutH);

	// Pre-activation: SumMat = OriginMat x WeightMat.
	CVLayer->SumMat = MatplusMulMat(CVLayer->SumMat, CVLayer->OutH, CVLayer->OriginMat, CVLayer->OutH, CVLayer->WeightMat);
	//printf("SM:\n"); DumpMatplusHWCC(CVLayer->SumMat, CVLayer->OutH);

	// Activation selected by ActiFuncNum (see MatplusActivate codes).
	CVLayer->ActiMat = MatplusActivate(CVLayer->SumMat, CVLayer->ActiMat, CVLayer->OutH, CVLayer->ActiFuncNum);
	//printf("AM:\n"); DumpMatplusHWCC(CVLayer->ActiMat, CVLayer->OutH);

	// Cache the activation derivative for backprop.
	CVLayer->DeriMat = MatplusDerivate(CVLayer->ActiMat, CVLayer->DeriMat, CVLayer->OutH, CVLayer->ActiFuncNum);
	//printf("DM:\n"); DumpMatplusHWCC(CVLayer->DeriMat, CVLayer->OutH);
}

// Forward pass for a deconvolution layer: multiply by the kernel first,
// then spread the product back into the (larger) output grid, activate,
// and cache the activation derivative.
// Fix: MatplusActivate previously received ActiMat as its *source*,
// activating stale/uninitialized data instead of SumMat.  Every other
// forward path (FC/CV) activates SumMat into ActiMat; do the same here.
void NNDCforward(Layer* DCLayer, Mat* AM, int am_h)
{
	DCLayer->OriginMat = MatplusMulMat(DCLayer->OriginMat, am_h, AM, am_h, DCLayer->KernelMat);
	// printf("OM:\n"); DumpMatplusHWCC(DCLayer->OriginMat, DCLayer->OutH);

	DCLayer->SumMat = MatPadReConstruct(DCLayer->SumMat, DCLayer->OutH, DCLayer->OriginMat, am_h, DCLayer->KernelSize, DCLayer->Stride, 0);
	// kernelsize maybe wrong for unmatch HW
	// printf("SM:\n"); DumpMatplusHWCC(DCLayer->SumMat, DCLayer->OutH);

	DCLayer->ActiMat = MatplusActivate(DCLayer->SumMat, DCLayer->ActiMat, DCLayer->OutH, DCLayer->ActiFuncNum);
	// softmax maybe logical wrong with this struct
	// printf("AM:\n"); DumpMatplusHWCC(DCLayer->ActiMat, DCLayer->OutH);

	DCLayer->DeriMat = MatplusDerivate(DCLayer->ActiMat, DCLayer->DeriMat, DCLayer->OutH, DCLayer->ActiFuncNum);
	// printf("DM:\n"); DumpMatplusHWCC(DCLayer->DeriMat, DCLayer->OutH);
}

// Forward pass for a max-pooling layer: reconstruct pooling windows from the
// previous activations, then keep each window's maximum.  MaxChoose also
// writes into KernelMat -- presumably the winning positions, later used by
// NNMPbackward/DeMaxChoose to route gradients; TODO confirm.
void NNMPforward(Layer* MPLayer, Mat* AM, int am_h)
{
	MPLayer->OriginMat = MatPadReConstruct(MPLayer->OriginMat, MPLayer->OutH, AM, am_h, MPLayer->KernelSize, MPLayer->Stride, MPLayer->PaddingMode);
	// printf("OM:\n"); DumpMatplusHWCC(MPLayer->OriginMat, MPLayer->OutH);

	MPLayer->ActiMat = MaxChoose(MPLayer->ActiMat, MPLayer->OutH, &MPLayer->KernelMat, MPLayer->OriginMat, MPLayer->OutH);
	// printf("AM:\n"); DumpMatplusHWCC(MPLayer->ActiMat, MPLayer->OutH);
}

// Forward pass for an average-pooling layer: reconstruct pooling windows,
// then multiply by KernelMat (presumably a fixed averaging kernel -- TODO
// confirm).  No activation or derivative is cached; pooling layers are
// treated as parameter-free in backprop.
void NNAPforward(Layer* APLayer, Mat* AM, int am_h)
{
	APLayer->OriginMat = MatPadReConstructBias(APLayer->OriginMat, APLayer->OutH, AM, am_h, APLayer->KernelSize, APLayer->Stride, APLayer->PaddingMode);
	// printf("OM:\n"); DumpMatplusHWCC(APLayer->OriginMat, APLayer->OutH);

	APLayer->ActiMat = MatplusMulMat(APLayer->ActiMat, APLayer->OutH, APLayer->OriginMat, APLayer->OutH, APLayer->KernelMat);
	// printf("AM:\n"); DumpMatplusHWCC(APLayer->ActiMat, APLayer->OutH);
}

// Dispatch the forward pass on layer type:
//   0 = fully connected, 1 = convolution, 2 = deconvolution,
//   3 = max pooling, 4 = average pooling.
// Unknown types are silently ignored (matching the original if-chain).
void NNforwardLayer(Layer* layer, Mat* AM, int am_h)
{
	switch (layer->LayerType)
	{
	case 0:
		NNFCforward(layer, AM, am_h);
		break;
	case 1:
		NNCVforward(layer, AM, am_h);
		break;
	case 2:
		NNDCforward(layer, AM, am_h);
		break;
	case 3:
		NNMPforward(layer, AM, am_h);
		break;
	case 4:
		NNAPforward(layer, AM, am_h);
		break;
	default:
		break;
	}
}

// Run one already-loaded sample (see inputCNN / inputCNNLabel) through every
// layer and return the scalar loss against OnehotMat.
// Cleanup: removed the unused local `char s[40]` (it served only a
// commented-out F2S debug print) and the redundant loss temporary.
float NNforwardSingle(CNN *cnn)
{
	int i;
	// Layer 0 is the input layer; propagate layers 1 .. HiddenLayerNum+1.
	for (i = 1; i < cnn->HiddenLayerNum + 2; ++i)
	{
		NNforwardLayer(&cnn->Layers[i], cnn->Layers[i - 1].ActiMat, cnn->Layers[i - 1].OutH);
	}
	//MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0]);
	//MatDump(&cnn->OnehotMat);
	// Output layer is assumed to be 1 matrix high, so ActiMat[0] holds the
	// full prediction row.
	return LossFunction(&cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0], &cnn->OnehotMat, cnn->LossFuncNum);
}

//--------------------------------前向传播--------------------------------//

//--------------------------------反向传播--------------------------------//

// Single-matrix activation-derivative dispatch (same codes as
// MatplusActivate/MatplusDerivate).  Returns NULL for an unknown option.
// Fix: the leaky-ReLU leak was 0.1f here while both MatplusActivate and
// MatplusDerivate use 0.2f -- the derivative must match the forward leak.
Mat * ActiFunDerivation(Mat Mat_Sum, Mat* Mat_ActiFunDerivation, int option) {
	if (option == 0) {
		return MatDerivationNoneActi(&Mat_Sum, Mat_ActiFunDerivation);
	}
	else if (option == 1) {
		return MatDerivationSigmoid(&Mat_Sum, Mat_ActiFunDerivation);
	}
	else if (option == 2) {
		return MatDerivationTanh(&Mat_Sum, Mat_ActiFunDerivation);
	}
	else if (option == 3) {
		return MatDerivationRelu(&Mat_Sum, Mat_ActiFunDerivation);
	}
	else if (option == 4) {
		return MatDerivationLeakyRelu(0.2f, &Mat_Sum, Mat_ActiFunDerivation); //leak = 0.2, consistent with MatplusActivate
	}
	else if (option == 5) {
		return MatDerivationSoftmax(&Mat_Sum, Mat_ActiFunDerivation);
	}
	else {
		printf("error for ActiFunDerivation, please check ActiFsHidden  variable!\n");
	}
	return NULL;
}

// Derivative of the selected loss w.r.t. the output activations, written
// into DerivativeActiMat.  option: 0 = MSE, 1 = cross-entropy.
// Returns NULL (with a diagnostic) for an unknown selector.
Mat * LossFunDerivation(Mat *ActiMat, Mat *DerivativeActiMat, Mat One_hotMat, int option) {
	switch (option) {
	case 0:
		return MSEDerivative(ActiMat, DerivativeActiMat, One_hotMat);
	case 1:
		return CrossEntropyDerivative(ActiMat, DerivativeActiMat, One_hotMat);
	default:
		printf("error for LossFunDerivation, please check Nstr_LossF  variable!\n");
		return NULL;
	}
}

// Backward pass for a fully-connected layer.  On entry DeltaMat holds the
// gradient written by the next layer's backward step; it is scaled
// elementwise by the cached activation derivative, then propagated through
// the (transposed) kernel weights into fdel, the previous layer's delta
// buffer.  Returns fdel.
Mat* NNFCbackward(Layer* FCLayer, Mat* fdel, int fdel_h)
{
	// delta *= dActivation/dSum (elementwise)
	FCLayer->DeltaMat = MatplusProduct(FCLayer->DeltaMat, FCLayer->DeriMat, FCLayer->DeltaMat, FCLayer->OutH);
	// printf("FC Delta:\n"); DumpMatplusHWCH(FCLayer->DeltaMat, 1);
	// fdel = delta x KernelMat^T; the "_unmatch" variant presumably handles
	// differing stack heights between fdel and DeltaMat -- TODO confirm.
	fdel = MatplusMulMat_T_unmatch(fdel, fdel_h, FCLayer->DeltaMat, FCLayer->OutH, FCLayer->KernelMat);
	return fdel;
}

// Accumulate the weight gradient of an FC or CV layer:
// NablaMat gets Origin^T x Delta; the "SumDes" helper presumably adds into
// the destination (accumulating across a batch) rather than overwriting --
// TODO confirm against its definition.
void NNFCCVNabla(Layer* FCLayer)
{
	FCLayer->NablaMat = *Matplus_TMulMatplusSumDes(&FCLayer->NablaMat, FCLayer->OriginMat, FCLayer->OutH, FCLayer->DeltaMat, FCLayer->OutH);
	// printf("FCCV Nabla:\n"); MatDump(&FCLayer->NablaMat);
}

// Backward pass for a convolution layer: scale the incoming delta by the
// cached activation derivative, push it through the transposed kernel, then
// undo the pad/reconstruct mapping into fdel (the previous layer's deltas).
// Fix: the scratch-buffer malloc result is now checked before use.
Mat* NNCVbackward(Layer* CVLayer, Mat* fdel, int fdel_h)
{
	int i;
	// delta *= dActivation/dSum (elementwise)
	CVLayer->DeltaMat = MatplusProduct(CVLayer->DeltaMat, CVLayer->DeriMat, CVLayer->DeltaMat, CVLayer->OutH);
	// printf("CV Delta:\n"); DumpMatplusHWCC(CVLayer->DeltaMat, CVLayer->OutH);
	Mat* TmpMat = (Mat*)malloc(CVLayer->OutH * sizeof(Mat));
	if (TmpMat == NULL)
	{
		printf("err check, malloc failed in NNCVbackward\n");
		return fdel;
	}
	for (i = 0; i < CVLayer->OutH; ++i)
	{
		// col - 1: presumably strips the bias column added by
		// MatPadReConstructBias during the forward pass -- TODO confirm.
		MatCreate(&TmpMat[i], CVLayer->OriginMat[0].row, CVLayer->OriginMat[0].col - 1);
	}
	TmpMat = MatplusMulMat_T(TmpMat, CVLayer->OutH, CVLayer->DeltaMat, CVLayer->OutH, CVLayer->KernelMat);
	fdel = MatDeConstructfwd(fdel, fdel_h, TmpMat, CVLayer->OutH, CVLayer->KernelSize, CVLayer->Stride, CVLayer->PaddingMode);
	for (i = 0; i < CVLayer->OutH; ++i)
	{
		MatDelete(&TmpMat[i]);
	}
	free(TmpMat);
	return fdel;
}

// Backward pass for a deconvolution layer: scale the incoming delta by the
// cached activation derivative, fold it back through the pad/reconstruct
// mapping, then multiply by the transposed kernel into fdel.
// Fix: TmpMat is allocated with fdel_h elements, MatPadReConstruct fills
// fdel_h of them and MatplusMulMat_T reads fdel_h of them -- but the
// original create/delete loops ran only to OutH, causing out-of-bounds
// writes when OutH > fdel_h and use of uninitialized Mats when
// fdel_h > OutH.  Both loops now use fdel_h; malloc is also checked.
Mat* NNDCbackward(Layer* DCLayer, Mat* fdel, int fdel_h)
{
	int i;
	DCLayer->DeltaMat = MatplusProduct(DCLayer->DeltaMat, DCLayer->DeriMat, DCLayer->DeltaMat, DCLayer->OutH);
	// printf("DC Delta:\n"); DumpMatplusHWCC(DCLayer->DeltaMat, DCLayer->OutH);
	Mat* TmpMat = (Mat*)malloc(fdel_h * sizeof(Mat));
	if (TmpMat == NULL)
	{
		printf("err check, malloc failed in NNDCbackward\n");
		return fdel;
	}
	for (i = 0; i < fdel_h; ++i)
	{
		// Dimensions taken from OriginMat as before -- TODO confirm they
		// match what MatPadReConstruct expects for the fdel_h-sized stack.
		MatCreate(&TmpMat[i], DCLayer->OriginMat[0].row, DCLayer->OriginMat[0].col);
	}
	TmpMat = MatPadReConstruct(TmpMat, fdel_h, DCLayer->DeltaMat, DCLayer->OutH, DCLayer->KernelSize, DCLayer->Stride, 0);
	fdel = MatplusMulMat_T(fdel, fdel_h, TmpMat, fdel_h, DCLayer->KernelMat);
	for (i = 0; i < fdel_h; ++i)
	{
		MatDelete(&TmpMat[i]);
	}
	free(TmpMat);
	return fdel;
}// the order of KernelSize may cause error

// Accumulate the kernel gradient of a deconvolution layer:
// NablaMat gets Delta^T x Origin (note the operand order is swapped
// relative to NNFCCVNabla); "SumDes" presumably accumulates into the
// destination -- TODO confirm.
void NNDCNabla(Layer* DCLayer)
{
	DCLayer->NablaMat = *Matplus_TMulMatplusSumDes(&DCLayer->NablaMat, DCLayer->DeltaMat, DCLayer->OutH, DCLayer->OriginMat, DCLayer->OutH);
	// printf("DC Nabla:\n"); MatDump(&DCLayer->NablaMat);
}

// Backward pass for a max-pooling layer: route each delta back to the
// position that won the forward max (recorded in KernelMat by MaxChoose --
// TODO confirm), then undo the pad/reconstruct mapping into fdel.
// Fix: the scratch-buffer malloc result is now checked before use.
Mat* NNMPbackward(Layer* MPLayer, Mat* fdel, int fdel_h)
{
	int i;
	Mat* TmpMat = (Mat*)malloc(MPLayer->OutH * sizeof(Mat));
	if (TmpMat == NULL)
	{
		printf("err check, malloc failed in NNMPbackward\n");
		return fdel;
	}
	for (i = 0; i < MPLayer->OutH; ++i)
	{
		MatCreate(&TmpMat[i], MPLayer->OriginMat[0].row, MPLayer->OriginMat[0].col);
	}
	TmpMat = DeMaxChoose(TmpMat, MPLayer->OutH, &MPLayer->KernelMat, MPLayer->DeltaMat, MPLayer->OutH);
	fdel = MatDeConstructfwd(fdel, fdel_h, TmpMat, MPLayer->OutH, MPLayer->KernelSize, MPLayer->Stride, MPLayer->PaddingMode);
	for (i = 0; i < MPLayer->OutH; ++i)
	{
		MatDelete(&TmpMat[i]);
	}
	free(TmpMat);
	return fdel;
}//?? deconstruct

// Backward pass for an average-pooling layer: spread the deltas through the
// transposed (averaging) kernel, then undo the pad/reconstruct mapping into
// fdel, the previous layer's delta buffer.
// Fix: the scratch-buffer malloc result is now checked before use.
Mat* NNAPbackward(Layer* APLayer, Mat* fdel, int fdel_h)
{
	int i;
	// printf("AP Delta:\n"); DumpMatplusHWCC(APLayer->DeltaMat, APLayer->OutH);
	Mat* TmpMat = (Mat*)malloc(APLayer->OutH * sizeof(Mat));
	if (TmpMat == NULL)
	{
		printf("err check, malloc failed in NNAPbackward\n");
		return fdel;
	}
	for (i = 0; i < APLayer->OutH; ++i)
	{
		MatCreate(&TmpMat[i], APLayer->OriginMat[0].row, APLayer->OriginMat[0].col);
	}
	TmpMat = MatplusMulMat_T(TmpMat, APLayer->OutH, APLayer->DeltaMat, APLayer->OutH, APLayer->KernelMat);
	fdel = MatDeConstructfwd(fdel, fdel_h, TmpMat, APLayer->OutH, APLayer->KernelSize, APLayer->Stride, APLayer->PaddingMode);
	for (i = 0; i < APLayer->OutH; ++i)
	{
		MatDelete(&TmpMat[i]);
	}
	free(TmpMat);
	return fdel;
}//?? deconstruct

// Backward step for the output layer: compute its DeltaMat from the loss
// derivative, then seed the last hidden layer's DeltaMat so the hidden-layer
// loop can proceed.  Always returns NULL.
Mat * NNOutputLayerBackward(CNN *cnn) {
	// printf("OneHot:\n"); MatDump(&cnn->OnehotMat);
	// Softmax + cross-entropy shortcut: delta = activation - onehot.
	if (cnn->Layers[cnn->HiddenLayerNum + 1].ActiFuncNum == 5 && cnn->LossFuncNum == 1) {//softmax+crossentropy
		MatSub(&cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0], &cnn->OnehotMat, &cnn->Layers[cnn->HiddenLayerNum + 1].DeltaMat[0]);
	}
	else {
		// General case: delta = dLoss/dActi (elementwise*) dActi/dSum.
		Mat tempMat;
		MatCreate(&tempMat, 1, cnn->Layers[cnn->HiddenLayerNum + 1].OutC);

		LossFunDerivation(&cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0], &tempMat, cnn->OnehotMat, cnn->LossFuncNum);

		MatProduct(&tempMat, &cnn->Layers[cnn->HiddenLayerNum + 1].DeriMat[0], &cnn->Layers[cnn->HiddenLayerNum + 1].DeltaMat[0]);

		MatDelete(&tempMat);
		//MatDump(&P_DeltaMat[N_hidden + 1]);
	}
	// NNFCCVNabla(&cnn->Layers[cnn->HiddenLayerNum + 1], cnn->sampleCapacity); 
	//testcode
	// printf("IptActi:"); MatDump(&cnn->Layers[0].ActiMat[14]);
	// DumpMatplusHWCC(cnn->Layers[3].SumMat, 13);
	// DumpMatplusHWCC(cnn->Layers[4].ActiMat, 12);
	//28 14 13 13 12
	// MatDump(&cnn->Layers[6].OriginMat[0]);// cvp -> fc wrong?
	// MatDump(&cnn->Layers[6].WeightMat);// cvp -> fc wrong?
	// MatDump(&cnn->Layers[6].SumMat[0]);// cvp -> fc wrong?
	// DumpFloatArray(cnn->Layers[3].WeightMat.element[7], 4);
	// printf("ForOptActi:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum].ActiMat[0]);
	// printf("OptOM:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].OriginMat[0]);
	// printf("OptSum:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].SumMat[0]);
	// printf("OptActi:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0]);
	// printf("OptDeri:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].DeriMat[0]);
	// printf("OptOneHot:"); MatDump(&cnn->OnehotMat);
	// printf("OptDelta:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].DeltaMat[0]);
	// printf("OptNabla:"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].NablaMat);
	//testcode
	// Pre
	// Propagate the output delta backwards into the last hidden layer's
	// DeltaMat (through the output layer's transposed kernel) so that
	// NNbackwardSingle's loop starts with it already in place.
	if(cnn->HiddenLayerNum > 0)
		cnn->Layers[cnn->HiddenLayerNum].DeltaMat = MatplusMulMat_T_unmatch(
			cnn->Layers[cnn->HiddenLayerNum].DeltaMat, cnn->Layers[cnn->HiddenLayerNum].OutH,
			cnn->Layers[cnn->HiddenLayerNum + 1].DeltaMat, cnn->Layers[cnn->HiddenLayerNum + 1].OutH,
			cnn->Layers[cnn->HiddenLayerNum + 1].KernelMat);
	return NULL;


}

// Dispatch the backward pass on layer type (same codes as NNforwardLayer).
// FC/CV/DC layers also accumulate their weight gradients; pooling layers
// only route deltas.  Unknown types are silently ignored.
void NNbackwardLayer(Layer* layer, Mat* fdel, int fdel_h)
{
	switch (layer->LayerType)
	{
	case 0:
		NNFCbackward(layer, fdel, fdel_h);
		NNFCCVNabla(layer);
		break;
	case 1:
		NNCVbackward(layer, fdel, fdel_h);
		NNFCCVNabla(layer);
		break;
	case 2:
		NNDCbackward(layer, fdel, fdel_h);
		NNDCNabla(layer);
		break;
	case 3:
		NNMPbackward(layer, fdel, fdel_h);
		break;
	case 4:
		NNAPbackward(layer, fdel, fdel_h);
		break;
	default:
		break;
	}
}

// Backward pass for one sample: handle the output layer first (which also
// seeds the last hidden layer's delta), then walk the hidden layers in
// reverse.
void NNbackwardSingle(CNN* cnn)
{
	int i;
	//printf("optbwd.\n");
	NNOutputLayerBackward(cnn);
	//printf("optnbl.\n");
	// Accumulate the output layer's weight gradient.
	NNFCCVNabla(&cnn->Layers[cnn->HiddenLayerNum + 1]);
	//printf("bd.\n");
	// NOTE(review): the loop stops at i == 2, so NNbackwardLayer is never
	// invoked for layer 1 and its NablaMat is apparently never accumulated.
	// It looks like the bound should be i > 0 with layer 0 (the input layer)
	// supplying the fdel buffer -- confirm Layers[0].DeltaMat is allocated
	// before changing.
	for (i = cnn->HiddenLayerNum; i > 1; --i)
	{
		//printf("%d ", i);
		NNbackwardLayer(&cnn->Layers[i], cnn->Layers[i - 1].DeltaMat, cnn->Layers[i - 1].OutH);
	}
}

// Scale every trainable layer's accumulated gradient by 1/BatchSize.
// Fixes: (1) pooling layers (types 3/4) are now skipped -- ZeroGrad never
// initializes their NablaMat and BGD never reads it, so scaling it here
// touched a matrix that is not part of the gradient pipeline; (2) the
// loop-invariant 1/BatchSize is hoisted out of the loop.
void NNDivGrad(CNN* cnn, int BatchSize)
{
	int i;
	float avg = 1.0f / BatchSize;
	for (i = cnn->HiddenLayerNum + 1; i > 0; --i)
	{
		if (cnn->Layers[i].LayerType == 3 || cnn->Layers[i].LayerType == 4)
			continue;
		MatNumMul(avg, &cnn->Layers[i].NablaMat, &cnn->Layers[i].NablaMat);
	}
}

//--------------------------------反向传播--------------------------------//

//--------------------------------优化算法--------------------------------//
// Batch gradient descent: parameters -= alpha * NablaMat for every
// trainable layer (pooling layers carry no weights and are skipped).
void BGD(CNN *cnn, float alpha)
{
	Mat temp;
	int i;
	for (i = 1; i <= cnn->HiddenLayerNum + 1; ++i) {
		//MatDump(&P_WeightBiasMat[i]);
		// Types 3/4 (max/avg pooling) have no trainable parameters.
		if (cnn->Layers[i].LayerType == 3 || cnn->Layers[i].LayerType == 4)
			continue;
		// temp = alpha * gradient
		MatCreate(&temp, cnn->Layers[i].NablaMat.row, cnn->Layers[i].NablaMat.col);
		MatNumMul(alpha, &cnn->Layers[i].NablaMat, &temp);
		// MatSub(&cnn->Layer[i].WeightBiasMat, &temp, &fcnn->Layer[i].WeightBiasMat);
		if (cnn->Layers[i].LayerType == 0 || cnn->Layers[i].LayerType == 1)
		{
			// FC/CV: update WeightMat, then refresh KernelMat from it
			// (MatReduceRow presumably strips the bias row -- TODO confirm).
			MatSub(&cnn->Layers[i].WeightMat, &temp, &cnn->Layers[i].WeightMat);
			MatReduceRow(&cnn->Layers[i].WeightMat, &cnn->Layers[i].KernelMat);
		}
		else {
			// Deconvolution keeps its parameters directly in KernelMat.
			MatSub(&cnn->Layers[i].KernelMat, &temp, &cnn->Layers[i].KernelMat);
		}
		MatDelete(&temp);
	}
}

// Adam optimizer: hyper-parameters plus per-layer moment state.
typedef struct {
	float beta1;    // first-moment (mean) decay rate
	float beta2;    // second-moment (uncentered variance) decay rate
	float eta;      // learning rate
	float epsilon;  // added to sqrt(hat_s) to avoid division by zero

	int time;       // timestep t, used for bias correction

	// Per-layer matrices indexed 1..HiddenLayerNum+1 (index 0 unused):
	Mat *v;         // first moment
	Mat *hat_v;     // bias-corrected first moment
	Mat *s;         // second moment
	Mat *hat_s;     // bias-corrected second moment
	Mat *hat_g;     // scratch buffer for intermediate products
}AdamPara;



// Allocate and zero the five Adam state arrays, one Mat per layer.
// Index 0 corresponds to the input layer and stays an empty 0x0 placeholder.
// State matrices mirror the shape of each layer's parameter matrix
// (WeightMat for FC/CV, KernelMat otherwise).
void SpaceCreateAdamPara(CNN *fcnn, AdamPara *adamPara) {
	int layerCnt = fcnn->HiddenLayerNum + 2;
	int i, j, m, n;
	Mat *all[5];

	adamPara->v = (Mat *)malloc(layerCnt * sizeof(Mat));
	adamPara->hat_v = (Mat *)malloc(layerCnt * sizeof(Mat));
	adamPara->s = (Mat *)malloc(layerCnt * sizeof(Mat));
	adamPara->hat_s = (Mat *)malloc(layerCnt * sizeof(Mat));
	adamPara->hat_g = (Mat *)malloc(layerCnt * sizeof(Mat));

	all[0] = adamPara->v;
	all[1] = adamPara->hat_v;
	all[2] = adamPara->s;
	all[3] = adamPara->hat_s;
	all[4] = adamPara->hat_g;

	// Input-layer slot: empty placeholder in every state array.
	for (j = 0; j < 5; ++j) {
		all[j][0].row = 0;
		all[j][0].col = 0;
	}

	for (i = 1; i < layerCnt; ++i) {
		if (fcnn->Layers[i].LayerType == 0 || fcnn->Layers[i].LayerType == 1)
		{
			m = fcnn->Layers[i].WeightMat.row;
			n = fcnn->Layers[i].WeightMat.col;
		}
		else {
			m = fcnn->Layers[i].KernelMat.row;
			n = fcnn->Layers[i].KernelMat.col;
		}
		for (j = 0; j < 5; ++j) {
			MatCreate(&all[j][i], m, n);
			MatZeros(&all[j][i]);
		}
	}
}

// Set Adam hyper-parameters to their common defaults and allocate per-layer
// state.  NOTE(review): epsilon is 0.00000008 (8e-8) here; the canonical
// Adam epsilon is 1e-8 -- confirm whether this value is intentional.
void initAdam(CNN fcnn, AdamPara *adamPara) {
	adamPara->beta1 = 0.9;
	adamPara->beta2 = 0.999;
	adamPara->eta = 0.001;
	adamPara->epsilon = 0.00000008;
	SpaceCreateAdamPara(&fcnn, adamPara);

	// Bias correction starts at t = 1.
	adamPara->time = 1;
}

// Adam
void Adam(CNN *fcnn, AdamPara *adamPara) {
	//Mat temp;
	int i;
	for (i = 1; i <= fcnn->HiddenLayerNum + 1; ++i) {
		// formula (1)
		MatNumMul(adamPara->beta1, &adamPara->v[i], &adamPara->v[i]);
		MatNumMul(1 - adamPara->beta1, &fcnn->Layers[i].NablaMat, &adamPara->hat_g[i]);
		MatAdd(&adamPara->v[i], &adamPara->hat_g[i], &adamPara->v[i]);


		// formula (2)
		MatNumMul(adamPara->beta2, &adamPara->s[i], &adamPara->s[i]);
		MatSquare(&adamPara->hat_g[i], &adamPara->hat_g[i]);
		MatNumMul(1 - adamPara->beta2, &adamPara->hat_g[i], &adamPara->hat_g[i]);
		MatAdd(&adamPara->s[i], &adamPara->hat_g[i], &adamPara->s[i]);

		// formula (3)(4)
		MatNumMul((1 / (1 - pow(adamPara->beta1, adamPara->time))), &adamPara->v[i], &adamPara->hat_v[i]);
		MatNumMul((1 / (1 - pow(adamPara->beta2, adamPara->time))), &adamPara->s[i], &adamPara->hat_s[i]);

		// formula (5)
		MatNumMul(adamPara->eta, &adamPara->hat_v[i], &adamPara->hat_v[i]);
		MatSqrt(&adamPara->hat_s[i], &adamPara->hat_s[i]);
		MatNumAdd(adamPara->epsilon, &adamPara->hat_s[i], &adamPara->hat_s[i]);
		MatDiv(&adamPara->hat_v[i], &adamPara->hat_s[i], &adamPara->hat_g[i]);

		if (fcnn->Layers[i].LayerType == 0 || fcnn->Layers[i].LayerType == 1)
		{
			MatSub(&fcnn->Layers[i].WeightMat, &adamPara->hat_g[i], &fcnn->Layers[i].WeightMat);
			MatReduceRow(&fcnn->Layers[i].WeightMat, &fcnn->Layers[i].KernelMat);
		}
		else {
			MatSub(&fcnn->Layers[i].KernelMat, &adamPara->hat_g[i], &fcnn->Layers[i].KernelMat);
		}
		MatSub(&fcnn->Layers[i].WeightMat, &adamPara->hat_g[i], &fcnn->Layers[i].WeightMat);
	}

	adamPara->time = adamPara->time + 1;
}

//--------------------------------优化算法--------------------------------//

//--------------------------------准确度测试------------------------------//
// Return the index of the largest element of arr[0..n-1].
// Ties keep the earliest index (strict > comparison).
int judge_max(float arr[], int n)
{
	int best = 0;
	int k;
	for (k = 1; k < n; k++)
	{
		if (arr[k] > arr[best])
		{
			best = k;
		}
	}
	return best;
}

// Evaluate the network on the test split: forward every test sample,
// accumulate the loss, and count top-1 hits (argmax of the output row
// matching the 1.0 entry of the one-hot label).  Prints average loss and
// accuracy.
// Fix: removed two leftover per-sample debug dumps (DumpFloatArray of the
// output and one-hot rows) that printed 20 floats for *every* test sample.
void testAccAndLoss(CNN *cnn, DataSet dst) {
	int i;
	char buf[40];
	int accuryNum = 0;
	double Loss = 0.0;
	for (i = 0; i < dst.TestSampleNum; ++i) {
		inputCNN(dst.TestFeature.element[i], cnn);
		inputCNNLabel(dst.TestLabel.element[i], cnn);
		Loss = Loss + NNforwardSingle(cnn);
		int maxi = judge_max(cnn->Layers[cnn->HiddenLayerNum + 1].ActiMat[0].element[0], cnn->ClassificationNum);
		// Hit when the label's one-hot entry at the predicted class is 1.
		if (equal(1.0, (dst.TestLabel.element[i])[maxi])) {
			++accuryNum;
		}
	}
	printf("testloss=%s %e  acc=%d/%d ======\n", F2S((float)(Loss / dst.TestSampleNum), buf), Loss / dst.TestSampleNum, accuryNum, dst.TestSampleNum);
}
//--------------------------------准确度测试------------------------------//

// Reset the accumulated gradient of every trainable layer (FC, CV, DC)
// to zero before a new batch.  Pooling layers have no gradient state.
void ZeroGrad(CNN *cnn)
{
	int idx;
	for (idx = 1; idx < cnn->HiddenLayerNum + 2; ++idx)
	{
		int type = cnn->Layers[idx].LayerType;
		if (type == 0 || type == 1 || type == 2)
		{
			MatZeros(&cnn->Layers[idx].NablaMat);
		}
	}
}

// Given Feature Mat and Label Mat, do Forward Propagation in a batch.
float NNpropagationBatch(int BatchSize, Mat* FeatureMat, Mat* LabelMat, CNN* cnn)
{
	int i;
	float loss = 0.0;
	int size;
	char buf[40];
	ZeroGrad(cnn);
	// MatDump(&cnn->Layers[3].KernelMat);
	for (i = 0; i < BatchSize; ++i)
	{
		// Doad a line from FeatureMat to CNN input Layer
		if (cnn->Layers[0].OutH * cnn->Layers[0].ActiMat->row * cnn->Layers[0].ActiMat->col != FeatureMat->col)
		{
			printf("\t\terr check, mismatching matrix for NNforward\t\t\n");
			printf("\t\tFeatureMatShape:\n\t\t\t");
			MatShape(FeatureMat);
			printf("\t\tlabelMatMatShape:\n\t\t\t");
			MatplusShape(cnn->Layers[0].ActiMat, cnn->Layers[0].OutH);
			return -1.f;
		}
		//printf("inputCNN.\n");
		inputCNN(FeatureMat->element[i], cnn);
		//printf("inputLabel.\n");
		inputCNNLabel(LabelMat->element[i], cnn);
		// DumpMatplusHWCC(cnn->Layers[0].ActiMat, cnn->Layers[0].OutH);
		// MatDump(&cnn->OnehotMat);
		//printf("fwd.\n");
		loss = loss + NNforwardSingle(cnn);
		// printf("%f %d\n", (cnn->Layers[4].KernelMat.element[0])[1], (int)(cnn->Layers[4].KernelMat.element[0])[1]);
		// printf("Sin %d loss + : %s\n", i, F2S(loss, buf));
		// printf("%d ", i);
		// DumpMatplusHWCC(cnn->Layers[3].ActiMat, cnn->Layers[3].OutH);
		// DumpMatplusHWCC(cnn->Layers[4].OriginMat, cnn->Layers[4].OutH);
		// DumpMatplusHWCC(cnn->Layers[3].SumMat, cnn->Layers[3].OutH);
		// DumpMatplusHWCC(cnn->Layers[4].ActiMat, cnn->Layers[4].OutH);
		// DumpMatplusHWCC(cnn->Layers[3].DeriMat, cnn->Layers[3].OutH);
		
		//printf("bwd.\n");
		NNbackwardSingle(cnn);
		//printf("end bwd.\n");
		NNDivGrad(cnn, BatchSize);
		// printf("Delta");DumpMatplusHWCH(cnn->Layers[3].DeltaMat, cnn->Layers[3].OutH);
		// printf("Deri");DumpMatplusHWCC(cnn->Layers[3].DeriMat, cnn->Layers[3].OutH);
		// printf("Origin");DumpMatplusHWCC(cnn->Layers[3].OriginMat, cnn->Layers[3].OutH);
		// printf("Nabla");MatDump(&cnn->Layers[6].NablaMat);
		// printf("Deri-1");DumpMatplusHWCH(cnn->Layers[5].DeriMat, cnn->Layers[5].OutH);
		// printf("Delta-1");DumpMatplusHWCH(cnn->Layers[2].DeltaMat, cnn->Layers[2].OutH);
	}
	// DumpFloatArray(FeatureMat->element[i], 784);
	// DumpMatplusHWCC(cnn->Layers[0].ActiMat, cnn->Layers[0].OutH);
	//printf("Nabla");MatDump(&cnn->Layers[3].NablaMat);
	//printf("fmrWeight");MatDump(&cnn->Layers[3].WeightMat);
	loss = loss / BatchSize;
	if(cnn->OptFuncNum == 0)
	{
		BGD(cnn, 0.95);
	}
	else if(cnn->OptFuncNum == 1)
	{
		AdamPara adamPara;
		initAdam(*cnn, &adamPara);
		Adam(cnn, &adamPara);
	}
	else {
		printf("OptFincNum error.\n");
	}
	//printf("aftWeight");MatDump(&cnn->Layers[3].WeightMat);
	// printf("OptNabla:\n"); MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].NablaMat);
	// printf("OptWeight:\n");
	// MatDump(&cnn->Layers[cnn->HiddenLayerNum + 1].WeightMat);
	// DumpFloatArray(cnn->Layers[cnn->HiddenLayerNum + 1].NablaMat.element[0], cnn->Layers[cnn->HiddenLayerNum + 1].WeightMat.col);
	return loss;
}

// Train for one epoch: run every batch through NNpropagationBatch and sum
// the batch losses.  Every 20 batches (and on the last batch) the network is
// evaluated on the test split.  Returns the summed batch losses.
float NNpropagation(DataSet dst, CNN* cnn)
{
	int i;
	float loss;
	float eloss = 0.0;
	char buf[40];
	for (i = 0; i < dst.BatchNum; ++i)
	{
		//printf("%d ", i);
		// The final batch may be partial; use the remainder count then.
		if (i == dst.BatchNum - 1 && dst.remainder != 0)
			loss = NNpropagationBatch(dst.remainder, &dst.BatchTrainFeature[i], &dst.BatchTrainLabel[i], cnn);
		else loss = NNpropagationBatch(dst.BatchSize, &dst.BatchTrainFeature[i], &dst.BatchTrainLabel[i], cnn);
		//if (i == 0)
			//DumpMatplusHWCC(cnn->Layers[3].DeltaMat, cnn->Layers[3].OutH);
			//MatDump(&cnn->Layers[3].DeltaMat[7]);
		eloss += loss;
		//printf("Batch %3d loss:%s.\n", i, F2S(loss, buf));
		// Periodic evaluation on the test set.
		if( i % 20 == 0 || i == dst.BatchNum - 1)
		{
			printf("===== Batch:%d/%d: ", i, dst.BatchNum);
			// MatDump(&cnn->Layers[cnn->HiddenLayerNum].ActiMat[0]);
			// MatDump(&cnn->Layers[cnn->HiddenLayerNum].OriginMat[0]);
			// DumpMatplusHWCC(cnn->Layers[3].SumMat, cnn->Layers[3].OutH);
			testAccAndLoss(cnn, dst);
			// DumpFloatArray(cnn->Layers[cnn->HiddenLayerNum + 1].NablaMat.element[1], cnn->Layers[cnn->HiddenLayerNum + 1].NablaMat.col);
			// DumpFloatArray(cnn->Layers[cnn->HiddenLayerNum + 1].WeightMat.element[1], cnn->Layers[cnn->HiddenLayerNum + 1].WeightMat.col);
		}
	}
	return eloss;
}

/************************************************************************/
/*                              CNN传播                                  */
/************************************************************************/

// Entry point: load the dataset (32x32x3, 10 classes -- CIFAR-10-like),
// describe the network, build it, and train for a fixed number of epochs.
int main()
{
	Predefine pdf;
	InitPredefine(&pdf);
    int TrainSampleNum = 50000;
    int TestSampleNum = 10000;
    int Height = 32;
    int Width = 32;
    int Channel = 3;
    int ClassifyNum = 10;
	int sig;
	// upline:C2MSVL annonatation area
    sig = DR_loadDS(TrainSampleNum, TestSampleNum, Height, Width, Channel, ClassifyNum, &pdf);
    if (sig != 0)
	{
		printf("Dataset parr wrong.\n");
		return sig;
	}
	// downline:C2MSVL annonatation area
	// Layer type codes per NNforwardLayer: 0=FC, 1=CV, 2=DC, 3=MaxPool,
	// 4=AvgPool.  The exact meaning of each LayerParameters entry is defined
	// by CreateNNSpaceAndLoadinPara2Layer -- TODO confirm field layout.
	int LayerType[] = { 
        0,1,3,1,3,0,0,0 };
	int LayerParameters[] = {
		1,5,5,1,1,64,3,
		1,3,3,2,2,
		1,5,5,1,1,64,3,
		1,3,3,2,2,
		384,3,
		192,3,
		10,5 };
	pdf.HiddenLayerNum = 6;
	pdf.WeightInitWayNum = 3;
	pdf.LossFuncNum = 1;	// 1 = cross-entropy (see LossFunction)
	pdf.BatchSize = 64;
	pdf.LayerType = LayerType;
	pdf.LayerParameters = LayerParameters;
	pdf.ParametersTotal = 30;
	pdf.OptFuncNum = 0;	// 0 = BGD, 1 = Adam (see NNpropagationBatch)

	/*int i;
	for (i = 0; i < 784; ++i)
	{
		if (i % 28 == 0) printf("\n");
		printf("%f ", XVal[i]);
	}*/


	//DumpPredefine(pdf);

	DataSet dataset;
	CNN cnn;
	LoadParaFromPredefine(pdf, &dataset, &cnn);
	DataConstruct(&dataset);
	//DumpDataSet(dataset);

	/*for (sig = 0; sig < 28; ++sig)
	{
		DumpFloatArray(dataset.BatchTrainFeature[0].element[sig] + 392, 28);
	}*/

	CreateNNSpaceAndLoadinPara2Layer(&cnn, pdf);
	//DumpCNN(cnn);

	NNWeightInit(&cnn);
	// DumpKWofCNN(cnn);

	// Training loop: one NNpropagation call per epoch.
	int epoch = 10;
	int eph;
	for (eph = 0; eph < epoch; ++eph)
	{
		printf("epoch:%d/%d:\n", eph, epoch);
		NNpropagation(dataset, &cnn);
	}
	// NNpropagationBatch(dataset.BatchSize, &dataset.BatchTrainFeature[0], &dataset.BatchTrainLabel[0], &cnn);

	return 0;
}
