﻿using DenseCRF;
using FCN;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace CNN
{
	/// <summary>Training set container: input images paired with their labels.</summary>
	public class IMGLAB
	{
		// Input images, one Matrix per sample.
		public Matrix[] anno = new Matrix[0];
		// Labels, parallel to anno.
	      public lable[] lab = new lable[0];
	}
    /// <summary>
    /// One training label: the expected network output vector and a numeric
    /// class tag.
    /// </summary>
    public class lable
    {
        // Expected output, one value per output channel.
        public float[] output = Array.Empty<float>();
        // Numeric class identifier for this sample.
        public int tag = 0;
    }
	/// <summary>
	/// Snapshot of trained parameters, produced by NeuralNetwork.Train and
	/// consumed by CNNidentify/FCNidentify.
	/// </summary>
	public class cnnweights
	{
		// Kernel weights: one [in, out] grid per conv-style layer.
		public Matrix[][,] weights;
		// Per-channel biases, parallel to weights.
		// NOTE(review): Train stores no bias entry for a fully-connected output
		// layer, so this array covers conv-style layers only.
		public float[] []bias;
		// Flat weight rows of a fully-connected output layer (one row per class).
		public float[][] fullweights;
	}
	/// <summary>Supported activation functions.</summary>
	public enum activateFun { ReLU, sigmoid }
	public class NeuralNetwork
	{
		// Learning rate used by all gradient-descent updates.
		float alpha = 0.85f;
		// Activation function in effect; overwritten by Train and FCNidentify.
		activateFun ATCFUN = activateFun.ReLU;
		/// <summary>
		/// Trains the network with per-sample stochastic gradient descent and
		/// returns a snapshot of the learned weights.
		/// </summary>
		/// <param name="layers">Network layers, in order.</param>
		/// <param name="anno1">Training images.</param>
		/// <param name="lables">Expected outputs, parallel to anno1.</param>
		/// <param name="numepochs">Number of passes over the training set.</param>
		/// <param name="af">Activation function used throughout training.</param>
		/// <returns>Trained conv/output-layer weights and biases.</returns>
		public cnnweights Train(List<Layer> layers, Matrix[] anno1, lable[] lables, int numepochs = 1, activateFun af= activateFun.sigmoid)
		{
			ATCFUN = af;
			// Count the layers that carry kernel weights: conv layers, plus the
			// output layer when it is convolutional (not fully connected).
			int Ccount = 0;
			foreach (Layer la in layers)
			{
				if (la is convlayer)
					Ccount++;
				if (la is outputlayer)
					if (!(la as outputlayer).isFull)
						Ccount++;

			}
			cnnweights cnnw = new cnnweights();
			cnnw.weights = new Matrix[Ccount][,];
			cnnw.bias = new float[Ccount][];
			int trainNum = anno1.Length;

			float[] loss = new float[trainNum];
			int cc = 0;
			do
			{

				for (int n = 0; n < trainNum; n++)
				{
					//printf("%d\n",n);
					forward(layers, anno1[n]);  // forward propagation: compute each layer's activations
					cnnbp(layers, lables[n].output); // backward propagation: compute each neuron's error gradient

					update(layers, anno1[n]);

					clear(layers);
					//cnnclear(cnn);
					// Compute and record the error energy (sum of squared output errors).
					float l = 0.0f;
					int i;
					outputlayer outlayer = (outputlayer)layers[layers.Count - 1];
					for (i = 0; i < outlayer.outChannels; i++)
						l = l + outlayer.err[i] * outlayer.err[i];
					if (n == 0)
						loss[n] = l / (float)2.0;
					else
						// Exponential moving average of the loss, rounded to 8 decimals.
						loss[n] = (float)Math.Round(loss[n - 1] * 0.99 + 0.01 * l / 2.0,8);
				}
				cc++;
			} while (cc < numepochs);
			// Harvest the trained parameters back out of the layers.
			Ccount = 0;
			foreach (Layer la in layers)
			{
				if (la is convlayer)
				{
					cnnw.weights[Ccount] = (la as convlayer).weights;
					cnnw.bias[Ccount] = (la as convlayer).basicData;
					Ccount++;
				}
				if (la is outputlayer)
				{
					if ((la as outputlayer).isFull)
						// NOTE(review): only the flat weight table is saved here;
						// the fully-connected output layer's basicData (bias) is
						// never stored in cnnw, so the identify methods cannot
						// restore it.
						cnnw.fullweights = (la as outputlayer).wdata;
					else
					{
						cnnw.weights[Ccount] = (la as outputlayer).weights;
						cnnw.bias[Ccount] = (la as outputlayer).basicData;
					}
				}


			}
			return cnnw;

		}
		//void softmax(float[] x, int row, int column)
		//{
		//	for (int j = 0; j < row; ++j)
		//	{
		//		float max = 0.0f;
		//		float sum = 0.0f;
		//		for (int k = 0; k < column; ++k)
		//			if (max < x[k + j * column])
		//				max = x[k + j * column];
		//		for (int k = 0; k < column; ++k)
		//		{
		//			x[k + j * column] =(float)Math.Exp(x[k + j * column] - max);    // prevent data overflow
		//			sum += x[k + j * column];
		//		}
		//		for (int k = 0; k < column; ++k) x[k + j * column] /= sum;
		//	}
		//}
		/// <summary>
		/// Loads trained weights into the layers and runs a forward + backward
		/// pass over every input image (fully-convolutional inference).
		/// </summary>
		/// <param name="layers">Network layers, in order.</param>
		/// <param name="cnnw">Weights produced by Train.</param>
		/// <param name="anno1">Input images to process.</param>
		/// <param name="af">Activation function to use.</param>
		public void FCNidentify(List<Layer> layers, cnnweights cnnw, Matrix[] anno1, activateFun af = activateFun.sigmoid)
		{
			ATCFUN = af;
			int count = 0;
			outputlayer output = null;
			foreach (Layer la in layers)
			{
				if (la is convlayer)
				{
					(la as convlayer).weights = cnnw.weights[count];
					(la as convlayer).basicData = cnnw.bias[count];
					count++;
				}
				if (la is outputlayer)
				{
					output = la as outputlayer;
					if (output.isFull)
					{
						// Fully-connected output: Train stores only the flat
						// weight table for this layer and allocates no bias slot,
						// so cnnw.bias[count] does not exist here (count ==
						// cnnw.bias.Length). The original assigned it
						// unconditionally, which threw IndexOutOfRangeException
						// for isFull networks; the layer keeps its own basicData.
						output.wdata = cnnw.fullweights;
					}
					else
					{
						// Convolutional output layer: Train saved both kernel
						// weights and bias at the current slot.
						output.weights = cnnw.weights[count];
						output.basicData = cnnw.bias[count];
					}
				}
			}
			foreach (Matrix ao in anno1)
			{
				forward(layers, ao);
				fnnbp(layers);
			}
		}
		/// <summary>
		/// Backward sweep used by FCNidentify: walks layers from last to first
		/// and full-convolves each layer's outputs with its rotated kernels.
		/// NOTE(review): matrixout is a local that is discarded at the end of
		/// each iteration, so this pass currently has no observable effect.
		/// It also convolves C3.y (outputs) rather than deltas, and accumulates
		/// the same temp into matrixout[j] once per input channel i — confirm
		/// against the intended FCN upsampling before relying on this method.
		/// </summary>
		void fnnbp(List<Layer> cnn)
		{
			for (int a = cnn.Count - 1; a > 0; a--) 
			{
				Layer C3 = cnn[a];
				Layer S2 = cnn[a-1];
				Matrix[] matrixout = new Matrix[C3.outChannels];
				for (int j = 0; j < C3.outChannels; j++)
				{
					for (int i = 0; i < S2.outChannels; i++)
					{
						// Rotate the kernel 180 degrees so convnFull performs a
						// true convolution rather than correlation.
						float[,] weight = Matrix.rot180(C3.weights[i, j].values);
						float[,] temp;
						temp = Matrix.convnFull(C3.y[j].values, weight, C3.stride, C3.padding, true);
						if (matrixout[j] == null)
							matrixout[j] = new Matrix(temp.GetLength(0), temp.GetLength(1));
						matrixout[j] = Matrix.MatrixAdd(matrixout[j].values, temp);
					}
				}

			}

		}
				/// <summary>
		/// Loads trained weights into the layers, runs one forward pass over a
		/// single image, and returns the index of the strongest output class.
		/// </summary>
		/// <param name="layers">Network layers, in order.</param>
		/// <param name="cnnw">Weights produced by Train.</param>
		/// <param name="anno1">Input image.</param>
		/// <returns>Index of the output channel with the largest activation.</returns>
		public int CNNidentify(List<Layer> layers, cnnweights cnnw, Matrix anno1)
		{
			int count = 0;
			outputlayer output = null;
			foreach (Layer la in layers)
			{
				if (la is convlayer)
				{
					(la as convlayer).weights = cnnw.weights[count];
					(la as convlayer).basicData = cnnw.bias[count];
					count++;
				}
				if (la is outputlayer)
				{
					output = la as outputlayer;
					if (output.isFull)
					{
						// Fully-connected output: Train stores only the flat
						// weight table and allocates no bias slot for this layer,
						// so cnnw.bias[count] is past the end of the array
						// (count == cnnw.bias.Length). The original read it
						// unconditionally (the indentation of the basicData line
						// hid that it sat outside the else), throwing
						// IndexOutOfRangeException for isFull networks.
						output.wdata = cnnw.fullweights;
					}
					else
					{
						output.weights = cnnw.weights[count];
						output.basicData = cnnw.bias[count];
					}
				}
			}
			forward(layers, anno1);
			return vecmaxIndex(output.y, output.outChannels);
		}
		/// <summary>
		/// Returns the index of the largest value among the first
		/// <paramref name="veclength"/> 1x1 matrices in <paramref name="vec"/>.
		/// Fix: the original seeded the running maximum with -1.0f, so any
		/// output vector whose values were all below -1 always reported index 0;
		/// seeding with float.MinValue makes the argmax correct for any range.
		/// </summary>
		int vecmaxIndex(Matrix[] vec, int veclength)
		{
			int maxIndex = 0;
			float maxnum = float.MinValue;
			for (int i = 0; i < veclength; i++)
			{
				float v = vec[i].values[0, 0];
				if (maxnum < v)
				{
					maxnum = v;
					maxIndex = i;
				}
			}
			return maxIndex;
		}
		/// <summary>
		/// Resets each layer's cached per-sample state (deltas d,
		/// pre-activations v, outputs y) to fresh empty arrays of the same
		/// length, so the next sample starts from clean buffers.
		/// </summary>
		private void clear(List<Layer> cnn)
		{
			foreach (Layer layer in cnn)
			{
				if (layer.d != null)
					layer.d = new Matrix[layer.d.Length];
				if (layer.v != null)
					layer.v = new Matrix[layer.v.Length];
				if (layer.y != null)
					layer.y = new Matrix[layer.y.Length];
			}
		}

		/// <summary>
		/// Returns a new matrix equal to <paramref name="mat"/> with every
		/// element multiplied by <paramref name="factor"/>; the input is left
		/// untouched.
		/// </summary>
		float[,] multifactor( float[,] mat, float factor)
		{
			int rows = mat.GetLength(0);
			int cols = mat.GetLength(1);
			var scaled = new float[rows, cols];
			for (int r = 0; r < rows; r++)
			{
				for (int c = 0; c < cols; c++)
				{
					scaled[r, c] = mat[r, c] * factor;
				}
			}
			return scaled;
		}

		/// <summary>
		/// Gradient-descent step for one layer's kernels and biases, using the
		/// deltas already stored in C1.d.
		/// </summary>
		/// <param name="C1">Layer being updated.</param>
		/// <param name="S2">Preceding pooling layer, or null for the first layer.</param>
		/// <param name="inputData">Raw input image (used when S2 is null).</param>
		void updataWB(Layer C1, Poolinglayer S2, Matrix inputData)
		{
			for (int o = 0; o < C1.outChannels; o++)
			{
				for (int i = 0; i < C1.inChannels; i++)
				{
					// Source feeding this kernel: the previous pooling output,
					// or the raw image when this is the first layer.
					Matrix src = (S2 == null) ? inputData : S2.y[i];
					// Weight gradient = input convolved with this channel's delta,
					// scaled by the negative learning rate.
					float[,] grad = src.convolution(C1.d[o].values, C1.stride, C1.padding);
					grad = multifactor(grad, -1 * alpha);
					C1.weights[i, o] = Matrix.MatrixAdd(C1.weights[i, o].values, grad);
				}
				// Bias step: subtract alpha times the summed delta for the channel.
				C1.basicData[o] = C1.basicData[o] - alpha * summat(C1.d[o].values);
			}
		}
		/// <summary>
		/// Applies gradient-descent weight/bias updates to every trainable layer,
		/// using the deltas computed by cnnbp.
		/// </summary>
		/// <param name="cnn">Network layers, in order.</param>
		/// <param name="inputData">Raw input image (consumed by the first conv layer).</param>
		void update(List<Layer> cnn, Matrix inputData)
		{
			for (int a = 0; a < cnn.Count; a++)
			{
				if (cnn[a] is convlayer)
				{
					convlayer C1 = cnn[a] as convlayer;
					Poolinglayer S2 = null;
				
					if (a == 0)
					{
						// First layer: leave S2 null so updataWB reads inputData.
						//tempmx = inputData;
					}
					else
					{
						// NOTE(review): assumes every later conv layer is preceded
						// by a Poolinglayer; the as-cast yields null otherwise.
						S2 = cnn[a - 1] as Poolinglayer;

					}
					updataWB(C1, S2, inputData);
				}
				if (cnn[a] is outputlayer)
				{
					outputlayer O5 =cnn[a] as outputlayer;
					Poolinglayer S4 = cnn[a - 1] as Poolinglayer;
					if (O5.isFull)
					{
						// Fully-connected output: flatten S4's pooled maps into one
						// vector, then step each weight row and bias.
						float[] O5inData = new float[O5.inChannels];
						int w = S4.w;
						int h = S4.h;
						//nSize outSize = { cnn->S4->inputWidth / cnn->S4->mapSize, cnn->S4->inputHeight / cnn->S4->mapSize };
						for (int i = 0; i < (S4.outChannels); i++)
							for (int r = 0; r < w; r++)
								for (int c = 0; c < h; c++)
									O5inData[i * w * h + r * h + c] = S4.y[i].values[r, c];

						for (int j = 0; j < O5.outChannels; j++)
						{
							// w -= alpha * delta * input; b -= alpha * delta.
							for (int i = 0; i < O5.inChannels; i++)
								O5.wdata[j][i] = O5.wdata[j][i] - alpha * O5.d[j].values[0, 0] * O5inData[i];
							O5.basicData[j] = O5.basicData[j] - alpha * O5.d[j].values[0, 0];
						}
					}
					else
					{
						// Convolutional output layer: same kernel update as a conv layer.
						updataWB(O5, S4, inputData);

					}
				}
			}
		}
		/// <summary>
		/// Sum of all elements of a matrix.
		/// Fix: the original read GetLength(0) for BOTH dimensions, so any
		/// non-square matrix was iterated over the wrong bounds — a wide matrix
		/// dropped columns, and a tall matrix indexed out of range.
		/// </summary>
		float summat(float[,] mat)
		{
			float sum = 0.0f;
			int w = mat.GetLength(0);
			int h = mat.GetLength(1); // was GetLength(0)
			for (int i = 0; i < w; i++)
				for (int j = 0; j < h; j++)
					sum = sum + mat[i, j];
			return sum;
		}
		/// <summary>
		/// Backward pass for a fully-connected output layer: computes per-class
		/// output errors and deltas, then distributes them back onto the
		/// preceding pooling layer S4 through the flat weight table.
		/// </summary>
		void backoutput(outputlayer O5, Poolinglayer S4, float[] outputData)
		{
			
			// Output error: prediction minus target, per class.
			for (int i = 0; i < O5.outChannels; i++)
				O5.err[i] = O5.y[i].values[0, 0] - outputData[i];

			/* compute backward, from the last layer toward the first */
			// Output layer O5: delta = err * f'(y).
			for (int i = 0; i < O5.outChannels; i++)
			{
				if (O5.d[i] == null)
					O5.d[i] = new Matrix(1, 1);
				// NOTE(review): activation_ReLU/activation_Sigma applied to y is
				// used here as the derivative factor — confirm those helpers
				// return the derivative rather than the activation itself.
				if (ATCFUN == activateFun.ReLU)
					O5.d[i].values[0, 0] = O5.err[i] * (Matrix.activation_ReLU(O5.y[i].values).values[0, 0]);
				if (ATCFUN == activateFun.sigmoid)
					O5.d[i].values[0, 0] = O5.err[i] * (Matrix.activation_Sigma(O5.y[i].values).values[0, 0]);
			}



			
			// Scatter the output deltas back over S4: each pooled cell receives
			// the sum over classes of (class delta * its weight for that cell).
			int outSizer = S4.w;
			int outSizec = S4.h;
			for (int i = 0; i < S4.outChannels; i++)
				for (int r = 0; r < outSizer; r++)
					for (int c = 0; c < outSizec; c++)
						for (int j = 0; j < O5.outChannels; j++)
						{
							// Flat index of (channel i, row r, col c) in a weight row.
							int wInt = i * outSizec * outSizer + r * outSizec + c;
							if (S4.d[i] == null)
								S4.d[i] = new Matrix(outSizer, outSizec);
							S4.d[i].values[r, c] = S4.d[i].values[r, c] + O5.d[j].values[0, 0] * O5.wdata[j][wInt];
							//if (S4.d[i].values[r, c] > 0)
							//{ 
							//}
						}
		}
		/// <summary>
		/// Backward pass for a convolutional (non fully-connected) output layer:
		/// computes 1x1 output errors and deltas, then back-propagates them to
		/// the preceding pooling layer by full convolution with rotated kernels.
		/// </summary>
		void outputbackcov(Layer C3, Poolinglayer S2, float[] outputData)
		{
			// Output error: prediction minus target, per channel.
			for (int i = 0; i < C3.outChannels; i++)
				C3.err[i] = C3.y[i].values[0, 0] - outputData[i];

			/* compute backward, from the last layer toward the first */
			// Output layer: delta = err * f'(y).
			for (int i = 0; i < C3.outChannels; i++)
			{
				if (C3.d[i] == null)
					C3.d[i] = new Matrix(1, 1);
				// NOTE(review): activation_ReLU/activation_Sigma applied to y is
				// used here as the derivative factor — confirm those helpers
				// return the derivative rather than the activation itself.
				if (ATCFUN == activateFun.ReLU)
					C3.d[i].values[0, 0] = C3.err[i] * (Matrix.activation_ReLU(C3.y[i].values).values[0, 0]);
				if (ATCFUN == activateFun.sigmoid)
					C3.d[i].values[0, 0] = C3.err[i] * (Matrix.activation_Sigma(C3.y[i].values).values[0, 0]);
			}

			// Propagate each output delta back through its kernel (rotated 180
			// degrees) and accumulate over output channels into S2's delta maps.
			for (int i = 0; i < S2.outChannels; i++)
			{
				for (int j = 0; j < C3.outChannels; j++)
				{
					float[,] weight = Matrix.rot180(C3.weights[i, j].values);
					float[,] temp;
					// Final flag is set when C3 and S2 spatial sizes differ —
					// presumably selects "full" padding; confirm in convnFull.
					temp = Matrix.convnFull(C3.d[j].values, weight, C3.stride, C3.padding, !(C3.w == S2.w));
					if (S2.d[i] == null)
						S2.d[i] = new Matrix(temp.GetLength(0), temp.GetLength(1));
					S2.d[i] = Matrix.MatrixAdd(S2.d[i].values, temp);
				}
			}
		}
		/// <summary>
		/// Back-propagates deltas from conv layer C3 into the preceding pooling
		/// layer S2: each C3 delta map is fully convolved with its kernel
		/// rotated 180 degrees, and the results are summed per input channel.
		/// </summary>
		void backcov(Layer C3, Poolinglayer S2)
		{
			for (int inCh = 0; inCh < S2.outChannels; inCh++)
			{
				for (int outCh = 0; outCh < C3.outChannels; outCh++)
				{
					// Rotate so convnFull performs convolution, not correlation.
					float[,] kernel = Matrix.rot180(C3.weights[inCh, outCh].values);
					// Flag set when C3 and S2 spatial sizes differ (see convnFull).
					bool sizesDiffer = !(C3.w == S2.w);
					float[,] delta = Matrix.convnFull(C3.d[outCh].values, kernel, C3.stride, C3.padding, sizesDiffer);
					if (S2.d[inCh] == null)
						S2.d[inCh] = new Matrix(delta.GetLength(0), delta.GetLength(1));
					S2.d[inCh] = Matrix.MatrixAdd(S2.d[inCh].values, delta);
				}
			}
		}
		/// <summary>
		/// Back-propagation driver: walks the layers from last to first and
		/// computes each layer's delta maps from its successor.
		/// </summary>
		/// <param name="cnn">Network layers, in order.</param>
		/// <param name="outputData">Target output vector for the current sample.</param>
		void cnnbp(List<Layer> cnn, float[] outputData)
		{
			for (int a = cnn.Count - 1; a > 0; a--)
			{
				if (cnn[a] is outputlayer)
				{
					outputlayer O5 = cnn[a] as outputlayer;
					Poolinglayer S4 = cnn[a - 1] as Poolinglayer;
					// Two output flavors: fully connected vs convolutional.
					if (O5.isFull)
						backoutput(O5, S4, outputData);
					else
						outputbackcov(O5, S4, outputData);
				}
				else if (cnn[a] is convlayer)
				{
					convlayer C3 = cnn[a] as convlayer;
					Poolinglayer S2 = cnn[a - 1] as Poolinglayer;
					backcov(C3, S2);
				}
				else if (cnn[a] is Poolinglayer)
				{
					// Pooling layer: upsample its deltas back onto the preceding
					// conv layer and apply the activation derivative.
					Poolinglayer S4 = cnn[a] as Poolinglayer;
					convlayer C3 = cnn[a - 1] as convlayer;
					for (int i = 0; i < C3.outChannels; i++)
					{
						// Expand the pooled delta to the conv layer's resolution.
						float[,] C3e = Matrix.kroneckerAvg(S4.d[i].values, S4.stride);
						for (int r = 0; r < C3e.GetLength(0); r++)
							for (int c = 0; c < C3e.GetLength(1); c++)
							{
								if (C3.d[i] == null)
									C3.d[i] = new Matrix(C3e.GetLength(0), C3e.GetLength(1));
								// delta = upsampled delta * f'(y) / stride^2
								// (average pooling spreads the delta evenly).
								if (ATCFUN == activateFun.ReLU )
									C3.d[i].values[r, c] = C3e[r, c] * getRELUGradFromY(C3.y[i].values[r, c]) / (float)(S4.stride * S4.stride);
								if (ATCFUN == activateFun.sigmoid)
									C3.d[i].values[r, c] = C3e[r, c] * sigma_derivation(C3.y[i].values[r, c]) / (float)(S4.stride * S4.stride);
							}

					}
				}
               
			}

		}
		/// <summary>
		/// Derivative of ReLU expressed via its output: 1 for positive values,
		/// otherwise 0 (including exactly zero).
		/// </summary>
		float getRELUGradFromY(float x) => x > 0.0f ? 1.0f : 0.0f;
		/// <summary>
		/// Sigmoid derivative expressed in terms of the sigmoid OUTPUT y
		/// (not the pre-activation): y * (1 - y).
		/// </summary>
		float sigma_derivation(float y) => y * (1 - y);
		/// <summary>
		/// Forward convolution for one layer: convolves every input channel with
		/// its kernel, sums the results into v per output channel, records the
		/// output size on the layer, then applies the activation (with bias) to
		/// produce y.
		/// </summary>
		void conv(Layer C1, Matrix[] inputData)
		{
			int i, j;
			for (i = 0; i < (C1.outChannels); i++)
			{
				for (j = 0; j < (C1.inChannels); j++)
				{
					float[,] temp;
					  
						temp = inputData[j].convolution(C1.weights[j, i].values, C1.stride, C1.padding);// forward propagation

					//	float** mapout = cov(cnn->C1->mapData[j][i], mapSize, inputData, inSize, valid);
					if (C1.v[i] == null)
						C1.v[i] = new Matrix(temp.GetLength(0), temp.GetLength(1));
					// Remember the spatial size of this layer's output maps.
					C1.w = temp.GetLength(0);
					C1.h = temp.GetLength(1);
					// Accumulate contributions from every input channel.
					C1.v[i] = Matrix.MatrixAdd(C1.v[i].values, temp);

				}
				if (C1.y[i] == null)
					C1.y[i] = new Matrix(C1.v[i].values.GetLength(0), C1.v[i].values.GetLength(1));
				// y = activation(v + bias).
				if (ATCFUN == activateFun.sigmoid)
					C1.y[i] = Matrix.activation_Sigma(C1.v[i].values, C1.basicData[i]);
				if (ATCFUN == activateFun.ReLU)
					C1.y[i] = Matrix.activation_ReLU(C1.v[i].values, C1.basicData[i]);
			}
		}
		/// <summary>
		/// Average-pools each output channel of conv layer C1 into pooling layer
		/// S2, recording the pooled width/height on S2.
		/// (Name kept as "pooing" — it is referenced by forward().)
		/// </summary>
		void pooing(Poolinglayer S2, convlayer C1)
		{
			for (int ch = 0; ch < S2.outChannels; ch++)
			{
				if (S2.y[ch] == null)
				{
					S2.y[ch] = new Matrix(C1.v[ch].values.GetLength(0), C1.v[ch].values.GetLength(1));
				}
				S2.y[ch].values = Matrix.averPooling(C1.y[ch].values, S2.stride);
				// Track the pooled map size for downstream layers.
				S2.w = S2.y[ch].values.GetLength(0);
				S2.h = S2.y[ch].values.GetLength(1);
			}
		}
		/// <summary>
		/// Forward pass: runs the input image through every layer in order —
		/// convolution, average pooling, and finally the output layer (either
		/// fully connected over the flattened maps or fully convolutional).
		/// </summary>
		void forward(List<Layer>  cnn, Matrix inputData)
		{
			for (int a = 0; a < cnn.Count; a++)
			{
				if (cnn[a] is convlayer)
				{
					convlayer C1 = (convlayer)cnn[a];
					// First layer reads the raw image; later layers read the
					// previous layer's outputs.
					if (a == 0)
						conv(C1, new Matrix[] { inputData });
					else
					{
						Layer L1 = cnn[a - 1];
						conv(C1, L1.y);
					}

				}
				if (cnn[a] is Poolinglayer)
				{
					Poolinglayer S1 = (Poolinglayer)cnn[a];
					convlayer C1 = (convlayer)cnn[a - 1];
					pooing(S1, C1);
				}
				if (cnn[a] is outputlayer)
				{
					outputlayer O5 = (outputlayer)cnn[a];
					Poolinglayer S4 = (Poolinglayer)cnn[a - 1];
					if (O5.isFull)
					{
						// Flatten S4's pooled maps (channel-major, then row, then
						// column) into one input vector for the dense layer.
						float[] O5inData = new float[O5.inChannels];
						int outSizer = S4.w;
						int outSizec = S4.h;
						for (int i = 0; i < (S4.outChannels); i++)
							for (int r = 0; r < outSizer; r++)
								for (int c = 0; c < outSizec; c++)
								{
									O5inData[i * outSizer * outSizec + r * outSizec + c] = S4.y[i].values[r, c];
								}

						// Fully connected: e.g. 12 maps of 4x4 flatten to a 192-long
						// vector; O5.wdata holds one weight row per class (10x192),
						// each row dotted with the vector to give 10 class scores.
						// Fully convolutional (the else branch): each of the 12 maps
						// is convolved with an NxN kernel and accumulated into one
						// map per output channel, giving 10 score maps instead.
						nnffall(O5.v, O5inData, O5.wdata, O5.basicData, O5.inChannels, O5.outChannels);
						for (int i = 0; i < O5.outChannels; i++)
						{
							// Apply the activation (with bias) to each 1x1 score.
							if (ATCFUN == activateFun.ReLU)
								O5.y[i] = Matrix.activation_ReLU(O5.v[i].values, O5.basicData[i]);
							if (ATCFUN == activateFun.sigmoid)
								O5.y[i] = Matrix.activation_Sigma(O5.v[i].values, O5.basicData[i]);
						}
					}
					else
					{
						// Convolutional output: plain convolution over S4's maps.
						conv(O5, S4.y);
					}
					//nSize nnSize = { cnn->O5->inputNum, cnn->O5->outputNum };
					//nnff(cnn->O5->v, O5inData, cnn->O5->wData, cnn->O5->basicData, nnSize);
					 
				
				
				}
			}


			 
		 
		}
		/// <summary>
		/// Fully-connected forward step: each output neuron receives the dot
		/// product of the flattened input with its weight row, plus its bias,
		/// stored as a 1x1 matrix in output[o].
		/// </summary>
		void nnffall(Matrix[] output, float[] input, float[][] wdata, float[] bas, int inc, int outc)
		{
			for (int o = 0; o < outc; o++)
			{
				if (output[o] == null)
				{
					output[o] = new Matrix(1, 1);
				}
				output[o].values[0, 0] = vecMulti(input, wdata[o], inc) + bas[o];
			}
		}
		/// <summary>
		/// Dot product of the first <paramref name="vecL"/> elements of two
		/// vectors.
		/// </summary>
		float vecMulti(float[] vec1, float[] vec2, int vecL)
		{
			float acc = 0;
			for (int k = 0; k < vecL; k++)
			{
				acc += vec1[k] * vec2[k];
			}
			return acc;
		}
	}

}
