#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
using namespace std;

// http://www.cnblogs.com/yeahgis/archive/2012/07/13/2590485.html
// Gaussian random number via the Marsaglia polar method: mean 0, variance 1.
// Each accepted pair (v1, v2) yields two independent samples; the second one
// is cached in static state and returned on the next call.
double gaussrand()
{
    static double v1, v2, sumSq;
    static bool useCached = false;
    double sample;

    if (!useCached) {
        // Rejection-sample a point inside the unit circle (excluding the origin).
        do {
            double u1 = (double)rand() / RAND_MAX;
            double u2 = (double)rand() / RAND_MAX;

            v1 = 2 * u1 - 1;
            v2 = 2 * u2 - 1;
            sumSq = v1 * v1 + v2 * v2;
        } while (sumSq >= 1 || sumSq == 0);

        sample = v1 * sqrt(-2 * log(sumSq) / sumSq);
    } else {
        // Second sample from the pair drawn on the previous call.
        sample = v2 * sqrt(-2 * log(sumSq) / sumSq);
    }

    useCached = !useCached;

    return sample;
}

// Shared, reference-counted handle to a heap-allocated double array.
// NOTE(review): the original comment said "no memory release, keep it simple",
// but the custom deleter below DOES free the array when the last owner dies.
typedef shared_ptr<double> DoublePtr;
// Allocates an uninitialized array of `size` doubles wrapped in a DoublePtr.
// default_delete<double[]> ensures delete[] (not scalar delete) is used.
inline DoublePtr newDoubleArray(int size)
{
	double *p = new double[size];
	return DoublePtr(p, default_delete<double[]>());
}

// Lightweight matrix. Copying a Matrix copies only the metadata (row/col/size)
// and SHARES the underlying data buffer; it does not deep-copy the elements.
struct Matrix
{
	int row, col, size;
	DoublePtr data;

	// Creates a row x col matrix with every element zero-initialized.
	Matrix(int _row=1, int _col=1) : row(_row), col(_col)
	{
		size = row * col;
		data = newDoubleArray(size);
		memset(data.get(), 0, sizeof(double) * size);
	}

	// Row accessor: m[i][j] addresses element (i, j).
	// Fixed: also reject negative indices (original checked only the upper bound),
	// so a bad index traps in debug builds instead of reading out of bounds.
	inline double* operator[](int i) {
		assert(0 <= i && i < row);
		return data.get() + i * col;
	}
};

// Stream a matrix as "[ (R x C)" followed by one bracketed, comma-separated
// row per line, closed by "]".
ostream& operator<<(ostream& out, Matrix w)
{
	out << "[ (" << w.row << " x " << w.col << ")" << endl;
	for (int r = 0; r < w.row; ++r) {
		out << "\t[";
		const char *sep = "";
		for (int c = 0; c < w.col; ++c) {
			out << sep << w[r][c];
			sep = ",";
		}
		out << "]" << endl;
	}
	out << "]";
	return out;
}

// Lightweight vector. Copying a Vector copies only the metadata (size) and
// SHARES the underlying data buffer; it does not deep-copy the elements.
struct Vector
{
	int size;
	DoublePtr data;

	// Creates a vector of `size` zero-initialized elements.
	Vector(int _size=1) : size(_size)
	{
		data = newDoubleArray(size);
		memset(data.get(), 0, sizeof(double) * size);
	}

	// Element accessor.
	// Fixed: also reject negative indices (original checked only the upper bound),
	// so a bad index traps in debug builds instead of reading out of bounds.
	inline double &operator[](int x)
	{
		assert(0 <= x && x < size);
		return data.get()[x];
	}
};

// Stream a vector as "[ (N) e0,e1,...,eN-1]".
ostream& operator<<(ostream& out, Vector v)
{
	out << "[ (" << v.size << ") ";
	const char *sep = "";
	for (int k = 0; k < v.size; ++k) {
		out << sep << v[k];
		sep = ",";
	}
	out << "]";
	return out;
}

// Matrix-vector product: returns w * v (length w.row).
// Fixed: assert the dimensions agree up front, instead of relying on the
// element accessor to trap mid-computation.
Vector operator*(Matrix w, Vector v)
{
	assert(w.col == v.size);
	Vector ret(w.row);
	for(int i = 0;i < w.row;i++) {
		for(int j = 0;j < w.col;j++) {
			ret[i] += w[i][j] * v[j];
		}
	}
	return ret;
}

// Element-wise (Hadamard) product. NOTE: despite the original "dot product"
// comment, this returns a vector of x[i]*y[i], not a scalar — exactly what
// backpropagation needs for delta * f'(z).
// Fixed: assert the two vectors have the same length.
Vector operator*(Vector x, Vector y)
{
	assert(x.size == y.size);
	Vector ret(x.size);
	for(int i = 0;i < x.size;i++) {
		ret[i] = x[i] * y[i];
	}
	return ret;
}

// Multiplies the transpose of w by v: returns w^T * v (length w.col),
// without materializing the transposed matrix.
// Fixed: assert the dimensions agree up front.
Vector TandMul(Matrix w, Vector v)
{
	assert(w.row == v.size);
	Vector ret(w.col);
	for(int i = 0;i < w.col;i++) {
		for(int j = 0;j < w.row;j++) {
			ret[i] += w[j][i] * v[j];
		}
	}
	return ret;
}

// Element-wise vector addition.
// Fixed: assert the two vectors have the same length (the original would
// read past the end of a shorter y).
Vector operator+(Vector x, Vector y)
{
	assert(x.size == y.size);
	Vector ret(x.size);
	for(int i = 0;i < x.size;i++) {
		ret[i] = x[i] + y[i];
	}
	return ret;
}

// Scalar-vector product: returns x * y[i] for every element.
Vector operator*(double x, Vector y)
{
	Vector scaled(y.size);
	int k = 0;
	while (k < y.size) {
		scaled[k] = x * y[k];
		++k;
	}
	return scaled;
}

// Scalar on the right: v * s, identical to s * v.
Vector operator*(Vector x, double y)
{
	Vector scaled(x.size);
	for (int k = 0; k < x.size; ++k)
		scaled[k] = y * x[k];
	return scaled;
}

// Cost-function interface. calc() returns the scalar loss for (output, target);
// propagateDelta() returns dCost/dOutput for backpropagation. This base
// implementation is a no-op placeholder: zero loss and a zero gradient vector.
struct CostFun
{
	// Fixed: polymorphic base needs a virtual destructor so deleting a derived
	// object through a CostFun* is well-defined.
	virtual ~CostFun() = default;

	virtual double calc(Vector x, Vector y)
	{
		return 0;
	}

	// Convenience call syntax: costFun(x, y) forwards to calc().
	virtual double operator()(Vector x, Vector y)
	{
		return calc(x,y);
	}

	// Gradient of the cost w.r.t. the network output (all zeros here).
	virtual Vector propagateDelta(Vector output, Vector y)
	{
		return Vector(output.size);
	}
};

// Squared-error cost: 0.5 * sum_i (x[i] - y[i])^2.
struct SqrCostFun: CostFun
{
	virtual double calc(Vector x, Vector y)
	{
		double total = 0;
		for (int k = 0; k < x.size; ++k) {
			double diff = x[k] - y[k];
			total += diff * diff;
		}
		return total / 2;
	}

	// Gradient w.r.t. the output: -(y - output), i.e. output - y element-wise.
	virtual Vector propagateDelta(Vector output, Vector y)
	{
		Vector grad(output.size);
		for (int k = 0; k < output.size; ++k) {
			grad[k] = output[k] - y[k];
		}
		return grad;
	}
};

// Shared singleton instance used as the program's default cost function.
SqrCostFun SqrCostFunSingleton;

// Activation-function interface. The base implementation is the identity:
// forward(v) == v with derivative 1. Scalar overloads do the math; the
// Vector overloads apply them element-wise.
struct Activator
{
	// Fixed: polymorphic base needs a virtual destructor so deleting a derived
	// object through an Activator* is well-defined.
	virtual ~Activator() = default;

	// forward
	virtual double forward(double v) 
	{
		return v;
	}

	// Convenience call syntax: activator(v) forwards to forward().
	virtual double operator()(double v)
	{
		return forward(v);
	}

	// Element-wise forward activation of a whole vector.
	virtual Vector operator()(Vector v)
	{
		Vector ret(v.size);
		for(int i = 0;i < v.size;i++) {
			ret[i] = forward(v[i]);
		}
		return ret;
	}

	// Derivative at a scalar.
	virtual double derive(double v)
	{
		return 1;
	}

	// Element-wise derivative over a vector.
	virtual Vector derive(Vector v)
	{
		Vector ret(v.size);
		for(int i = 0;i < ret.size;i++) {
			ret[i] = derive(v[i]);
		}
		return ret;
	}
};

// Sigmoid activation: f(v) = 1 / (1 + e^-v).
struct SigmoidActivator : Activator
{
	virtual double forward(double v)
	{
		return 1 / (1 + exp(-v));
	}

	// f'(v) = f(v) * (1 - f(v)).
	// Fixed: the original exp(-v) / (1 + exp(-v))^2 form evaluates inf/inf = NaN
	// once exp(-v) overflows (v very negative). Computing via the sigmoid value
	// itself underflows cleanly to 0 at both extremes.
	virtual double derive(double v)
	{
		double s = 1 / (1 + exp(-v));
		return s * (1 - s);
	}
};

// Shared singleton instance used as the default activator for layers.
SigmoidActivator SigmoidActivatorSingleton;

// One layer of the NN.
// 1. The input is not counted as a layer.
// 2. The layer's w matrix maps the PREVIOUS layer's output into this layer
//    (differs slightly from Andrew Ng's convention).
// 3. Likewise, b is the bias from the previous layer into this layer.
struct Layer
{
	// Number of outputs of the previous layer, excluding bias.
	int inSize;
	// Number of outputs of this layer.
	int outSize;

	Activator &activator;
	Matrix w;
	Vector b;

	// Fills p[0..size) with small Gaussian samples.
	void initWeights(double *p, int size)
	{
		// Initialize from a normal distribution scaled by 0.01.
		for(int i = 0;i < size;i++) {
			p[i] = gaussrand() * 0.01;
		}
	}

	// NOTE(review): the initializer list order (w, b, activator) differs from
	// the declaration order (activator, w, b); harmless here since the
	// initializers are independent, but it triggers -Wreorder.
	Layer(int _inSize=1, int _outSize=1, Activator &_activator= SigmoidActivatorSingleton):
		inSize(_inSize),
		outSize(_outSize),
		w(_outSize, _inSize),
		b(_outSize),
		activator(_activator)
	{
		initWeights(w.data.get(), w.size);
		initWeights(b.data.get(), b.size);
	}

	// Activation (a) and pre-activation (z) saved by the latest forward pass.
	Vector a;
	Vector z;
	// Forward pass; `in` is the previous layer's output. Caches z and a.
	Vector operator()(Vector in)
	{
		z = w * in + b;
		return a = activator(z);
	}

	// Delta saved by the latest backward pass.
	Vector delta;
	// Propagates this layer's delta back through w: returns w^T * delta.
	Vector propagateDelta()
	{
		return TandMul(w, delta);
	}

	// Gradient-descent parameter update.
	// alpha is the learning rate; prevA is the previous layer's output.
	void updateParameters(double alpha, Vector prevA)
	{
		b = b + (-alpha) * delta;
		Matrix nw(w.row, w.col);
		for(int i = 0;i < w.row;i++) {
			for(int j = 0;j < w.col;j++) {
				nw[i][j] = w[i][j] - alpha * prevA[j] * delta[i];
			}
		}
		w = nw;
	}

	// Gradient checking a la Andrew Ng: compare the analytic BP gradients
	// against numeric central differences of the cost. Temporarily perturbs
	// b and w in place, restoring each entry afterwards.
	void checkBp(Layer layerList[], int nLayer, Vector input, Vector y, CostFun& costFun, double alpha, Vector prevA)
	{
		// Local declaration of the global forward() defined later in this file.
		Vector forward(Layer layerList[], int nLayer, Vector input);

		// Analytic gradients produced by the BP pass.
		Vector db = delta;
		Matrix dw(w.row, w.col);
		for(int i = 0;i < w.row;i++) {
			for(int j = 0;j < w.col;j++) {
				dw[i][j] = prevA[j] * delta[i];
			}
		}

		// Numeric gradients via central differences.
		// 1. Perturb each bias entry by +/- EPS.
		double EPS = 0.0001;
		for(int i = 0;i < b.size;i++) {
			double tmp = b[i];
			// +eps
			b[i] = tmp + EPS;
			Vector output1 = forward(layerList, nLayer, input);
			double err1 = costFun(output1, y);
			// -eps
			b[i] = tmp - EPS;
			Vector output2 = forward(layerList, nLayer, input);
			double err2 = costFun(output2, y);

			// Restore the original value.
			b[i] = tmp;

			// Numeric estimate of dCost/db[i].
			// NOTE(review): the v > 0 guard skips verification whenever the
			// numeric gradient is zero or negative — confirm this is intended.
			double v = (err1 - err2) / (2 * EPS);
			if(v > 0) {
				double x = fabs( (v-db[i]) / v);
				if(x > 0.0001) {
					cerr << "BP算法结果误差太大了" << endl;
				}
			}
		}

		// 2. Perturb each weight entry by +/- EPS.
		for(int i = 0;i < w.row;i++) {
			for(int j = 0;j < w.col;j++) {
				double tmp = w[i][j];
				// +eps
				w[i][j] = tmp + EPS;
				Vector output1 = forward(layerList, nLayer, input);
				double err1 = costFun(output1, y);
				// -eps
				w[i][j] = tmp - EPS;
				Vector output2 = forward(layerList, nLayer, input);
				double err2 = costFun(output2, y);

				// Restore the original value.
				w[i][j] = tmp;

				// Numeric estimate of dCost/dw[i][j]; same caveat as above.
				double v = (err1 - err2) / (2 * EPS);
				if(v > 0) {
					double x = fabs( (v-dw[i][j]) / v);
					if(x > 0.0001) {
						cerr << "BP算法结果误差太大了" << endl;
					}
				}
			}
		}
	}
};

// Dump a layer's parameters and cached forward/backward state for debugging.
ostream& operator<<(ostream& out, Layer& layer)
{
	out << "Layer {" << endl
	    << "w = " << layer.w << endl
	    << "b = " << layer.b << endl
	    << "z = " << layer.z << endl
	    << "a = " << layer.a << endl
	    << "delta = " << layer.delta << endl
	    << "}" << endl;
	return out;
}

// Full forward pass: feeds `input` through every layer in order and returns
// the final layer's activation. Each Layer caches its own z and a.
Vector forward(Layer layerList[], int nLayer, Vector input)
{
	Vector activation = input;
	for (int idx = 0; idx < nLayer; ++idx)
		activation = layerList[idx](activation);
	return activation;
}

// One backpropagation + gradient-descent step for a single training example.
// Assumes forward() was just run so every layer's cached a and z are current.
// input: network input, y: target, alpha: learning rate.
void backward(Layer layerList[], int nLayer, Vector input, Vector y, CostFun& costFun, double alpha)
{
	// Backpropagate the deltas, starting from the output layer.
	Layer &lastLayer = layerList[nLayer - 1];
	// For the squared-error cost this is: -(y - a) * f'(z)
	lastLayer.delta = costFun.propagateDelta(lastLayer.a, y) * lastLayer.activator.derive(lastLayer.z);

	for(int i = nLayer - 2;i >= 0;i--) {
		Layer &layer = layerList[i];
		Layer &nextLayer = layerList[i + 1];
		layer.delta = nextLayer.propagateDelta() * layer.activator.derive(layer.z);
	}

	// Verify BP correctness via numeric gradient checking (done only once,
	// on the very first backward pass; static flag guards repeats).
	static bool hasDoneBpChecking = false;
	if(!hasDoneBpChecking) {
		hasDoneBpChecking = true;
		for(int i = 0;i < nLayer;i++) {
			layerList[i].checkBp(layerList, nLayer, input, y, costFun, alpha, i == 0 ? input : layerList[i - 1].a);
		}
	}

	// Update every layer's w and b (the first layer's "previous activation"
	// is the raw network input).
	for(int i = 0;i < nLayer;i++) {
		layerList[i].updateParameters(alpha, i == 0 ? input : layerList[i - 1].a);
	}
}

int main()
{
	srand(100);

	// NN网络结构
	Layer layerList[] = {
		Layer(2, 2), // 隐藏层，input size
		Layer(2, 1), // 输出层，output size
	};

	// Cost fun
	CostFun &costFun = SqrCostFunSingleton;

	// 不包括输入层在内的层的个数
	int nLayer = sizeof(layerList) / sizeof(layerList[0]);
	int nInput = layerList[0].inSize;
	int nOuptut = layerList[nLayer - 1].outSize;

	// 测试xor
	int xs[4][2] = {
		{0,0},
		{0,1},
		{1,0},
		{1,1}
	};
	int ys[4] = {
		0,
		1,
		1,
		0
	};

	for(int step = 0;step < 100000;step++) {
		double avgError = 0;
		for(int i = 0;i < 4;i++) {
			Vector x(2);
			for(int j = 0;j < 2;j++) {
				x[j] = xs[i][j];
			}

			Vector y(1);
			y[0] = ys[i];

			Vector output = forward(layerList, nLayer, x);
			double error = SqrCostFunSingleton(output, y);
			avgError += error;

			backward(layerList, nLayer, x, y, SqrCostFunSingleton, 0.1);
		}
		avgError /= 4;

		cout << "after " << step << " steps, error = " << avgError << endl;
	}

	return 0;
}

