#include <iostream>
#include <cstdlib>
#include <cassert>
#include <string>
#include <cstring>
#include <fstream>
#include <vector>
#include <memory>
#include <cstdlib>
#include <cmath>
#include <ctime>
using namespace std;

#define CPU 0
#define GPU 1

#ifndef GPU
#define __host__ 
#define __device__
#define expf exp
#define logf log
#endif

#ifdef GPU
#include <cuda_runtime.h>
#include <math_functions.h>
// Abort the process with a diagnostic if a CUDA runtime call failed.
// Used through the CK(...) macro below so every call site reports its
// own file and line number.
void CheckCudaReturnCode(cudaError_t code, const char *fileName, int lineNo)
{
	if(code != cudaSuccess) {
		cerr << "Cuda call failed at " << fileName << ":" << lineNo 
			<< " " << cudaGetErrorString(code) << endl;
		exit(-1);
	}
}
#define CK(x) CheckCudaReturnCode((x), __FILE__, __LINE__)


// Pick a 1-D block size for a launch over `size` elements: capped at
// 1024 (hardware max threads per block), then rounded UP to a multiple
// of 32 (the warp size).
inline size_t get_thread_cnt(size_t size)
{
	size = min(size, 1024UL);

	// round up to the next multiple of 32
	size += 31;
	size &= ~31;
	return size;
}

// Integer ceiling division: smallest q such that q * y >= x.
// Rewritten from the classic (x + y - 1) / y form, which silently
// overflows when x is within y of SIZE_MAX.
inline size_t ceil(size_t x, size_t y)
{
	size_t q = x / y;
	return (x % y) ? q + 1 : q;
}
#endif

// http://www.cnblogs.com/yeahgis/archive/2012/07/13/2590485.html
// Gaussian random number with mean 0 and variance 1
// (Marsaglia polar method; produces two samples per rejection round).
double gaussrand()
{
    static double v1, v2, s;
    static int spare_ready = 0;
    double x;

    if(!spare_ready) {
        // Rejection-sample a point uniformly inside the unit circle.
        do {
            double u1 = (double)rand() / RAND_MAX;
            double u2 = (double)rand() / RAND_MAX;

            v1 = 2 * u1 - 1;
            v2 = 2 * u2 - 1;
            s = v1 * v1 + v2 * v2;
        } while(s >= 1 || s == 0);

        x = v1 * sqrt(-2 * log(s) / s);
    } else {
        // Second coordinate of the previous draw is still usable.
        x = v2 * sqrt(-2 * log(s) / s);
    }

    spare_ready = !spare_ready;
    return x;
}

// Small-magnitude Gaussian sample (stddev 0.01) used to initialize
// network weights and biases.
float gaussrand_f()
{
	double g = gaussrand();
	return float(g * 0.01);
}

// Primary template; only the CPU (=0) and GPU (=1) specializations
// defined below are ever instantiated.
template<size_t IS_GPU>
struct Matrix {};

// Dense row-major float matrix stored in host memory.
// Ownership is manual: copies alias the same buffer, and free() must be
// called exactly once per owning allocation.
template<>
struct Matrix<CPU>
{
	size_t row, col, size;
	float *data;

	// Allocate a zero-initialized row x col matrix.
	Matrix(size_t _row, size_t _col) :
		row(_row), col(_col), size(_row * _col)
		{
			assert(row > 0 && col > 0);
			data = (float *)malloc(size * sizeof(float));
			assert(data);
			memset(data, 0, row * col * sizeof(float));
		}

	// Wrap an existing buffer without copying (no ownership taken).
	Matrix(size_t _row, size_t _col, float *_data) :
		row(_row), col(_col), size(_row * _col), data(_data)
		{
			assert(row > 0 && col > 0 && data);
		}

	// View of rows [from, to); shares storage with this matrix.
	Matrix splice(size_t from, size_t to)
	{
		assert(from < to && to <= row);
		size_t offset = from * col;

		Matrix<CPU> ret(to - from, col, data + offset);
		return ret;
	}

	// Human-readable dimensions, e.g. "3 x 2".
	string shape() const {
		char buf[100];
		sprintf(buf, "%d x %d", (int)row, (int)col);
		return buf;
	}

	// Release the underlying buffer (call only on owning matrices).
	void free()
	{
		assert(data);
		::free(data);
		data = NULL;
	}

	// Fill every element with v.
	void init(float v)
	{
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				(*this)[i][j] = v;
			}
		}
	}

	// Fill with small Gaussian noise (weight initialization).
	void init_gauss()
	{
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				(*this)[i][j] = gaussrand_f();
			}
		}
	}

	// Row access: m[i] is a pointer to row i.
	float *operator[](size_t i) {
		assert(i < row);
		return data + i * col;
	}

	const float *operator[] (size_t i) const {
		assert(i < row);
		return data + i * col;
	}

	// Abort with a diagnostic if any element differs from rhs by more
	// than 1e-7 (used to cross-check CPU vs GPU results).
	void assert_eq(const Matrix& rhs)
	{
		assert(row == rhs.row);
		assert(col == rhs.col);
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				float x = (*this)[i][j];
				float y = rhs[i][j];
				float d =  x - y;
				if(d < 0) d = -d;
				if(d > 1E-7) {
					cerr << "[" << i << "," << j << "] delta=" << d << " x=" << x << " y=" << y << endl;
					exit(-1);
				}
			}
		}
	}

	// Argmax of a 1-row matrix.
	size_t max_idx() const {
		assert(row == 1);
		size_t ret = 0;
		for(int i = 1;i < col;i++) {
			if((*this)[0][i] > (*this)[0][ret]) {
				ret = i;
			}
		}
		return ret;
	}

	// Sum of all elements.
	float sum() const {
		float ret = 0;
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				ret += (*this)[i][j];
			}
		}
		return ret;
	}

	// out = in * this^T
	// this: out_dim x in_dim, in: batch x in_dim, out: batch x out_dim.
	void mul_to(const Matrix& in, Matrix& out) const {
		// out x in
		// b x in
		// b x out
		assert(row == out.col && col == in.col && in.row == out.row);
		for(int b = 0;b < in.row;b++) {
			for(int i = 0;i < row;i++) {
				out[b][i] = 0;
				for(int j = 0;j < col;j++) {
					out[b][i] += (*this)[i][j] * in[b][j];
				}
			}
		}
	}

	// Transposed deep copy.
	Matrix t() const {
		Matrix ret(col, row);
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				ret[j][i] = (*this)[i][j];
			}
		}
		return ret;
	}

	// out = in * this (this used transposed relative to mul_to)
	// this: out_dim x in_dim, in: batch x out_dim, out: batch x in_dim.
	void t_and_mul_to(const Matrix& in, Matrix& out) const {
		// out x in
		// b x out
		// b x in
		assert(row == in.col && col == out.col && in.row == out.row);
		for(int b = 0;b < in.row;b++) {
			for(int j = 0;j < col;j++) {
				out[b][j] = 0;
				for(int i = 0;i < row;i++) {
					out[b][j] += (*this)[i][j] * in[b][i];
				}
			}
		}
	}

	// Broadcast-add this 1-row matrix to every row of out.
	void cast_add_to(Matrix& out) const {
		// 1 x out
		// b x out
		assert(row == 1 && col == out.col);
		for(int b = 0;b < out.row;b++) {
			for(int j = 0;j < col;j++) {
				out[b][j] += (*this)[0][j];
			}
		}
	}

	// Bias gradient step: this[0][j] += f * sum_b delta[b][j].
	// FIX: delta is read-only here, so it is taken by const reference
	// (consistent with the Matrix<GPU> specialization).
	void grad(float f, const Matrix& delta) {
		// 1 x out
		// b x out
		assert(row == 1 && col == delta.col);
		for(int j = 0;j < col;j++) {
			float sum = 0;
			for(int b = 0;b < delta.row;b++) {
				sum += delta[b][j];
			}
			sum *= f;
			(*this)[0][j] += sum;
		}
	}

	// Weight gradient step:
	// this[i][j] += f * sum_b delta[b][i] * prev_a[b][j].
	void grad(float f, const Matrix& delta, const Matrix& prev_a) {
		// out x in
		// b x out
		// b x in
		assert(row == delta.col && col == prev_a.col);
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				float sum = 0;
				for(int b = 0;b < delta.row;b++) {
					sum += delta[b][i] * prev_a[b][j];
				}
				sum *= f;
				(*this)[i][j] += sum;
			}
		}
	}

	// Deep copy (host to host).
	Matrix to_cpu() const {
		Matrix ret(row, col);
		memcpy(ret.data, data, size * sizeof(float));
		return ret;
	}

#ifdef GPU
	// Deep copy into device memory (defined after Matrix<GPU>).
	Matrix<GPU> to_gpu() const;
#endif
};

// Pretty-print a host matrix: "[(rows x cols)" header, then one
// tab-indented, comma-separated row per line.
// BUG FIX: the comma separator was written to `cout` instead of the
// target stream `out`, corrupting output when streaming to anything
// other than stdout.
ostream& operator<<(ostream& out, const Matrix<CPU>& v)
{
	out << "[(" << v.row << " x " << v.col << ") " << endl;
	for(int i = 0;i < v.row;i++) {
		out << "\t";
		for(int j = 0;j < v.col;j++) {
			if(j > 0) out << ",";
			out << v[i][j];
		}
		out << endl;
	}
	out << "]";
	return out;
}

#ifdef GPU
// RAII owner of a single device-side float, used to read scalar kernel
// results (sum, argmax) back to the host.
class SmartGPUMem
{
private:
	float *m_gpu;

	// Non-copyable: a copy would double-free m_gpu in the destructor.
	// C++03 idiom: declared private and intentionally left undefined.
	SmartGPUMem(const SmartGPUMem&);
	SmartGPUMem& operator=(const SmartGPUMem&);

public:
	SmartGPUMem()
	{
		CK(cudaMalloc((void **)&m_gpu, sizeof *m_gpu));
		assert(m_gpu);
	}

	// Raw device pointer, for passing to kernels.
	float *gpu() { return m_gpu; }

	// Blocking copy of the value back to the host (also surfaces any
	// pending asynchronous kernel error through CK).
	float cpu()
	{
		float t;
		CK(cudaMemcpy(&t, m_gpu, sizeof t, cudaMemcpyDeviceToHost));
		return t;
	}

	~SmartGPUMem()
	{
		CK(cudaFree(m_gpu));
	}
};

// Serial sum of a row x col device matrix, written to *out.
// Launched <<<1,1>>> — a single thread does all the work; adequate for
// the small matrices this program uses.
__global__ void k_sum(const float *data, size_t row, size_t col, float *out) {
	float t = 0;
	for(int i = 0;i < row;i++) {
		for(int j = 0;j < col;j++) {
			t += data[i * col + j];
		}
	}
	*out = t;
}

// Serial argmax over `size` elements, launched <<<1,1>>>.
// NOTE: the index is returned through a float, which is only exact for
// indices below 2^24 — fine for the small vectors used here.
__global__ void k_max_idx(const float *data, size_t size, float *out) {
	int t = 0;
	for(int i = 1;i < size;i++) {
		if(data[i] > data[t]) {
			t = i;
		}
	}
	*out = t;
}

// out = in * w^T, one thread per output element.
// Shapes: w is row x col, in is batch_size x col, out is batch_size x row.
// Launch: 1-D grid with at least batch_size * row total threads; thread
// gidx computes out[b][i] with b = gidx / row, i = gidx % row.
__global__ void k_mul_to(const float * w, const float *in, float *out, size_t row, size_t col, size_t batch_size)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	int b = gidx / row;
	int i = gidx % row;

	// guard against the partially-filled last block
	if(b >= batch_size) return;

	float t = 0;
	for(int j = 0;j < col;j++) {
		t += w[i * col + j] * in[b * col + j];
	}
	out[b * row + i] = t;
}

// out = in * w (w used transposed relative to k_mul_to).
// Shapes: w is row x col, in is batch_size x row, out is batch_size x col.
// Launch: 1-D grid with at least batch_size * col total threads; thread
// gidx computes out[b][j] with b = gidx / col, j = gidx % col.
__global__ void k_t_and_mul_to(const float *w, const float *in, float *out, size_t row, size_t col, size_t batch_size) 
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	int b = gidx / col;
	int j = gidx % col;

	// guard against the partially-filled last block
	if(b >= batch_size) return;

	float t = 0;
	for(int i = 0;i < row;i++) {
		t += w[i * col + j] * in[b * row + i];
	}
	out[b * col + j] = t;
}

// Broadcast-add the 1 x col row vector bx to every row of out
// (batch_size x col). One thread per element of out.
// Launch: 1-D grid with at least batch_size * col total threads.
__global__ void k_cast_add_to(const float* bx, float *out, size_t col, size_t batch_size)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	int b = gidx / col;
	int j = gidx % col;

	if(b >= batch_size) return;
	out[b * col + j] += bx[j];
}

// Bias-gradient update: b[j] += f * sum_over_batch(delta[.][j]).
// delta is batch_size x col; one thread per bias element.
// Launch: 1-D grid with at least `col` threads.
__global__ void k_grad(float *b, float f, const float *delta, size_t col, size_t batch_size)
{
	size_t j = blockDim.x * blockIdx.x + threadIdx.x;
	if(j >= col) return;

	float sum = 0;
	// FIX: loop index renamed from `b` to `k` — it shadowed the output
	// pointer parameter `b` used below, which was confusing although
	// the scoping happened to make the code behave correctly.
	for(int k = 0;k < batch_size;k++) {
		sum += delta[k * col + j];
	}
	sum *= f;
	b[j] += sum;
}

// Weight-gradient update: w[i][j] += f * sum_b delta[b][i] * prev_a[b][j].
// Shapes: w is row x col, delta is batch_size x row, prev_a is batch_size x col.
// Launch: 1-D grid with at least row * col threads; thread gidx handles
// w[i][j] with i = gidx / col, j = gidx % col.
__global__ void k_grad(float *w, float f, const float *delta, const float *prev_a, size_t row, size_t col, size_t batch_size)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	size_t i = gidx / col;
	size_t j = gidx % col;
	if(i >= row) return;

	float sum = 0;
	for(int b = 0;b < batch_size;b++) {
		sum += delta[b * row + i] * prev_a[b * col + j];
	}
	sum *= f;
	w[i * col + j] += sum;
}

// Dense row-major float matrix stored in device (GPU) memory.
// Same manual-ownership contract as Matrix<CPU>: copies alias the same
// device buffer; free() must be called once per owning allocation.
// Host-side methods mirror the Matrix<CPU> interface; see that
// specialization for the reference (loop) implementations.
template<>
struct Matrix<GPU>
{
	size_t row, col, size;
	float *data;

	// Allocate a zero-initialized row x col matrix on the device.
	Matrix(size_t _row, size_t _col) :
		row(_row), col(_col), size(_row * _col)
		{
			assert(row > 0 && col > 0);
			CK(cudaMalloc((void **)&data, size * sizeof(float)));
			assert(data);
			// An all-zero byte pattern is exactly 0.0f, so cudaMemset
			// zero-fills directly — no host staging buffer needed.
			CK(cudaMemset(data, 0, size * sizeof(float)));
		}

	// Wrap an existing device buffer without copying.
	Matrix(size_t _row, size_t _col, float *_data) :
		row(_row), col(_col), size(_row * _col), data(_data)
		{
			assert(row > 0 && col > 0 && data);
		}

	// View of rows [from, to); shares device storage with this matrix.
	Matrix splice(size_t from, size_t to)
	{
		assert(from < to && to <= row);
		size_t offset = from * col;

		Matrix ret(to - from, col, data + offset);
		return ret;
	}

	// Human-readable dimensions, e.g. "3 x 2".
	string shape() const {
		char buf[100];
		sprintf(buf, "%d x %d", (int)row, (int)col);
		return buf;
	}

	// Release the device buffer (call only on owning matrices).
	void free()
	{
		assert(data);
		CK(cudaFree(data));
		data = NULL;
	}

	// Fill every element with v (cudaMemset is byte-wise, so a non-zero
	// float fill has to be staged through a host buffer).
	void init(float v)
	{
		float *tmp = (float*)malloc(size * sizeof(float));
		assert(tmp);
		for(int i = 0;i < size;i++) {
			tmp[i] = v;
		}
		CK(cudaMemcpy(data, tmp, size * sizeof(float), cudaMemcpyHostToDevice));
		::free(tmp);
	}

	// Fill with small Gaussian noise generated on the host, so that the
	// same srand() seed produces identical CPU and GPU models.
	void init_gauss()
	{
		float *tmp = (float*)malloc(size * sizeof(float));
		assert(tmp);
		for(int i = 0;i < size;i++) {
			tmp[i] = gaussrand_f();
		}
		CK(cudaMemcpy(data, tmp, size * sizeof(float), cudaMemcpyHostToDevice));
		::free(tmp);
	}

	// Device-side row access (only meaningful inside kernels).
	__device__ float *operator[](size_t i) {
		assert(i < row);
		return data + i * col;
	}

	__device__ const float *operator[] (size_t i) const {
		assert(i < row);
		return data + i * col;
	}

	// Argmax of a 1-row matrix, computed by a single-thread kernel.
	size_t max_idx() const {
		assert(row == 1);
		SmartGPUMem mem;
		k_max_idx<<<1,1>>>(data, col, mem.gpu());
		CK(cudaGetLastError()); // surface launch-configuration errors
		return mem.cpu();
	}

	// Sum of all elements, computed by a single-thread kernel.
	float sum() const {
		SmartGPUMem mem;
		k_sum<<<1,1>>>(data, row, col, mem.gpu());
		CK(cudaGetLastError());
		return mem.cpu();
	}

	// out = in * this^T (see Matrix<CPU>::mul_to for the reference loops).
	void mul_to(const Matrix& in, Matrix& out) const {
		// out x in
		// b x in
		// b x out
		assert(row == out.col && col == in.col && in.row == out.row);
		size_t thread_cnt = get_thread_cnt(in.row * row);
		size_t block_cnt = ceil(in.row * row, thread_cnt);

		k_mul_to<<<block_cnt, thread_cnt>>>(data, in.data, out.data, row, col, in.row);
		CK(cudaGetLastError());
	}

	// out = in * this (see Matrix<CPU>::t_and_mul_to).
	void t_and_mul_to(const Matrix& in, Matrix& out) const {
		// out x in
		// b x out
		// b x in
		assert(row == in.col && col == out.col && in.row == out.row);
		size_t thread_cnt = get_thread_cnt(in.row * col);
		size_t block_cnt = ceil(in.row * col, thread_cnt);

		k_t_and_mul_to<<<block_cnt, thread_cnt>>>(data, in.data, out.data, row, col, in.row);
		CK(cudaGetLastError());
	}

	// Broadcast-add this 1-row matrix to every row of out.
	void cast_add_to(Matrix& out) const {
		// 1 x out
		// b x out
		assert(row == 1 && col == out.col);
		size_t thread_cnt = get_thread_cnt(out.row * col);
		size_t block_cnt = ceil(out.row * col, thread_cnt);

		k_cast_add_to<<<block_cnt, thread_cnt>>>(data, out.data, col, out.row);
		CK(cudaGetLastError());
	}

	// Bias gradient step: this[0][j] += f * sum_b delta[b][j].
	void grad(float f, const Matrix& delta) {
		// 1 x out
		// b x out
		assert(row == 1 && col == delta.col);
		size_t thread_cnt = get_thread_cnt(col);
		size_t block_cnt = ceil(col, thread_cnt);

		k_grad<<<block_cnt, thread_cnt>>>(data, f, delta.data, col, delta.row);
		CK(cudaGetLastError());
	}

	// Weight gradient step: this[i][j] += f * sum_b delta[b][i] * prev_a[b][j].
	void grad(float f, const Matrix& delta, const Matrix& prev_a) {
		// out x in
		// b x out
		// b x in
		assert(row == delta.col && col == prev_a.col);
		size_t thread_cnt = get_thread_cnt(row * col);
		size_t block_cnt = ceil(row * col, thread_cnt);

		k_grad<<<block_cnt, thread_cnt>>>(data, f, delta.data, prev_a.data, row, col, delta.row);
		CK(cudaGetLastError());
	}

	// Deep copy back to host memory (blocking).
	Matrix<CPU> to_cpu() const {
		Matrix<CPU> ret(row, col);
		CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyDeviceToHost));
		return ret;
	}
};

// Deep copy of a host matrix into newly allocated device memory
// (blocking transfer). Defined here, after Matrix<GPU> is complete.
Matrix<GPU> Matrix<CPU>::to_gpu() const {
	Matrix<GPU> ret(row, col);
	CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyHostToDevice));
	return ret;
}
#endif

// Primary template for the softmax cross-entropy loss; specialized for
// CPU and GPU below.
template<size_t IS_GPU>
struct Softmax{};

// Softmax cross-entropy of logits x against target distribution y
// (both length `size`). Returns -sum_j y_j * log softmax(x)_j.
__host__ __device__ float softmax_calc(const float *x, const float *y, size_t size)
{
	/*
	  log( exp(x_j) / \sum exp(x_k) )
	= x_j - log \sum exp(x_k)
	= x_j - (max + log \sum exp(x_k - max))   <- max-shift for stability
	*/
	float maxX = x[0];
	for(int i = 1;i < size;i++) {
		if(x[i] > maxX) {
			maxX = x[i];
		}
	}

	float xSum = 0;
	for(int i = 0;i < size;i++) {
		xSum += expf(x[i] - maxX);
	}

	// The log-partition term is loop-invariant: hoisted out of the loop
	// (previously logf(xSum) was recomputed for every element).
	float logZ = maxX + logf(xSum);

	float ret = 0;
	for(int i = 0;i < size;i++) {
		ret += y[i] * (x[i] - logZ);
	}

	return -ret;
}

// Gradient of the softmax cross-entropy loss w.r.t. the logits x,
// scaled by f and written into z:
//   z_i = f * ( softmax(x)_i * (\sum_j y_j) - y_i )
__host__ __device__ void softmax_propagate_delta(const float *x, const float *y, float *z, size_t size, float f)
{
	/*
	  - d \sum y_j * log( exp(x_j) / \sum exp(x_k) )
	= - d \sum y_j * x_j - d \sum y_j log (\sum exp(x_k) )
	= - y_i + \sum (y_j * exp(x_i) / \sum exp(x_k))
	= - y_i + exp(x_i) (\sum y_j) / (\sum exp(x_k))
	*/

	// Max-shift for numerically stable exponentials.
	float m = x[0];
	for(int i = 0;i < size;i++) {
		if(x[i] > m) {
			m = x[i];
		}
	}

	// One pass: stash exp(x_i - m) in z while accumulating both sums.
	float ySum = 0;
	float eSum = 0;
	for(int i = 0;i < size;i++) {
		z[i] = expf(x[i] - m);
		ySum += y[i];
		eSum += z[i];
	}

	// Final pass combines the gradient and the scale factor f.
	float ratio = ySum / eSum;
	for(int i = 0;i < size;i++) {
		z[i] = ratio * z[i] - y[i];
		z[i] *= f;
	}
}

// Host implementation of the softmax cross-entropy loss.
template<>
struct Softmax<CPU>
{
	// Per-sample loss: out[0][r] = -\sum_j y_j * log softmax(x_r)_j
	// for each batch row r.
	void calc(const Matrix<CPU> &x, const Matrix<CPU> &y, Matrix<CPU>& out) const
	{
		assert(x.row == y.row && x.col == y.col);
		assert(out.row == 1 && out.col == x.row);
		for(int r = 0;r < x.row;r++) {
			out[0][r] = softmax_calc(x[r], y[r], x.col);
		}
	} 

	// Loss gradient w.r.t. the logits, averaged over the batch
	// (each row is scaled by 1/batch_size).
	void propagate_delta(const Matrix<CPU> &x, const Matrix<CPU> &y, Matrix<CPU>& z) const
	{
		assert(x.row == y.row && x.col == y.col);
		assert(x.row == z.row && x.col == z.col);

		for(int r = 0;r < x.row;r++) {
			softmax_propagate_delta(x[r], y[r], z[r], x.col, 1.0/x.row);
		}
	}
};

#ifdef GPU
// Per-sample softmax cross-entropy: out[i] = loss of batch row i.
// x, y are row x col (logits and one-hot targets).
// Launch: 1-D grid with at least `row` threads, one thread per sample.
__global__ void k_calc(const float *x, const float *y, float *out, size_t row, size_t col)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	if(gidx >= row) return;
	size_t i = gidx;

	out[i] = softmax_calc(x + i * col, y + i * col, col);
}

// Per-sample softmax loss gradient, scaled by f, written to z.
// x, y, z are row x col.
// Launch: 1-D grid with at least `row` threads, one thread per sample.
__global__ void k_propagate_delta(const float *x, const float *y, float *z, size_t row, size_t col, float f)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	if(gidx >= row) return;
	size_t i = gidx;

	softmax_propagate_delta(x + col * i, y + col * i, z + col * i, col, f);
}

// Device implementation of the softmax cross-entropy loss.
// Each kernel uses one thread per batch row; see Softmax<CPU> for the
// reference loops.
template<>
struct Softmax<GPU>
{
	// Per-sample loss: out[0][i] = loss of batch row i.
	void calc(const Matrix<GPU> &x, const Matrix<GPU> &y, Matrix<GPU>& out) const
	{
		assert(x.row == y.row && x.col == y.col);
		assert(out.row == 1 && out.col == x.row);

		size_t thread_cnt = get_thread_cnt(x.row);
		size_t block_cnt = ceil(x.row, thread_cnt);
		k_calc<<<block_cnt, thread_cnt>>>(x.data, y.data, out.data, x.row, x.col);
		CK(cudaGetLastError()); // surface launch-configuration errors
	}

	// Loss gradient w.r.t. the logits, averaged over the batch.
	void propagate_delta(const Matrix<GPU> &x, const Matrix<GPU> &y, Matrix<GPU>& z) const
	{
		assert(x.row == y.row && x.col == y.col);
		assert(x.row == z.row && x.col == z.col);

		size_t thread_cnt = get_thread_cnt(x.row);
		size_t block_cnt = ceil(x.row, thread_cnt);
		k_propagate_delta<<<block_cnt, thread_cnt>>>(x.data, y.data, z.data, x.row, x.col, 1.0/x.row);
		CK(cudaGetLastError());
	}
};
#endif

// Primary template for the ReLU activation; specialized for CPU and GPU
// below.
template<size_t IS_GPU>
struct Relu{};

// Host implementation of the element-wise ReLU activation.
template<>
struct Relu<CPU>
{
	// y = max(x, 0), element-wise.
	void forward(const Matrix<CPU>& x, Matrix<CPU> &y) const
	{
		assert(x.row == y.row && x.col == y.col);
		for(int r = 0;r < x.row;r++) {
			const float *src = x[r];
			float *dst = y[r];
			for(int c = 0;c < x.col;c++) {
				dst[c] = src[c] > 0 ? src[c] : 0;
			}
		}
	}

	// y *= relu'(x): multiplies each y element by 1 where x > 0 and by
	// 0 elsewhere (used to gate the back-propagated delta).
	void derive_and_dot_into(const Matrix<CPU>& x, Matrix<CPU> &y) const
	{
		assert(x.row == y.row && x.col == y.col);
		for(int r = 0;r < x.row;r++) {
			const float *src = x[r];
			float *dst = y[r];
			for(int c = 0;c < x.col;c++) {
				dst[c] *= src[c] > 0 ? 1 : 0;
			}
		}
	}
};

#ifdef GPU
// Element-wise ReLU: y = max(x, 0), for row x col matrices.
// Launch: 1-D grid with at least row * col threads, one per element.
__global__ void k_forward(const float *x, float *y, size_t row, size_t col)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	int i = gidx / col;
	int j = gidx % col;
	if(i >= row) return;

	float t = x[i * col + j];
	y[i * col + j] = t > 0 ? t : 0;
}

// Element-wise y *= relu'(x): keeps y where x > 0, zeroes it elsewhere.
// Launch: 1-D grid with at least row * col threads, one per element.
__global__ void k_derive_and_dot_into(const float *x, float *y, size_t row, size_t col)
{
	size_t gidx = blockDim.x * blockIdx.x + threadIdx.x;
	int i = gidx / col;
	int j = gidx % col;
	if(i >= row) return;

	float t = x[i * col + j];
	y[i * col + j] *= t > 0 ? 1 : 0;
}


// Device implementation of the element-wise ReLU activation.
// Each kernel uses one thread per matrix element; see Relu<CPU> for the
// reference loops.
template<>
struct Relu<GPU>
{
	// y = max(x, 0), element-wise.
	void forward(const Matrix<GPU>& x, Matrix<GPU> &y) const
	{
		assert(x.row == y.row && x.col == y.col);
		size_t thread_cnt = get_thread_cnt(x.row * x.col);
		size_t block_cnt = ceil(x.row * x.col, thread_cnt);
		k_forward<<<block_cnt, thread_cnt>>>(x.data, y.data, x.row, x.col);
		CK(cudaGetLastError()); // surface launch-configuration errors
	}

	// y *= relu'(x): gates the back-propagated delta.
	void derive_and_dot_into(const Matrix<GPU>& x, Matrix<GPU> &y) const
	{
		assert(x.row == y.row && x.col == y.col);
		size_t thread_cnt = get_thread_cnt(x.row * x.col);
		size_t block_cnt = ceil(x.row * x.col, thread_cnt);
		k_derive_and_dot_into<<<block_cnt, thread_cnt>>>(x.data, y.data, x.row, x.col);
		CK(cudaGetLastError());
	}
};
#endif

// One fully-connected layer with ReLU activation, parameterized on
// where its storage lives (IS_GPU = CPU or GPU).
// Forward pass: z = w * in + b, a = relu(z).
template<size_t IS_GPU>
struct Layer
{
	size_t in_size, out_size, batch_size;
	// w: out_size x in_size weights; b: 1 x out_size bias.
	Matrix<IS_GPU> w, b;
	Relu<IS_GPU> relu;

	// z = w * in + b
	// a = relu(z)
	// delta holds the back-propagated gradient w.r.t. z.
	// All three are batch_size x out_size working buffers.
	Matrix<IS_GPU> z, a, delta;

	Layer(size_t _in_size, size_t _out_size, size_t _batch_size=1) :
		in_size(_in_size), out_size(_out_size), batch_size(_batch_size),
		w(_out_size, _in_size), b(1, _out_size),
		z(_batch_size, _out_size), a(_batch_size, _out_size), 
		delta(_batch_size, _out_size)
	{
		assert(in_size > 0);
		assert(out_size > 0);
		assert(batch_size > 0);
		w.init_gauss();
		b.init_gauss();
	}

	// Reallocate the per-batch buffers for a new batch size; weights and
	// bias are untouched. No-op when the size is unchanged.
	void reset_batch_size(size_t _batch_size) 
	{
		assert(_batch_size > 0);
		if(_batch_size == batch_size) return;
		batch_size = _batch_size;

		z.free(); 
		a.free(); 
		delta.free();
		z = Matrix<IS_GPU>(batch_size, out_size);
		a = Matrix<IS_GPU>(batch_size, out_size);
		delta = Matrix<IS_GPU>(batch_size, out_size);
	}

	// Forward pass for one batch: fills z and a.
	void calc(const Matrix<IS_GPU>& in)
	{
		assert(in.row == batch_size);
		assert(in.col == in_size);

		// out x in
		// batch x in
		// batch x out
		w.mul_to(in, z);

		// 1 x out
		// batch x out
		b.cast_add_to(z);

		// batch x out
		// batch x out
		relu.forward(z, a);
	}

	// Propagate this layer's delta to the previous layer's delta buffer
	// (out = delta * w).
	void propagate_delta(Matrix<IS_GPU>& out)
	{
		assert(out.row == batch_size);
		assert(out.col == in_size);

		// out x in
		// batch x out
		// batch x in
		w.t_and_mul_to(delta, out);
	}

	// SGD step with learning rate alpha: gradients are accumulated from
	// delta and the previous layer's activations, scaled by -alpha.
	void update_parameters(float alpha, const Matrix<IS_GPU> &prev_a)
	{
		assert(prev_a.row == batch_size);
		assert(prev_a.col == in_size);

		// 1 x out
		// 1 x out
		b.grad(-alpha, delta);

		// out x in
		// batch x out
		// batch x in
		w.grad(-alpha, delta, prev_a);
	}
};

// Decode a 4-byte big-endian (MSB-first) integer, as used by the MNIST
// idx file headers.
int MsbInt(char buf[])
{
	int value = 0;
	for(int i = 0;i < 4;i++) {
		value = value * 256 + (unsigned char)buf[i];
	}
	return value;
}

// Read an MNIST label file (idx1-ubyte format): 4-byte big-endian magic
// 0x00000801, 4-byte big-endian count, then one unsigned byte per label.
// Aborts with a message if the file cannot be opened (previously a
// missing file led to reading an uninitialized buffer before the magic
// assert fired).
vector<int> ReadMnistLabels(string fileName)
{
	vector<int> ret;
	ifstream ifs(fileName.c_str(), ios::binary);
	if(!ifs) {
		cerr << "Cannot open label file: " << fileName << endl;
		exit(-1);
	}
	char buf[1000];

	// MAGIC
	ifs.read(buf, 4);
	int magic = MsbInt(buf);
	assert(magic == 0x00000801);

	// num of labels
	ifs.read(buf, 4);
	int nImages = MsbInt(buf);
	ret.reserve(nImages);

	while(nImages--) {
		ret.push_back(ifs.get());
	}
	// A truncated file would otherwise silently yield EOF (-1) labels.
	assert(ifs);

	return ret;
}

// Read an MNIST image file (idx3-ubyte format): magic 0x00000803, image
// count, rows, cols (all 4-byte big-endian), then row*col unsigned bytes
// per image. Returns an (nImages x 784) matrix with pixels scaled to
// [0, 1). Aborts with a message if the file cannot be opened.
Matrix<CPU> ReadMnistData(const string& fileName)
{
	ifstream ifs(fileName.c_str(), ios::binary);
	if(!ifs) {
		cerr << "Cannot open image file: " << fileName << endl;
		exit(-1);
	}
	char buf[1000];

	// MAGIC
	ifs.read(buf, 4);
	int magic = MsbInt(buf);
	assert(magic == 0x00000803);

	// num of images
	ifs.read(buf, 4);
	int nImages = MsbInt(buf);

	int row, col;
	ifs.read(buf, 4);
	row = MsbInt(buf);
	ifs.read(buf, 4);
	col = MsbInt(buf);
	assert(row == 28 && col == 28);

	size_t size = row * col;

	Matrix<CPU> ret(nImages, size);

	for(int k = 0;k < nImages;k++) {
		for(int i = 0;i < row * col;i++) {
			ret[k][i] = ifs.get() / 256.0; // normalize pixel byte to [0, 1)
		}
	}
	// A truncated file would otherwise silently fill with EOF values.
	assert(ifs);

	return ret;
}

// One-hot encode integer class labels into an (N x k) matrix: row i has
// a 1 in column labels[i] and 0 elsewhere.
Matrix<CPU> translateLabels(const vector<int>& labels, int k=10) 
{
	size_t n = labels.size();
	Matrix<CPU> ret(n, k);

	for(size_t i = 0;i < n;i++) {
		int lbl = labels[i];
		assert(lbl >= 0 && lbl < k);
		ret[i][lbl] = 1;
	}
	return ret;
}

// Resize the per-batch working buffers (z, a, delta) of every layer in
// the model to the given batch size.
template<size_t IS_GPU>
void reset_model_batch_size(vector<Layer<IS_GPU> >&model, size_t batch_size)
{
	for(size_t idx = 0;idx < model.size();idx++) {
		model[idx].reset_batch_size(batch_size);
	}
}

// Forward-propagate one batch through `model` and score it.
// in:        batch x input-dim samples
// label:     batch x n_classes one-hot targets
// raw_label: integer class per sample (same order as `in`)
// error:     out-param, set to the mean softmax cross-entropy loss
// Returns one flag per sample: true if the argmax prediction matched
// raw_label. The model's batch size must already equal in.row.
template<size_t IS_GPU>
vector<bool> forward(
	vector<Layer<IS_GPU> >& model,
	const Matrix<IS_GPU>& in, 
	const Matrix<IS_GPU>& label, 
	const vector<int>& raw_label, 
	const Softmax<IS_GPU>& s,
	float& error)
{
	size_t batch_size = in.row;
	assert(model[0].batch_size == batch_size);
	assert(label.row == batch_size);
	assert(raw_label.size() == batch_size);

	size_t nLayer = model.size();

	// Each layer consumes the previous layer's activations.
	for(int j = 0;j < nLayer;j++) {
		Layer<IS_GPU> &layer = model[j];
		if(j == 0) {
			layer.calc(in);
		} else {
			layer.calc(model[j-1].a);
		}
	}

	// Mean loss over the batch.
	Layer<IS_GPU> &lastLayer = model[nLayer - 1];
	Matrix<IS_GPU> error2(1, batch_size);
	s.calc(lastLayer.a, label, error2);
	error = error2.sum() / batch_size;
	error2.free();

	// Predictions are compared on the host regardless of IS_GPU.
	Matrix<CPU> cpu_a = lastLayer.a.to_cpu();
	vector<bool> ret(batch_size);
	for(int i = 0;i < batch_size;i++) {
		ret[i] = cpu_a.splice(i,i + 1).max_idx() == raw_label[i];
	}
	cpu_a.free();
	return ret;
}

// Run one training epoch of mini-batch SGD over train_data, then report
// the mean training loss and the accuracy on the full training and test
// sets, with timings.
// alpha is the SGD learning rate; it defaults to the previously
// hard-coded 0.001, so existing callers are unaffected.
template<size_t IS_GPU>
void run(Matrix<IS_GPU> &train_data,
		Matrix<IS_GPU> &train_label,
		vector<int> &raw_train_label,

		Matrix<IS_GPU> &test_data,
		Matrix<IS_GPU> &test_label,
		vector<int> &raw_test_label,

		vector<Layer<IS_GPU> >& model,
		size_t batch_size,
		float alpha = 0.001
	)
{
	// NOTE: clock() measures CPU time, not wall time.
	clock_t start = clock();

	size_t M = train_data.row;
	size_t T = test_data.row;

	Softmax<IS_GPU> s;
	float avg_error = 0;
	float error;
	for(int i = 0;i < M;i += batch_size) {
		// The last batch may be smaller than batch_size.
		size_t this_batch_size = std::min(batch_size, M - i);
		reset_model_batch_size(model, this_batch_size);
		Matrix<IS_GPU> this_batch_train_data = train_data.splice(i, i + this_batch_size);
		Matrix<IS_GPU> this_batch_train_label = train_label.splice(i, i + this_batch_size);
		vector<int> this_batch_raw_train_label(raw_train_label.begin() + i, raw_train_label.begin() + i + this_batch_size);

		forward<IS_GPU>(
			model, 
			this_batch_train_data,
			this_batch_train_label,
			this_batch_raw_train_label,
			s,
			error
			);
		avg_error += error * this_batch_size;

		// Backward pass: the last layer gets the softmax gradient; every
		// other layer receives the delta propagated from the layer above,
		// then gates it by the ReLU derivative.
		for(int j = model.size() - 1;j >= 0;j--) {
			Layer<IS_GPU> &layer = model[j];
			if(j == model.size() - 1) {
				s.propagate_delta(layer.a, this_batch_train_label, layer.delta);
			} else {
				model[j + 1].propagate_delta(layer.delta);
			}
			layer.relu.derive_and_dot_into(layer.a, layer.delta);
		}

		// Parameter update: layer 0 reads the raw input, later layers read
		// the previous layer's activations.
		for(int j = 0;j < model.size();j++) {
			model[j].update_parameters(alpha, j == 0 ? this_batch_train_data : model[j-1].a);
		}
	}
	avg_error /= M;

	clock_t mid = clock();
	cout << "\ttime=" << ((mid-start)*1.0/CLOCKS_PER_SEC) << " error=" << avg_error << endl;

	// Evaluate on the full training set (one giant batch of M samples).
	reset_model_batch_size(model, M);
	vector<bool> is_good = forward(model, 
		train_data.splice(0, M), 
		train_label.splice(0, M), 
		vector<int>(raw_train_label.begin(), raw_train_label.begin() + M),
		s, error);
	size_t total = 0, good = 0;
	for(int i = 0;i < M;i++) {
		if(is_good[i]) good++;
		total++;
	}
	cout << "\ttrain_accuracy=" << (good*1.0/total) << " ";

	// Evaluate on the full test set.
	total = good = 0;
	reset_model_batch_size(model, T);
	is_good = forward(model, 
		test_data.splice(0, T),
		test_label.splice(0, T),
		vector<int>(raw_test_label.begin(), raw_test_label.begin() + T),
		s, error);
	for(int i = 0;i < T;i++) {
		if(is_good[i]) good++;
		total++;
	}
	cout << "test_accuracy=" << (good*1.0/total) << " ";

	clock_t end = clock();
	cout << "total_time=" << (end-start)*1.0/CLOCKS_PER_SEC << endl;
}

// Unit tests for the Matrix<CPU> primitives; aborts via assert (or
// assert_eq's diagnostic) on any failure. Run before training starts.
void test()
{
	/*
	0 1
	1 2
	2 3
	*/
	Matrix<CPU> a(3, 2);
	for(int i = 0;i < a.row;i++) {
		for(int j = 0;j < a.col;j++) {
			a[i][j] = i + j;
		}
	}
	//Matrix splice(size_t from, size_t to) {
	Matrix<CPU> t = a.splice(1,2);
	assert(t.row == 1 && t.col == 2 && t[0][0] == 1);

	//string shape() const {
	assert(a.shape() == "3 x 2");

	//void free() {
	t = Matrix<CPU>(1,1);
	t.free();
	assert(!t.data);

	//void init(float v) {
	t = Matrix<CPU>(2,2);
	t.init(2);
	assert(t[1][1] == 2);

	//void init_gauss() {
	t = Matrix<CPU>(2,2);
	assert(t[1][1] == 0);
	t.init_gauss();
	assert(t[1][1] != 0);

	//float *operator[](size_t i) {
	//const float *operator[] (size_t i) const {

	//void assert_eq(const Matrix& rhs)
	t = Matrix<CPU>(2,2);
	Matrix<CPU> t2(2,2);
	t.assert_eq(t2);

	//size_t max_idx() const {
	assert(a.splice(0,1).max_idx() == 1);

	//float sum() const {
	assert(a.splice(1,2).sum() == 3);

	//void mul_to(const Matrix& in, Matrix& out) const {
	/*
	0 1
	1 2
	2 3

	 x

	1 2
	3 4

	 =

    2 5 8
    4 11 18
	*/
	Matrix<CPU> b(2, 2);
	b[0][0] = 1; b[0][1] = 2;
	b[1][0] = 3; b[1][1] = 4;
	Matrix<CPU> out(2, 3);
	a.mul_to(b, out);
	assert(out[0][0] == 2 && out[0][1] == 5 && out[0][2] == 8);
	assert(out[1][0] == 4 && out[1][1] == 11 && out[1][2] == 18);


	//void t_and_mul_to(const Matrix& in, Matrix& out) const {
	/*
	0 1
	1 2 -> 0 1 2
	2 3    1 2 3

	 x

	1 2 3
	4 5 6

	 =

	8 14
	17 32
	*/
	//Matrix<CPU> ap = a.t();
	b = Matrix<CPU>(2, 3);
	b[0][0] = 1; b[0][1] = 2; b[0][2] = 3;
	b[1][0] = 4; b[1][1] = 5; b[1][2] = 6;
	out = Matrix<CPU>(2, 2);
	a.t_and_mul_to(b, out);
	//cout << out << endl;
	//Matrix<CPU> out2(2, 2);
	//ap.mul_to(b, out2);
	//cout << out2 << endl;
	//out.assert_eq(out2);
	assert(out[0][0] == 8 && out[0][1] == 14);
	assert(out[1][0] == 17 && out[1][1] == 32);

	//void cast_add_to(Matrix& out) const {
	/*
	1 2
	+
	1 2
	3 4
	=
	2 4
	4 6
	*/
	b = Matrix<CPU>(1, 2);
	b[0][0] = 1; b[0][1] = 2;
	out = Matrix<CPU>(2,2);
	out[0][0] = 1; out[0][1] = 2;
	out[1][0] = 3; out[1][1] = 4;
	b.cast_add_to(out);
	assert(out[0][0] == 2 && out[0][1] == 4);
	assert(out[1][0] == 4 && out[1][1] == 6);

	//void grad(float f, Matrix& delta) {
	/*
	1 2
	grad with f = -0.5
	1 2
	3 4
	=
	-1 -1
	*/
	b = Matrix<CPU>(1, 2);
	b[0][0] = 1; b[0][1] = 2;
	out = Matrix<CPU>(2,2);
	out[0][0] = 1; out[0][1] = 2;
	out[1][0] = 3; out[1][1] = 4;
	b.grad(-0.5, out);
	assert(b[0][0] == -1 && b[0][1] == -1);

	//void grad(float f, const Matrix& delta, const Matrix& prev_a) {
	/*
	0 1
	1 2
	2 3

	grad with f = -0.5

	delta=
	1 2 3 -> 1
	4 5 6    2
	         3

	prev_a=
	7 8 -> 7
	9 10   8

	=

	[0][1] = 1 - 0.5 * (1 * 8 + 4 * 10) = 1 - 0.5 * 48 = 1 - 24 = -23
	*/
	Matrix<CPU> w(3, 2);
	w[0][0] = 0; w[0][1] = 1;
	w[1][0] = 1; w[1][1] = 2;
	w[2][0] = 2; w[2][1] = 3;

	Matrix<CPU> delta(2, 3);
	delta[0][0] = 1; delta[0][1] = 2; delta[0][2] = 3;
	delta[1][0] = 4; delta[1][1] = 5; delta[1][2] = 6;

	Matrix<CPU> prev_a(2, 2);
	prev_a[0][0] = 7; prev_a[0][1] = 8;
	prev_a[1][0] = 9; prev_a[1][1] = 10;

	w.grad(-0.5, delta, prev_a);
	//cout << w << endl;
	assert(w[0][1] == -23);

	//Matrix to_cpu() const {
	t = a.to_cpu();
	t.assert_eq(a);
}

int main()
{
	// Self-test the CPU matrix primitives before touching real data.
	cout << "Testing" << endl;
	test();
	//return 0;

	cout << "Loading data" << endl;
	// Load the MNIST training set (idx-format files under mnist/).
	vector<int> raw_train_label = ReadMnistLabels("mnist/train-labels-idx1-ubyte");
	//assert(raw_train_label.size() == 60000);
	Matrix<CPU> cpu_train_data = ReadMnistData("mnist/train-images-idx3-ubyte");
	//assert(cpu_train_data.row == 60000 && cpu_train_data.col == 28 * 28);
	Matrix<CPU> cpu_train_label = translateLabels(raw_train_label);
	//assert(cpu_train_label.row == 60000 && cpu_train_label.col == 10);
#ifdef GPU
	Matrix<GPU> gpu_train_data(cpu_train_data.to_gpu());
	Matrix<GPU> gpu_train_label(cpu_train_label.to_gpu());
#endif


	// Load the MNIST test set.
	vector<int> raw_test_label = ReadMnistLabels("mnist/t10k-labels-idx1-ubyte");
	//assert(raw_test_label.size() == 10000);
	Matrix<CPU> cpu_test_data = ReadMnistData("mnist/t10k-images-idx3-ubyte");
	//assert(cpu_test_data.row == 10000 && cpu_test_data.col == 28 * 28);
	Matrix<CPU> cpu_test_label = translateLabels(raw_test_label);
	//assert(cpu_test_label.row == 10000 && cpu_test_label.col == 10);
#ifdef GPU
	Matrix<GPU> gpu_test_data(cpu_test_data.to_gpu());
	Matrix<GPU> gpu_test_label(cpu_test_label.to_gpu());
#endif

	// Network shape: 784 -> 100 (ReLU) -> 10 (softmax).
	size_t n_input = cpu_train_data.col;
	size_t n_output = 10;
	size_t n_mid = 100;
	size_t batch_size = 256;

	cout << "Now run" << endl;
	// Seed identically before building each model so the CPU and GPU
	// networks start from the same random weights and stay comparable.
	srand(1000);
	vector<Layer<CPU> > cpu_model;
	cpu_model.push_back(Layer<CPU>(n_input, n_mid));
	cpu_model.push_back(Layer<CPU>(n_mid, n_output));
#ifdef GPU
	srand(1000);
	vector<Layer<GPU> > gpu_model;
	gpu_model.push_back(Layer<GPU>(n_input, n_mid));
	gpu_model.push_back(Layer<GPU>(n_mid, n_output));
#endif

	// Five epochs, alternating CPU and GPU so their loss/accuracy and
	// timings can be compared side by side.
	for(int i = 0; i < 5;i++) {
		cout << "cpu-" << (i+1) << endl;
		run<CPU>(cpu_train_data, cpu_train_label, raw_train_label, cpu_test_data, cpu_test_label, raw_test_label, cpu_model, batch_size);
		#ifdef GPU
		cout << "gpu-" << (i+1) << endl;
		run<GPU>(gpu_train_data, gpu_train_label, raw_train_label, gpu_test_data, gpu_test_label, raw_test_label, gpu_model, batch_size);
		#endif
	}

	cout << "Done" << endl;
	return 0;
}


