#include <iostream>
#include <cstdlib>
#include <cassert>
#include <string>
#include <cstring>
#include <fstream>
#include <vector>
#include <memory>
#include <cstdlib>
#include <cuda_runtime.h>
#include <math_functions.h>
#include <cmath>
#include <ctime>
using namespace std;

// Abort the process with a file:line diagnostic when a CUDA API call fails.
// Intended to be used through the CK() macro.
void CheckCudaReturnCode(cudaError_t code, const char *fileName, int lineNo)
{
	if(code != cudaSuccess) {
		std::cerr << "Cuda call failed at " << fileName << ":" << lineNo
			<< " " << cudaGetErrorString(code) << std::endl;
		exit(-1);
	}
}

// Wrap every CUDA runtime call: CK(cudaMalloc(...)) aborts with file/line
// context via CheckCudaReturnCode on any non-success return code.
#define CK(x) CheckCudaReturnCode((x), __FILE__, __LINE__)
// Tag values selecting the CPU or GPU specialization of Vector/Matrix/Softmax/Relu.
#define CPU 0
#define GPU 1

// http://www.cnblogs.com/yeahgis/archive/2012/07/13/2590485.html
// Gaussian-distributed random number with mean 0 and variance 1
// (Marsaglia polar method). Each rejection round produces two samples;
// the second is cached in static state, so calls alternate between a
// freshly generated value and the cached one. Uses rand(), so the
// sequence is reproducible after srand() and NOT thread-safe.
double gaussrand()
{
    static double V1, V2, S;
    static int phase = 0;
    double X;
     
    if ( phase == 0 ) {
        do {
            double U1 = (double)rand() / RAND_MAX;
            double U2 = (double)rand() / RAND_MAX;
             
            V1 = 2 * U1 - 1;
            V2 = 2 * U2 - 1;
            S = V1 * V1 + V2 * V2;
        } while(S >= 1 || S == 0); // reject points outside the unit disc (or at origin)
         
        X = V1 * sqrt(-2 * log(S) / S);
    } else
        X = V2 * sqrt(-2 * log(S) / S);
         
    phase = 1 - phase;
 
    return X;
}

// Small-magnitude float Gaussian sample (mean 0, stddev ~0.01),
// used to initialize network weights and biases.
float gaussrandF()
{
	return (float)(gaussrand() * 0.01);
}

// Primary template; only the Vector<CPU> and Vector<GPU> specializations
// below are ever instantiated (IS_GPU is the CPU/GPU tag macro).
template<size_t IS_GPU>
struct Vector
{};

// Host-side dense float vector.
//
// Ownership is intentionally shallow: the implicit copy constructor shares
// `data`, and there is no destructor. Containers of Vector<CPU> rely on this
// (push_back copies alias the same buffer); callers free() manually where
// needed.
template<>
struct Vector<CPU>
{
	size_t size;   // number of elements
	float *data;   // malloc'ed buffer of `size` floats (not freed automatically)

	// Allocate and zero-fill a vector of `_size` elements.
	Vector(size_t _size): size(_size)
	{
		data = (float*)malloc(size * sizeof(float));
		assert(data);
		memset(data, 0, size * sizeof(float));
	}

	// Wrap an existing buffer without copying (no ownership transfer).
	Vector(size_t _size, float *_data) : size(_size), data(_data) {}

	// Fill every element with the constant v.
	void init(float v)
	{
		for(int i = 0;i < size;i++) {
			data[i] = v;
		}
	}

	// Fill with small Gaussian noise (mean 0, stddev ~0.01) for weight init.
	void init_gauss()
	{
		for(int i = 0;i < size;i++) {
			data[i] = gaussrandF();
		}
	}

	// Bounds-checked element access.
	float& operator[](size_t i)
	{
		assert(i < size);
		return data[i];
	}

	float operator[](size_t i) const
	{
		assert(i < size);
		return data[i];
	}

	// Deep element copy into this vector's existing buffer;
	// sizes must already match (no reallocation happens).
	Vector& operator=(const Vector& rhs)
	{
		assert(size == rhs.size);
		memcpy(data, rhs.data, size * sizeof(float));
		return *this;
	}

	// Element-wise in-place addition.
	Vector& operator+=(const Vector& rhs)
	{
		assert(size == rhs.size);
		for(int i = 0;i < size;i++) {
			data[i] += rhs.data[i];
		}
		return *this;
	}

	// Element-wise in-place subtraction.
	Vector& operator-=(const Vector& rhs)
	{
		assert(size == rhs.size);
		for(int i = 0;i < size;i++) {
			data[i] -= rhs.data[i];
		}
		return *this;
	}

	// Element-wise (Hadamard) in-place product.
	Vector& operator*=(const Vector& rhs)
	{
		assert(size == rhs.size);
		for(int i = 0;i < size;i++) {
			data[i] *= rhs.data[i];
		}
		return *this;
	}

	// Scale every element by v.
	Vector& operator*=(float v)
	{
		for(int i = 0;i < size;i++) {
			data[i] *= v;
		}
		return *this;
	}

	// *this += v * rhs (axpy-style update).
	Vector& add(float v, const Vector& rhs)
	{
		assert(size == rhs.size);
		for(int i = 0;i < size;i++) {
			data[i] += v * rhs.data[i];
		}
		return *this;
	}

	// Abort the program unless rhs matches element-wise within 1e-7
	// (used to cross-check CPU vs GPU results).
	void assert_eq(const Vector& rhs)
	{
		assert(size == rhs.size);
		for(int i = 0;i < size;i++) {
			float d = data[i] - rhs[i];
			if(d < 0) d = -d;
			if(d > 1E-7) {
				cerr << "delta = " << d << endl;
				exit(-1);
			}
		}
	}

	// Index of the largest element (first occurrence wins on ties,
	// since the comparison is strict). Used for argmax classification.
	size_t max_idx() const 
	{
		size_t ret = 0;
		for(int i = 1;i < size;i++) {
			if(data[i] > data[ret]) {
				ret = i;
			}
		}
		return ret;
	}

	// Deep-copy this vector to the device (defined after Vector<GPU>).
	Vector<GPU> to_gpu() const;

	// Already on the host: returns a shallow copy sharing `data`.
	Vector to_cpu() const {
		return *this;
	}
};

// Print as "[(<size>) v0,v1,...]".
ostream& operator<<(ostream& out, const Vector<CPU>& v)
{
	out << "[(" << v.size << ") ";
	for(size_t i = 0;i < v.size;i++) {
		if(i > 0) out << ",";
		out << v[i];
	}
	// Close the bracket opened above (was missing, unlike the Matrix printer).
	out << "]";
	return out;
}

// Pick a per-block thread count for a 1-D launch over `size` elements:
// the element count, clamped to the hardware limit of 1024 threads per
// block and rounded UP to a multiple of the warp size (32).
// Always returns at least 32 so a size of 0 never produces a zero-thread
// launch (or a divide-by-zero in ceil(size, thread_cnt)).
inline size_t get_thread_cnt(size_t size)
{
	if(size == 0) return 32;
	size = std::min(size, (size_t)1024);
	// round up to a multiple of 32: (size + 31) & ~31
	size += 31;
	size &= ~(size_t)31;
	return size;
}

// Integer ceiling division: smallest q with q * y >= x.
inline size_t ceil(size_t x, size_t y)
{
	return x / y + (x % y != 0);
}

// Element-wise device copy: dst[i] = src[i] for i in [0, size).
__global__ void copy(float *dst, float *src, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		dst[idx] = src[idx];
	}
}

// Fill dst[0..size) with the constant v.
__global__ void init(float *dst, float v, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		dst[idx] = v;
	}
}

// z = x + y, element-wise. Output may alias an input.
__global__ void add(float *x, float *y, float *z, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		z[idx] = x[idx] + y[idx];
	}
}

// z = x + v * y, element-wise (axpy-style). Output may alias an input.
__global__ void add(float *x, float v, float *y, float *z, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		z[idx] = x[idx] + v * y[idx];
	}
}

// z = x - y, element-wise. Output may alias an input.
__global__ void sub(float *x, float *y, float *z, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		z[idx] = x[idx] - y[idx];
	}
}

// z = x * y, element-wise (Hadamard product). Output may alias an input.
__global__ void mul(float *x, float *y, float *z, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		z[idx] = x[idx] * y[idx];
	}
}

// z = v * x, element-wise scaling. Output may alias the input.
__global__ void mul(float *x, float v, float *z, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		z[idx] = x[idx] * v;
	}
}

// y = W * x, where W is a row-major (row x col) matrix.
// One thread per output row; each thread walks its row sequentially.
__global__ void mul(float *w, float *x, float *y, size_t row, size_t col)
{
	size_t r = blockIdx.x * blockDim.x + threadIdx.x;
	if(r < row) {
		float acc = 0;
		for(size_t k = 0;k < col;k++) {
			acc += w[r * col + k] * x[k];
		}
		y[r] = acc;
	}
}

// Rank-1 update: out[i][j] = w[i][j] + v * x[i] * y[j].
// One thread per matrix element of the flattened row-major (row x col) array.
__global__ void add(float *w, float v, float *x, float *y, float *out, size_t row, size_t col)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < row * col) {
		size_t i = idx / col;
		size_t j = idx % col;
		out[idx] = w[idx] + v * x[i] * y[j];
	}
}

// y = W^T * x, where W is row-major (row x col). One thread per column of W.
__global__ void t_and_mul(float *w, float *x, float *y, size_t row, size_t col)
{
	size_t c = blockIdx.x * blockDim.x + threadIdx.x;
	if(c < col) {
		float acc = 0;
		for(size_t k = 0;k < row;k++) {
			acc += w[k * col + c] * x[k];
		}
		y[c] = acc;
	}
}

// Device-side dense float vector mirroring Vector<CPU>'s interface; every
// operation launches a kernel on the default stream. Ownership is shallow,
// as on the host side: copies alias the same device buffer and there is no
// destructor.
// NOTE(review): kernel launches below are not followed by
// cudaGetLastError(); a launch failure would only surface at the next
// CK-wrapped call.
template<>
struct Vector<GPU>
{
	size_t size;   // number of elements
	float *data;   // device pointer (cudaMalloc'ed)

	// Allocate device memory and zero-fill it with the ::init kernel.
	Vector(size_t _size): size(_size)
	{
		CK(cudaMalloc((void **)&data, size * sizeof(float)));
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::init<<<block_cnt, thread_cnt>>>(data, 0, size);
	}

	// Wrap an existing device buffer without copying.
	Vector(size_t _size, float *_data) : size(_size), data(_data) {}

	// Fill every element with the constant v.
	void init(float v)
	{
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::init<<<block_cnt, thread_cnt>>>(data, v, size);
	}

	// Draw Gaussian samples on the host, then upload them
	// (keeps the rand() sequence identical to the CPU model).
	void init_gauss()
	{
		Vector<CPU> tmp(size);
		tmp.init_gauss();
		CK(cudaMemcpy(data, tmp.data, size * sizeof(float), cudaMemcpyHostToDevice));
		free(tmp.data);
	}

	// Device-only element access (no bounds check).
	__device__ float& operator[](size_t i)
	{
		return data[i];
	}

	__device__ float operator[](size_t i) const
	{
		return data[i];
	}

	// Device-to-device element copy; sizes must already match.
	Vector& operator=(const Vector& rhs)
	{
		assert(size == rhs.size);
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::copy<<<block_cnt, thread_cnt>>>(data, rhs.data, size);
		return *this;
	}

	// Element-wise in-place addition.
	Vector& operator+=(const Vector& rhs)
	{
		assert(size == rhs.size);
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::add<<<block_cnt, thread_cnt>>>(data, rhs.data, data, size);
		return *this;
	}

	// Element-wise in-place subtraction.
	Vector& operator-=(const Vector& rhs)
	{
		assert(size == rhs.size);
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::sub<<<block_cnt, thread_cnt>>>(data, rhs.data, data, size);
		return *this;
	}

	// Element-wise (Hadamard) in-place product.
	Vector& operator*=(const Vector& rhs)
	{
		assert(size == rhs.size);
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::mul<<<block_cnt, thread_cnt>>>(data, rhs.data, data, size);
		return *this;
	}

	// Scale every element by v.
	Vector& operator*=(float v)
	{
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::mul<<<block_cnt, thread_cnt>>>(data, v, data, size);
		return *this;
	}

	// *this += v * rhs (axpy-style update).
	Vector& add(float v, const Vector& rhs)
	{
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::add<<<block_cnt, thread_cnt>>>(data, v, rhs.data, data, size);
		return *this;
	}

	// Download into a freshly malloc'ed host vector;
	// the caller owns (and must free) the returned buffer.
	Vector<CPU> to_cpu() const 
	{
		Vector<CPU> ret(size);
		CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyDeviceToHost));
		return ret;
	}

	// Already on the device: returns a shallow copy sharing `data`.
	Vector to_gpu() const {
		return *this;
	}
};

// Stream a GPU vector by staging it through a temporary host copy.
ostream& operator<<(ostream& out, const Vector<GPU>& v)
{
	Vector<CPU> host = v.to_cpu();
	out << host;
	free(host.data);
	return out;
}

// Deep-copy this host vector into a newly allocated device vector.
Vector<GPU> Vector<CPU>::to_gpu() const
{
	Vector<GPU>	ret(size);
	CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyHostToDevice));
	return ret;
}

// Primary template; only the Matrix<CPU> and Matrix<GPU> specializations
// below are ever instantiated.
template<size_t IS_GPU>
struct Matrix {};

// Host-side dense row-major matrix (row x col) in one malloc'ed block.
// Ownership is shallow, like Vector<CPU>: copies alias `data` and there is
// no destructor; callers free() manually where needed.
template<>
struct Matrix<CPU>
{
	size_t row, col, size;  // size == row * col
	float *data;            // element (i,j) lives at data[i * col + j]

	// Allocate and zero-fill a row x col matrix.
	Matrix(size_t _row, size_t _col) :
		row(_row), col(_col), size(_row * _col)
		{
			data = (float *)malloc(size * sizeof(float));
			assert(data);
			memset(data, 0, row * col * sizeof(float));
		}

	// Wrap an existing buffer without copying (no ownership transfer).
	Matrix(size_t _row, size_t _col, float *_data) :
		row(_row), col(_col), size(_row * _col), data(_data)
		{}

	// Fill every element with the constant v.
	void init(float v)
	{
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				data[i * col + j] = v;
			}
		}
	}

	// Fill with small Gaussian noise (mean 0, stddev ~0.01) for weight init.
	void init_gauss()
	{
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				data[i * col + j] = gaussrandF();
			}
		}
	}

	// Row access: m[i][j] addresses element (i,j). No bounds check.
	float * operator[](size_t i) {
		return data + i * col;
	}

	const float *operator[] (size_t i) const {
		return data + i * col;
	}

	// y = this * x (matrix-vector product).
	void mul(const Vector<CPU>& x, Vector<CPU>& y) const
	{
		assert(col == x.size);
		assert(row == y.size);
		for(int i = 0;i < row;i++) {
			y[i] = 0;
			for(int j = 0;j < col;j++) {
				y[i] += data[i * col + j] * x[j];
			}
		}
	}

	// Rank-1 update: this += v * (x outer y), i.e. (i,j) += v * x[i] * y[j].
	void add(float v, const Vector<CPU> &x, const Vector<CPU> &y)
	{
		assert(row == x.size);
		assert(col == y.size);
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				data[i * col + j] += v * x[i] * y[j];
			}
		}
	}

	// y = this^T * x (transpose-times-vector).
	void t_and_mul(const Vector<CPU> &x, Vector<CPU> &y)
	{
		assert(row == x.size);
		assert(col == y.size);
		for(int j = 0;j < col;j++) {
			y[j] = 0;
			for(int i = 0;i < row;i++) {
				float t = data[i * col + j];
				y[j] += t * x[i];
			}
		}
	}

	// Abort the program unless rhs matches element-wise within 1e-7
	// (used to cross-check CPU vs GPU results).
	void assert_eq(const Matrix& rhs)
	{
		assert(row == rhs.row);
		assert(col == rhs.col);
		for(int i = 0;i < row;i++) {
			for(int j = 0;j < col;j++) {
				float x = data[i * col + j];
				float y = rhs.data[i * col + j];
				float d =  x - y;
				if(d < 0) d = -d;
				if(d > 1E-7) {
					cerr << "[" << i << "," << j << "] delta=" << d << " x=" << x << " y=" << y << endl;
					exit(-1);
				}
			}
		}
	}

	// Deep-copy this matrix to the device (defined after Matrix<GPU>).
	Matrix<GPU> to_gpu() const;

	// Already on the host: returns a shallow copy sharing `data`.
	Matrix to_cpu() const {
		return *this;
	}
};

// Print as "[(<row> x <col>)" followed by one tab-indented row per line,
// closed with "]".
ostream& operator<<(ostream& out, const Matrix<CPU>& v)
{
	out << "[(" << v.row << " x " << v.col << ") " << endl;
	for(size_t i = 0;i < v.row;i++) {
		out << "\t";
		for(size_t j = 0;j < v.col;j++) {
			// BUG FIX: the separator used to go to cout even when printing
			// to a different stream; write it to `out` like everything else.
			if(j > 0) out << ",";
			out << v[i][j];
		}
		out << endl;
	}
	out << "]";
	return out;
}

// Device-side dense row-major matrix mirroring Matrix<CPU>'s interface;
// operations launch kernels on the default stream. Ownership is shallow:
// copies alias the same device buffer and there is no destructor.
// NOTE(review): kernel launches below are not followed by
// cudaGetLastError(); a launch failure would only surface at the next
// CK-wrapped call.
template<>
struct Matrix<GPU>
{
	size_t row, col, size;  // size == row * col
	float *data;            // device pointer, row-major layout

	// Allocate device memory and zero-fill it with the ::init kernel.
	Matrix(size_t _row, size_t _col) :
		row(_row), col(_col), size(_row * _col)
		{
			CK(cudaMalloc((void **)&data, size * sizeof(float)));
			assert(data);
			size_t thread_cnt = get_thread_cnt(size);
			size_t block_cnt = ceil(size, thread_cnt);
			::init<<<block_cnt, thread_cnt>>>(data, 0, size);
		}

	// Wrap an existing device buffer without copying.
	Matrix(size_t _row, size_t _col, float *_data) :
		row(_row), col(_col), size(_row * _col), data(_data)
		{}

	// Fill every element with the constant v.
	void init(float v)
	{
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::init<<<block_cnt, thread_cnt>>>(data, v, size);
	}

	// Draw Gaussian samples on the host, then upload them
	// (keeps the rand() sequence identical to the CPU model).
	void init_gauss()
	{
		Matrix<CPU> tmp(row, col);
		tmp.init_gauss();
		CK(cudaMemcpy(data, tmp.data, size * sizeof(float), cudaMemcpyHostToDevice));
		free(tmp.data);
	}

	// Device-only row access: m[i][j] addresses element (i,j).
	__device__ float* operator[](size_t i) {
		return data + i * col;
	}

	__device__ const float *operator[] (size_t i) const {
		return data + i * col;
	}

	// y = this * x (matrix-vector product); one thread per output row.
	void mul(const Vector<GPU>& x, Vector<GPU>& y) const
	{
		assert(col == x.size);
		assert(row == y.size);
		size_t thread_cnt = get_thread_cnt(row);
		size_t block_cnt = ceil(row, thread_cnt);
		::mul<<<block_cnt, thread_cnt>>>(data, x.data, y.data, row, col);
	}

	// Rank-1 update: this += v * (x outer y); one thread per element.
	void add(float v, const Vector<GPU> &x, const Vector<GPU> &y)
	{
		assert(row == x.size);
		assert(col == y.size);
		size_t thread_cnt = get_thread_cnt(size);
		size_t block_cnt = ceil(size, thread_cnt);
		::add<<<block_cnt, thread_cnt>>>(data, v, x.data, y.data, data, row, col);
	}

	// y = this^T * x; one thread per column.
	void t_and_mul(const Vector<GPU> &x, Vector<GPU> &y)
	{
		assert(row == x.size);
		assert(col == y.size);
		size_t thread_cnt = get_thread_cnt(col);
		size_t block_cnt = ceil(col, thread_cnt);
		::t_and_mul<<<block_cnt, thread_cnt>>>(data, x.data, y.data, row, col);
	}

	// Download into a freshly malloc'ed host matrix;
	// the caller owns (and must free) the returned buffer.
	Matrix<CPU> to_cpu() const {
		Matrix<CPU> ret(row, col);
		CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyDeviceToHost));
		return ret;
	}

	// Already on the device: returns a shallow copy sharing `data`.
	Matrix to_gpu() const {
		return *this;
	}
};

// Stream a GPU matrix by staging it through a temporary host copy.
ostream& operator<<(ostream& out, const Matrix<GPU>& v)
{
	Matrix<CPU> host = v.to_cpu();
	out << host;
	free(host.data);
	return out;
}

// Deep-copy this host matrix into a newly allocated device matrix.
Matrix<GPU> Matrix<CPU>::to_gpu() const
{
	Matrix<GPU> ret(row,col);
	CK(cudaMemcpy(ret.data, data, size * sizeof(float), cudaMemcpyHostToDevice));
	return ret;
}

// Softmax + cross-entropy loss, specialized for CPU and GPU below.
template<size_t IS_GPU>
struct Softmax{};

// Cross-entropy of softmax(x) against the target distribution y:
//   -sum_j y_j * log( exp(x_j) / sum_k exp(x_k) )
// computed in a numerically stable way by subtracting max(x):
//   log softmax(x)_j = x_j - (max + log sum_k exp(x_k - max))
__device__ __host__ float softmax_calc(float *x, float *y, size_t size)
{
	float maxX = x[0];
	for(size_t i = 1;i < size;i++) {
		if(x[i] > maxX) {
			maxX = x[i];
		}
	}

	float xSum = 0;
	for(size_t i = 0;i < size;i++) {
		xSum += expf(x[i] - maxX);
	}

	// log of the partition function; loop-invariant, so compute it once
	// instead of re-evaluating logf() on every iteration.
	float logZ = maxX + logf(xSum);

	float ret = 0;
	for(size_t i = 0;i < size;i++) {
		ret += y[i] * (x[i] - logZ);
	}

	return -ret;
}

// z <- gradient of the softmax cross-entropy loss with respect to x:
//   z_i = softmax(x)_i * (sum_j y_j) - y_i
// max(x) is subtracted before exponentiating for numerical stability
// (it cancels in the softmax ratio).
__device__ __host__ void softmax_propagate_delta(float *x, float *y, float *z, size_t size)
{
	/*
	  - d \sum y_j * log( exp(x_j) / \sum exp(x_k) )
	= - d \sum y_j * x_j - d \sum y_j log (\sum exp(x_k) )
	= - y_i + \sum (y_j * exp(x_i) / \sum exp(x_k))
	= - y_i + exp(x_i) (\sum y_j) / (\sum exp(x_k))
	*/

	float maxX = x[0];
	for(int i = 0;i < size;i++) {
		if(x[i] > maxX) {
			maxX = x[i];
		}
	}

	// y - exp(x) sum_of_y / sum_of_exp(x)
	// z temporarily holds exp(x_i - max) while the two sums accumulate.
	float sumOfY = 0;
	float sumOfX = 0;
	for(int i = 0;i < size;i++) {
		z[i] = expf(x[i] - maxX);
		sumOfY += y[i];
		sumOfX += z[i];
	}

	float yx = sumOfY/sumOfX;
	for(int i = 0;i < size;i++) {
		z[i] = yx * z[i] - y[i];
	}
}

template<>
struct Softmax<CPU>
{
	// Cross-entropy loss of softmax(x) against the target distribution y:
	// - \sum y_j * log( exp(x_j) / \sum exp(x_k) )
	float calc(const Vector<CPU>& x, const Vector<CPU>& y)
	{
		assert(y.size == x.size);
		return softmax_calc(x.data, y.data, x.size);
	}

	// z <- gradient of the loss above with respect to x.
	void propagate_delta(const Vector<CPU>& x, const Vector<CPU>& y, Vector<CPU>& z)
	{
		assert(y.size == x.size);
		assert(z.size == x.size);
		softmax_propagate_delta(x.data, y.data, z.data, x.size);
	}
};

// Single-thread kernel wrapper: only global thread 0 evaluates the loss
// on device and stores it into *z.
__global__ void softmax_calc_kernel(float *x, float *y, size_t size, float *z)
{
	size_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
	if(gidx == 0) {
		*z = softmax_calc(x, y, size);
	}
}

// Single-thread kernel wrapper: only global thread 0 computes the
// softmax cross-entropy gradient into z.
__global__ void softmax_propagate_delta_kernel(float *x, float *y, float *z, size_t size)
{
	size_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
	if(gidx == 0) {
		softmax_propagate_delta(x, y, z, size);
	}
}

template<>
struct Softmax<GPU>
{
	// Cross-entropy loss of softmax(x) against y, evaluated on device by a
	// single-thread kernel; the blocking cudaMemcpy brings the scalar back.
	// NOTE(review): allocates and frees a scratch float on every call — a
	// persistent buffer would avoid the per-call cudaMalloc cost.
	float calc(const Vector<GPU>& x, const Vector<GPU>& y)
	{
		assert(x.size == y.size);

		float *tmp, t2;
		CK(cudaMalloc((void **)&tmp, sizeof *tmp));
		softmax_calc_kernel<<<1,1>>>(x.data, y.data, x.size, tmp);
		CK(cudaMemcpy(&t2, tmp, sizeof t2, cudaMemcpyDeviceToHost));
		CK(cudaFree(tmp));
		return t2;
	}

	// z <- d(loss)/dx, computed on device. The launch is asynchronous;
	// subsequent default-stream work is ordered after it.
	void propagate_delta(const Vector<GPU> &x, const Vector<GPU> &y, Vector<GPU> &z)
	{
		assert(x.size == y.size);
		assert(x.size == z.size);
		softmax_propagate_delta_kernel<<<1,1>>>(x.data, y.data, z.data, x.size);
	}
};

// ReLU activation (forward pass and derivative), specialized below.
template<size_t IS_GPU>
struct Relu{};

template<>
struct Relu<CPU>
{
	// y = max(x, 0), element-wise.
	void forward(const Vector<CPU>& x, Vector<CPU> &y)
	{
		assert(x.size == y.size);
		for(size_t i = 0;i < x.size;i++) {
			float t = x[i];
			y[i] = (t < 0) ? 0 : t;
		}
	}

	// y = relu'(x): 1 where x >= 0, else 0.
	void derive(const Vector<CPU>& x, Vector<CPU> &y)
	{
		assert(x.size == y.size);
		for(size_t i = 0;i < x.size;i++) {
			y[i] = (x[i] < 0) ? 0 : 1;
		}
	}

	// y *= relu'(x): zero out entries of y where x < 0, keep the rest.
	void derive_and_dot_into(const Vector<CPU> &x, Vector<CPU> &y)
	{
		assert(x.size == y.size);
		for(size_t i = 0;i < x.size;i++) {
			if(x[i] < 0) {
				y[i] = 0;
			}
		}
	}
};

// y = max(x, 0), element-wise.
__global__ void relu_forward(float *x, float *y, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		float v = x[idx];
		y[idx] = (v < 0) ? 0 : v;
	}
}

// y = relu'(x): 1 where x >= 0, else 0.
__global__ void relu_derive(float *x, float *y, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		y[idx] = (x[idx] < 0) ? 0.0f : 1.0f;
	}
}

// y *= relu'(x): zero out entries of y where x < 0, keep the rest.
__global__ void relu_derive_and_dot(float *x, float *y, size_t size)
{
	size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if(idx < size) {
		if(x[idx] < 0) {
			y[idx] = 0;
		}
	}
}

template<>
struct Relu<GPU>
{
	// y = max(x, 0), element-wise, on device.
	void forward(const Vector<GPU> &x, Vector<GPU> &y)
	{
		assert(x.size == y.size);
		size_t threads = get_thread_cnt(x.size);
		size_t blocks = ceil(x.size, threads);
		relu_forward<<<blocks, threads>>>(x.data, y.data, x.size);
	}

	// y = relu'(x), element-wise, on device.
	void derive(const Vector<GPU> &x, Vector<GPU> &y)
	{
		assert(x.size == y.size);
		size_t threads = get_thread_cnt(x.size);
		size_t blocks = ceil(x.size, threads);
		relu_derive<<<blocks, threads>>>(x.data, y.data, x.size);
	}

	// y *= relu'(x), element-wise, on device.
	void derive_and_dot_into(const Vector<GPU> &x, Vector<GPU> &y)
	{
		assert(x.size == y.size);
		size_t threads = get_thread_cnt(x.size);
		size_t blocks = ceil(x.size, threads);
		relu_derive_and_dot<<<blocks, threads>>>(x.data, y.data, x.size);
	}
};

// One fully-connected layer with ReLU activation, templated on placement.
//
// Buffers (all of length out_size unless noted):
//   w     - out_size x in_size weight matrix
//   b     - bias vector
//   z     - pre-activation:  z = w * in + b
//   a     - activation:      a = relu(z)
//   delta - back-propagated error term for this layer
template<size_t IS_GPU>
struct Layer
{
	size_t in_size, out_size;
	Vector<IS_GPU> b, z, a, delta;
	Matrix<IS_GPU> w;
	Relu<IS_GPU> relu;

	// Gaussian-initialize b then w (in that order, so CPU and GPU models
	// built after identical srand() calls draw identical weights).
	Layer(size_t _in_size, size_t _out_size) :
		in_size(_in_size), out_size(_out_size),
		b(_out_size), w(_out_size, _in_size),
		z(_out_size), a(_out_size), delta(_out_size)
	{
		b.init_gauss();
		w.init_gauss();
	}

	// Forward pass: z = w * in + b, a = relu(z).
	void calc(const Vector<IS_GPU> &in)
	{
		w.mul(in, z);
		z += b;
		relu.forward(z, a);
	}

	// out <- w^T * delta: propagate this layer's error term to the
	// previous layer's activations.
	void propagate_delta(Vector<IS_GPU> &out)
	{
		w.t_and_mul(delta, out);
	}

	// SGD step: b -= alpha * delta; w -= alpha * (delta outer prev_a).
	void update_parameters(float alpha, const Vector<IS_GPU> &prev_a)
	{
		b.add(-alpha, delta);
		assert(w.col == prev_a.size);
		w.add(-alpha, delta, prev_a);
	}
};

// Decode a 4-byte big-endian (MSB-first) integer, as used by the MNIST
// IDX file headers.
int MsbInt(char buf[])
{
	int ret = 0;
	for(int i = 0;i < 4;i++) {
		ret = ret * 256 + (unsigned char)buf[i];
	}
	return ret;
}

// Read an MNIST label file (IDX1 format): big-endian magic 0x00000801,
// big-endian label count, then one unsigned byte per label.
vector<int> ReadMnistLabels(string fileName)
{
	vector<int> ret;
	ifstream ifs(fileName.c_str(), ios::binary);
	// Fail loudly if the file is missing; otherwise the reads below would
	// leave buf uninitialized and the magic assert would fire confusingly.
	if(!ifs) {
		cerr << "Cannot open " << fileName << endl;
		exit(-1);
	}
	char buf[1000];

	// MAGIC
	ifs.read(buf, 4);
	int magic = MsbInt(buf);
	assert(magic == 0x00000801);

	// num of images
	ifs.read(buf, 4);
	int nImages = MsbInt(buf);

	while(nImages--) {
		ret.push_back(ifs.get());
	}

	return ret;
}
// Read an MNIST image file (IDX3 format): big-endian magic 0x00000803,
// image count, rows, cols, then row*col unsigned bytes per image.
// Pixels are scaled into [0, 1).
vector<Vector<CPU> > ReadMnistData(string fileName)
{
	vector<Vector<CPU> > ret;
	ifstream ifs(fileName.c_str(), ios::binary);
	// Fail loudly if the file is missing; otherwise the reads below would
	// leave buf uninitialized and the magic assert would fire confusingly.
	if(!ifs) {
		cerr << "Cannot open " << fileName << endl;
		exit(-1);
	}
	char buf[1000];

	// MAGIC
	ifs.read(buf, 4);
	int magic = MsbInt(buf);
	assert(magic == 0x00000803);

	// num of images
	ifs.read(buf, 4);
	int nImages = MsbInt(buf);

	int row, col;
	ifs.read(buf, 4);
	row = MsbInt(buf);
	ifs.read(buf, 4);
	col = MsbInt(buf);
	assert(row == 28 && col == 28);

	while(nImages--) {
		Vector<CPU> image(row * col);
		for(int i = 0;i < row * col;i++) {
			image[i] = ifs.get() / 256.0; // normalize byte to [0, 1)
		}
		ret.push_back(image);
	}

	return ret;
}
// One-hot encode integer labels: label v in [0, k) becomes a k-element
// vector with a 1 at index v. Takes `labels` by const reference to avoid
// copying the (potentially 60000-element) vector on every call; call sites
// are unchanged.
vector<Vector<CPU> > translateLabels(const vector<int>& labels, int k=10)
{
	vector<Vector<CPU> > ret;
	for(size_t i = 0;i < labels.size();i++) {
		Vector<CPU> tmp(k);
		assert(labels[i] >= 0 && labels[i] < k);
		tmp[labels[i]] = 1;
		ret.push_back(tmp);
	}
	return ret;
}

// Run the network forward on one sample.
//
// Fills each layer's z/a buffers, stores the softmax cross-entropy loss of
// the last layer's activation against `label` into `error`, and returns
// whether the argmax prediction equals `raw_label`.
template<size_t IS_GPU>
bool forward(
	vector<Layer<IS_GPU> >& model,
	Vector<IS_GPU>& in, Vector<IS_GPU>& label, int raw_label, 
	Softmax<IS_GPU>& s,
	float& error)
{
	size_t nLayer = model.size();

	for(size_t j = 0;j < nLayer;j++) {
		Layer<IS_GPU> &layer = model[j];
		if(j == 0) {
			layer.calc(in);
		} else {
			layer.calc(model[j-1].a);
		}
	}

	Layer<IS_GPU> &lastLayer = model[nLayer - 1];
	error = s.calc(lastLayer.a, label);

	Vector<CPU> result = lastLayer.a.to_cpu();
	size_t idx = result.max_idx();
	bool ok = (idx == (size_t)raw_label);
	// BUG FIX: Vector<GPU>::to_cpu() mallocs a fresh host buffer that was
	// leaked on every call. Free it here — but only on the GPU path, since
	// Vector<CPU>::to_cpu() returns a shallow copy of the model's own
	// buffer, which must NOT be freed.
	if(IS_GPU) {
		free(result.data);
	}
	return ok;
}

// One epoch of per-sample SGD over the training set followed by accuracy
// evaluation on both the training and the test set. Prints the training
// time and average cross-entropy, then both accuracies and the total time.
// `alpha` is the learning rate (default keeps the historical value).
template<size_t IS_GPU>
void run(vector<Vector<IS_GPU> >&train_data,
		vector<Vector<IS_GPU> >&train_label,
		vector<int> &raw_train_label,

		vector<Vector<IS_GPU> >&test_data,
		vector<Vector<IS_GPU> >&test_label,
		vector<int> &raw_test_label,

		vector<Layer<IS_GPU> >& model,
		float alpha = 0.001f
	)
{
	assert(train_data.size() == train_label.size());
	assert(train_data.size() == raw_train_label.size());
	assert(test_data.size() == test_label.size());
	assert(test_data.size() == raw_test_label.size());

	clock_t start = clock();

	size_t M = train_data.size();
	size_t T = test_data.size();

	float avg_error = 0;
	Softmax<IS_GPU> s;
	float error;
	for(size_t i = 0;i < M;i++) {
		forward(model, train_data[i], train_label[i], raw_train_label[i], s, error);
		avg_error += error;

		// Backward pass: compute delta for each layer, last to first.
		for(int j = (int)model.size() - 1;j >= 0;j--) {
			Layer<IS_GPU> &layer = model[j];
			if(j == (int)model.size() - 1) {
				s.propagate_delta(layer.a, train_label[i], layer.delta);
			} else {
				model[j + 1].propagate_delta(layer.delta);
			}
			// BUG FIX: the ReLU derivative must be evaluated at the
			// pre-activation z. It was evaluated at a = relu(z), which is
			// always >= 0, so the ">= 0 ? 1 : 0" mask was all ones and the
			// derivative was silently skipped.
			layer.relu.derive_and_dot_into(layer.z, layer.delta);
		}

		// Gradient step on every layer's weights and biases.
		for(size_t j = 0;j < model.size();j++) {
			model[j].update_parameters(alpha, j == 0 ? train_data[i] : model[j-1].a);
		}
	}
	avg_error /= M;

	clock_t mid = clock();
	cout << "\ttime=" << ((mid-start)*1.0/CLOCKS_PER_SEC) << " error=" << avg_error << endl;

	// Evaluate accuracy on the training set.
	size_t total = 0, good = 0;
	for(size_t i = 0;i < M;i++) {
		bool is_good = forward(model, train_data[i], train_label[i], raw_train_label[i], s, error);
		if(is_good) {
			good++;
		}
		total++;
	}
	cout << "\ttrain_accuracy=" << (good*1.0/total) << " ";

	// Evaluate accuracy on the held-out test set.
	total = good = 0;
	for(size_t i = 0;i < T;i++) {
		bool is_good = forward(model, test_data[i], test_label[i], raw_test_label[i], s, error);
		if(is_good) {
			good++;
		}
		total++;
	}
	cout << "test_accuracy=" << (good*1.0/total) << " ";

	clock_t end = clock();
	cout << "total_time=" << (end-start)*1.0/CLOCKS_PER_SEC << endl;
}

int main()
{
	cout << "Loading data" << endl;
	// Load the MNIST training set (labels + images) and mirror it onto the GPU.
	vector<int> raw_train_label = ReadMnistLabels("mnist/train-labels-idx1-ubyte");
	assert(raw_train_label.size() == 60000);
	vector<Vector<CPU> > cpu_train_data = ReadMnistData("mnist/train-images-idx3-ubyte");
	assert(cpu_train_data.size() == 60000);
	vector<Vector<GPU> > gpu_train_data;
	for(int i = 0;i < cpu_train_data.size();i++) {
		gpu_train_data.push_back(cpu_train_data[i].to_gpu());
	}
	vector<Vector<CPU> > cpu_train_label = translateLabels(raw_train_label);
	assert(cpu_train_label.size() == 60000);
	vector<Vector<GPU> > gpu_train_label;
	for(int i = 0;i < cpu_train_label.size();i++) {
		gpu_train_label.push_back(cpu_train_label[i].to_gpu());
	}


	// Same for the 10k-image test set.
	vector<int> raw_test_label = ReadMnistLabels("mnist/t10k-labels-idx1-ubyte");
	assert(raw_test_label.size() == 10000);
	vector<Vector<CPU> > cpu_test_data = ReadMnistData("mnist/t10k-images-idx3-ubyte");
	assert(cpu_test_data.size() == 10000);
	vector<Vector<GPU> > gpu_test_data;
	for(int i = 0;i < cpu_test_data.size();i++) {
		gpu_test_data.push_back(cpu_test_data[i].to_gpu());
	}
	vector<Vector<CPU> > cpu_test_label = translateLabels(raw_test_label);
	assert(cpu_test_label.size() == 10000);
	vector<Vector<GPU> > gpu_test_label;
	for(int i = 0;i < cpu_test_label.size();i++) {
		gpu_test_label.push_back(cpu_test_label[i].to_gpu());
	}

	// Network shape: 784 -> 1024 -> 256 -> 10.
	int n_input = cpu_train_data[0].size;
	assert(n_input == 28 * 28);
	int n_output = 10;
	int n_mid = 1024;
	int n_mid2 = 256;

	// Seed identically before building each model so the CPU and GPU
	// networks start from the same rand()-driven Gaussian weights.
	srand(1000);
	vector<Layer<CPU> > cpu_model;
	cpu_model.push_back(Layer<CPU>(n_input, n_mid));
	cpu_model.push_back(Layer<CPU>(n_mid, n_mid2));
	cpu_model.push_back(Layer<CPU>(n_mid2, n_output));

	srand(1000);
	vector<Layer<GPU> > gpu_model;
	gpu_model.push_back(Layer<GPU>(n_input, n_mid));
	gpu_model.push_back(Layer<GPU>(n_mid, n_mid2));
	gpu_model.push_back(Layer<GPU>(n_mid2, n_output));

	// Train both models for 5 epochs, reporting timing and accuracy after
	// each epoch so CPU and GPU runs can be compared side by side.
	for(int i = 0; i < 5;i++) {
		cout << "cpu-" << (i+1) << endl;
		run<CPU>(cpu_train_data, cpu_train_label, raw_train_label, cpu_test_data, cpu_test_label, raw_test_label, cpu_model);
		cout << "gpu-" << (i+1) << endl;
		run<GPU>(gpu_train_data, gpu_train_label, raw_train_label, gpu_test_data, gpu_test_label, raw_test_label, gpu_model);
	}
	return 0;
}


