//2021-7-27
//Parallelized for single-machine multithreading

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <algorithm>
#include "byteswap.h"
#include "CNN/cnn.h"

//changed at 2021-7-27
#include"sleep.h"
#include<omp.h>
#include<memory>

using namespace std;

// Total number of single-sample training steps to run.
constexpr long EPS=100000;
// Number of forward passes to run during inference.
constexpr int INFERENCING_TIME=100;
// OpenMP thread-pool size.
constexpr int THREAD_NUMBER=40;

// Runs one training step on a single sample: forward pass, backpropagation,
// weight update. Returns the error (as a percentage) accumulated over the
// output positions the label marks as "hot" (expected > 0.5).
float train( vector<layer_t*>& layers, tensor_t<float>& data, tensor_t<float>& expected )
{
	// Cache the layer count as a signed int once, instead of comparing a
	// signed index against size_t on every iteration.
	const int layer_count = (int)layers.size();

	// Forward pass: the first layer consumes the input, later layers chain
	// on the previous layer's output.
	for ( int i = 0; i < layer_count; i++ )
	{
		if ( i == 0 )
			activate( layers[i], data );
		else
			activate( layers[i], layers[i - 1]->out );
	}

	// Output-layer gradient: prediction minus target.
	tensor_t<float> grads = layers.back()->out - expected;

	// Backward pass: the last layer takes the output gradient directly,
	// earlier layers take the gradient propagated by the layer above.
	for ( int i = layer_count - 1; i >= 0; i-- )
	{
		if ( i == layer_count - 1 )
			calc_grads( layers[i], grads );
		else
			calc_grads( layers[i], layers[i + 1]->grads_in );
	}

	for ( int i = 0; i < layer_count; i++ )
	{
		fix_weights( layers[i] );
	}

	// Error metric: sum of |grad| at the positions where the one-hot label
	// is 1, computed with an OpenMP parallel reduction.
	// (see https://blog.csdn.net/christprince007/article/details/39692601)
	float err = 0;
	const int total = grads.size.x * grads.size.y * grads.size.z;
#pragma omp parallel for reduction(+:err)
	for ( int i = 0; i < total; ++i )
	{
		if ( expected.data[i] > 0.5f )
			err += abs( grads.data[i] );
	}
	return err * 100;
}


// Forward propagation only: feeds `data` through every layer in order,
// each layer consuming the previous layer's output.
void forward( vector<layer_t*>& layers, tensor_t<float>& data )
{
	tensor_t<float>* input = &data;
	for ( layer_t* layer : layers )
	{
		activate( layer, *input );
		input = &layer->out;
	}
}

// One labelled training sample; in this file `data` is a 28x28x1 image with
// pixels normalized to [0,1] and `out` is a 10x1x1 one-hot label vector.
struct case_t
{
	tensor_t<float> data;
	tensor_t<float> out;
};

// Reads an entire binary file into a freshly allocated buffer.
// Returns nullptr when the file cannot be opened or fully read; otherwise
// the caller owns the returned buffer and must delete[] it.
uint8_t* read_file( const char* szFile )
{
	ifstream file( szFile, ios::binary | ios::ate );
	if ( !file.is_open() )
		return nullptr;

	streamsize size = file.tellg();
	if ( size < 0 )
		return nullptr;
	file.seekg( 0, ios::beg );

	uint8_t* buffer = new uint8_t[size];
	if ( !file.read( (char*)buffer, size ) )
	{
		// Partial or failed read: don't hand back a garbage buffer.
		delete[] buffer;
		return nullptr;
	}
	return buffer;
}

//changed at 2021-7-28
//References:
//http://c.biancheng.net/view/7847.html
//http://c.biancheng.net/view/7863.html
//https://stackoom.com/question/Q3uh
// Loads the MNIST training set (idx3 images + idx1 labels) from the working
// directory into normalized image tensors with one-hot label tensors.
// Returns an empty vector when either file cannot be read.
vector<case_t> read_test_cases()
{
	vector<case_t> cases;

	uint8_t* train_image = read_file( "train-images.idx3-ubyte" );
	uint8_t* train_labels = read_file( "train-labels.idx1-ubyte" );

	// Missing data files must not crash with a null-pointer dereference.
	if ( train_image == nullptr || train_labels == nullptr )
	{
		delete[] train_image;
		delete[] train_labels;
		return cases;
	}

	// The image count lives at byte offset 4 of the idx3 header, big-endian.
	uint32_t case_count = byteswap_uint32( *(uint32_t*)(train_image + 4) );

	// Pre-size the vector so the parallel loop can fill slots independently
	// (no push_back, so no data race on the container).
	case_t c0{ tensor_t<float>( 28, 28, 1 ), tensor_t<float>( 10, 1, 1 ) };
	cases.resize( case_count, c0 );
#pragma omp parallel for
	for ( int i = 0; i < (int)case_count; i++ )
	{
		case_t& c = cases[i];

		// Pixels start after the 16-byte idx3 header;
		// labels start after the 8-byte idx1 header.
		uint8_t* img = train_image + 16 + i * (28 * 28);
		uint8_t* label = train_labels + 8 + i;

		for ( int x = 0; x < 28; x++ )
			for ( int y = 0; y < 28; y++ )
				c.data( x, y, 0 ) = img[x + y * 28] / 255.f;

		// One-hot encode the label digit.
		for ( int b = 0; b < 10; b++ )
			c.out( b, 0, 0 ) = *label == b ? 1.0f : 0.0f;
	}
	delete[] train_image;
	delete[] train_labels;

	return cases;
}

// Convenience overload: runs one training step on a bundled sample.
float train( vector<layer_t*>& layers, case_t& theCase )
{
	float stepError = train( layers, theCase.data, theCase.out );
	return stepError;
}
// Runs ep_total single-sample training steps, cycling through the dataset,
// and prints the running mean error every REPORT_PERIOD steps.
void trainingProcess( vector<case_t>& cases, vector<layer_t*>& layers, long ep_total = EPS )
{
	constexpr long REPORT_PERIOD = 1000;

	// Guard: `ep % case_size` below would be a division by zero on an
	// empty dataset.
	if ( cases.empty() )
	{
		cout << "trainingProcess: no training cases, skipping" << endl;
		return;
	}

	float amse = 0;   // error accumulated over all steps so far
	long ic = 0;      // number of steps contributing to amse

	const long case_size = (long)cases.size();
	long ep = 0;
	while ( ep < ep_total )
	{
		amse += train( layers, cases[ep % case_size] );
		ep++;
		ic++;
		if ( ep % REPORT_PERIOD == 0 )
			cout << "case " << ep << " err=" << amse / ic << endl;
	}
}

// Loads a binary PPM (P6, maxval 255) image into `image`, averaging the RGB
// channels to a grayscale value in [0,1]. Returns false when the file cannot
// be read.
bool read_test_ppm( tensor_t<float>& image, const string& ppmFilePath )
{
	uint8_t* data = read_file( ppmFilePath.c_str() );
	if ( data == nullptr )
		return false;

	// Skip the PPM text header by scanning for the "255\n" maxval marker.
	// Byte-wise comparison replaces the original unaligned uint32_t load,
	// which violated strict aliasing.
	// NOTE(review): the scan is unbounded because read_file does not report
	// the file size -- a malformed file without "255\n" overruns the buffer.
	uint8_t* usable = data;
	while ( !( usable[0] == '2' && usable[1] == '5' && usable[2] == '5' && usable[3] == '\n' ) )
		usable++;

	// P6 pixel data begins immediately AFTER the single whitespace that
	// follows the maxval; the original started at the marker itself, which
	// shifted every pixel read by 4 bytes.
	usable += 4;

#pragma pack(push, 1)
	struct RGB
	{
		uint8_t r, g, b;
	};
#pragma pack(pop)

	RGB* rgb = (RGB*)usable;

	for ( int i = 0; i < 28; i++ )
	{
		for ( int j = 0; j < 28; j++ )
		{
			RGB rgb_ij = rgb[i * 28 + j];
			// Channel average, normalized to [0,1].
			image( j, i, 0 ) = ( ( (float)rgb_ij.r
					       + rgb_ij.g
					       + rgb_ij.b )
					     / ( 3.0f * 255.f ) );
		}
	}

	delete[] data;

	return true;
}
// Prints each of the 10 class activations, scaled to a percentage.
void print_result( tensor_t<float>& out )
{
	for ( int digit = 0; digit < 10; ++digit )
		printf( "[%i] %f\n", digit, 100.0f * out( digit, 0, 0 ) );
}
// Loads "test.ppm" and runs `time` forward passes through the network,
// printing the class scores after each one. Returns silently when the
// image cannot be read.
void inference( vector<layer_t*>& layers, int time = INFERENCING_TIME )
{
	tensor_t<float> image( 28, 28, 1 );
	if ( !read_test_ppm( image, "test.ppm" ) )
		return;

	int remaining = time;
	while ( remaining-- > 0 )
	{
		forward( layers, image );
		print_result( layers.back()->out );
	}
}

int main()
{
	//changed at 2021-7-27
#ifdef _OPENMP
	omp_set_num_threads(THREAD_NUMBER);
#endif

	vector<case_t> cases = read_test_cases();

	vector<layer_t*> layers;

	//changed at 2021-7-28
	//conv_layer_t * layer1 = new conv_layer_t( 1, 5, 8, cases[0].data.size );		// 28 * 28 * 1 -> 24 * 24 * 8
	unique_ptr<conv_layer_t>layer1=make_unique<conv_layer_t>(1,5,8,cases.front().data.size);
	//relu_layer_t * layer2 = new relu_layer_t( layer1->out.size );
	unique_ptr<relu_layer_t>layer2=make_unique<relu_layer_t>(layer1->out.size);
	//pool_layer_t * layer3 = new pool_layer_t( 2, 2, layer2->out.size );				// 24 * 24 * 8 -> 12 * 12 * 8
	unique_ptr<pool_layer_t>layer3=make_unique<pool_layer_t>(2,2,layer2->out.size);
	//fc_layer_t * layer4 = new fc_layer_t(layer3->out.size, 10);					// 4 * 4 * 16 -> 10
	unique_ptr<fc_layer_t>layer4=make_unique<fc_layer_t>(layer3->out.size,10);

	layers.push_back( (layer_t*)layer1.get() );
	layers.push_back( (layer_t*)layer2.get() );
	layers.push_back( (layer_t*)layer3.get() );
	layers.push_back( (layer_t*)layer4.get() );

	//changed at 2021-7-28
	cout<<"dataset size: "<<cases.size()<<endl;

	trainingProcess(cases,layers);
	// end:

	inference(layers);
	
	//changed at 2021-7-28
	//检查哪里释放了layers的指针
	//for(auto&layer:layers)delete layer;
	/* delete layer1;
	delete layer2;
	delete layer3;
	delete layer4; */

	return 0;
}
