﻿/*         //////////////////注意事项/////////////// || ///////////////////warning//////////////////*
*	神经元输出定位时记得减一 *
*	神经网络规模增大时记得改结构体位域 *
*	该释放的记得free *
*	量变产生质变，无数个bug组成的程序随便改动可能造成不可逆性损伤以至于无法使用 *
*	结构体中位域的使用已经全部取消，不必担心 *
* *
*/

#include <iostream>
#include <fstream>
#include <Windows.h>
#include "cJSON.h"
//#include <string>
//#include <cmath>

//Data-ingestion limits. If these exceed the range of a default 4-byte int,
//the train() function must be adjusted accordingly.
#define data_MAX_line 1000 //maximum number of data rows read in
#define data_MAX_line_uint 100 //maximum number of data cells per row

//Network size limits.
#define nnnu_MAX_nu_calculate 100000 //maximum neuron computations
#define nnnu_MAX_output 300 //maximum output branches per neuron (during computation)
#define nnnu_MAX_retain 100	//maximum output branches per neuron (when retained)

//Resolution: real value = stored value divided by the resolution.
#define ___resolution_int 255//integer form, for integer arithmetic
#define ___resolution_float 255.0//floating form, for double arithmetic

//Neuron computation parameters.
#define nnnu_BORDER_of_get_t_collection 650250 //cap on a neuron's accumulated input before settlement
#define nnnu_BORDER_of_get_t_calculation 65025 //cap on the input value used during tendency calculation
#define nnnu_BORDER_of_weight 650250 //|weight| of an output branch never exceeds this
#define nnnu_BORDER_of_bias 650250 //|bias| of an output branch never exceeds this
#define nnnu_BORDER_of_forward 130050//tendency value never exceeds this bound
#define nnnu_step_length .01//step-size coefficient
#define nnnu_inheritance_coeffcient 65025//inheritance out-of-range rollback coefficient
#define nnnu_true_matching_attenuation_coefficient 8.0//loss attenuation applied when the result matches

//Randomness parameters.
#define nnnu_BORDER_of_init_rand_weight_t 0//random weight perturbation applied when loading the network
#define nnnu_BORDER_of_weight_rand_float 3//random weight jitter during training
#define nnnu_BORDER_of_bias_rand_float 3//random bias jitter during training

//Neuron accessor (both indices are 1-based).
//FIX: parameters are parenthesized for macro hygiene; all existing call sites
//expand identically.
#define nnnu_GET_nu_inner(layer, num) (((*nn_layer_p)[(layer)-1])[(num)-1]) //fetch a neuron from inside the class

//Digit capacity of statistic strings, e.g. 3 digits store at most 99 (one slot for the terminator).
#define data_MAX_collect_time_BYTE 10

//Personal debugging parameters (unrelated to the network itself).
#define NN_filename "nn.json" //default save file for the network data
#define DATA_filename "train_11c_big.csv" //default training-data file
#define RESULT_filename "result.csv" //default file for post-training results

using namespace std;

//A neuron's output branch: where the signal goes and how it is scaled.
struct nnnu_output
{
	unsigned int location_layer;//target position: layer (1-based)
	unsigned int location_num;//target position: index within that layer (1-based)
	int weight;//weight applied to the transmitted value
	int bias;//bias added after weighting
};

//Neuron
struct nnnu__
{
	unsigned int layer;//layer this neuron lives in (1-based)
	unsigned int layer_num;//index within that layer (1-based)
	unsigned int threshold;//activation threshold
	unsigned int nuop_len;//number of output branches
	int get_t;//accumulated captured input value
	//Tendency: positive = should grow, negative = should shrink
	//(expected minus current, i.e. the "correct direction" for training).
	int forward_t;
	nnnu_output*** nuop_arr_p;//pointer to the output-branch array pointer (0 for output-layer neurons)
};

//Neural network
class NN
{
public:

	//Load or build a network from a JSON string (the input buffer is freed
	//before this function returns). Second parameter: random perturbation
	//range applied to each loaded weight (0 = load unchanged).
	void build(const char* cont, int init_rand_t_border);

	//Transform `value` through the branch's weight/bias and add it to the
	//target neuron's captured total.
	void nnnu_trans_add(nnnu_output* info, int value);

	//Feed input data into the first layer.
	void input(const unsigned char* input_arr, int len);

	//Activation function.
	int nu_get_Activation(int get_i);

	//Process (fire) all neurons.
	void nnnu_reaction(bool collect_evolution_data, bool collect_refactor_data);

	//Copy the output-layer results into output_arr_p.
	void output(int* output_arr_p);

	//Total number of layers.
	int get_layer_num();

	//Number of neurons in a given layer (0-based layer index).
	int get_layer_nus_num(int layern);

	//Fetch a copy of a neuron from outside the class (both indices are 1-based).
	nnnu__ get_nu(unsigned int layer, unsigned int num);

	//Reset (zero every neuron's get_t and forward_t).
	void reflesh();

	//Back-propagation step (returns the tendency; res_p receives suggested
	//deltas: [0] = weight, [1] = bias).
	int BP_calculation(int get_t, int weight_self, int bias_self, int loss_aim, double loss_result, int* res_p);

	//Update weights, biases and related parameters; returns the loss value.
	double update(int* result, bool result_matching);

	//Evolution (threshold adjustment etc.).
	void evolution();

	//Refactoring (adding/removing neurons etc.).
	void refactor();

	//Serialize to a string (JSON format).
	string save();

	//Release every owned resource.
	void free_all_source();


private:
	unsigned int nn_layer;//layer count
	unsigned int** nn_layer_num;//pointer to the per-layer neuron-count array
	nnnu__**** nn_layer_p;//pointer to the array of per-layer neuron arrays
	//int* nn_train_evolution_data;//training-time data collected for evolution
	//int* nn_train_refactor_data;//training-time data collected for refactoring
};

//Load (or build) a network from a JSON string. The input buffer `cont` is
//freed before returning (documented caller contract).
//init_rand_t_border: every loaded weight is perturbed by a random value in
//(-border, border); pass 0 to load the weights unchanged.
void NN::build(const char* cont, int init_rand_t_border)
{
	cJSON* NNstr = cJSON_Parse(cont);//root json
	cJSON* NNarr = cJSON_GetObjectItem(NNstr, "layers");//json array of layers
	nn_layer = cJSON_GetArraySize(NNarr);//total layer count
	nn_layer_num = (unsigned int**)malloc(sizeof(unsigned int*));
	nn_layer_p = (nnnu__****)malloc(sizeof(nnnu__***));
	unsigned int* nn_layer_num_arr = (unsigned int*)malloc(sizeof(unsigned int) * nn_layer);//per-layer neuron counts
	nnnu__*** nn_layer_cont_arr = (nnnu__***)malloc(sizeof(nnnu__**) * nn_layer);//per-layer arrays of neuron pointers
	*nn_layer_num = nn_layer_num_arr;
	*nn_layer_p = nn_layer_cont_arr;
	for (int layern = 0; layern < nn_layer - 1; layern++)//every layer except the output layer
	{
		cJSON* NNnu_layer = cJSON_GetArrayItem(NNarr, layern);//this layer's json
		cJSON* NNnu_layer_nu_arr = cJSON_GetObjectItem(NNnu_layer, "neurals");//json array of neurons
		const unsigned int layer_num = cJSON_GetArraySize(NNnu_layer_nu_arr);//neurons in this layer
		nn_layer_num_arr[layern] = layer_num;                  //record the neuron count of each layer
		nn_layer_cont_arr[layern] = (nnnu__**)malloc(sizeof(nnnu__*) * layer_num);//allocate the neuron-pointer array
		nnnu__** nnnu_layer_arr = nn_layer_cont_arr[layern]; //this layer's neuron-pointer array
		for (int num0 = 0; num0 < layer_num; num0++)//each neuron
		{
			cJSON* nnnu_json = cJSON_GetArrayItem(NNnu_layer_nu_arr, num0);//neuron json
			cJSON* nnnu_threshold_json = cJSON_GetObjectItem(nnnu_json, "threshold");//threshold json
			cJSON* nnnu_output_arr_json = cJSON_GetObjectItem(nnnu_json, "output");//output-branch array json
			unsigned int nu_output_num = (unsigned int)cJSON_GetArraySize(nnnu_output_arr_json);//branch count
			nnnu__* nnnu_creating = (nnnu__*)malloc(sizeof(nnnu__));//allocate the neuron
			nnnu_creating->threshold = nnnu_threshold_json->valueint;//read the threshold
			nnnu_creating->layer = layern + 1;//positions are stored 1-based
			nnnu_creating->layer_num = num0 + 1;
			nnnu_creating->nuop_len = nu_output_num;
			nnnu_creating->get_t = 0;
			nnnu_creating->forward_t = 0;

			nnnu_creating->nuop_arr_p = (nnnu_output***)malloc(sizeof(nnnu_output**));
			nnnu_output** nuop_arr_pointer = (nnnu_output**)malloc(sizeof(nnnu_output*) * nu_output_num);
			*nnnu_creating->nuop_arr_p = nuop_arr_pointer;

			for (int num1 = 0; num1 < nu_output_num; num1++)//each output branch
			{
				//FIX: the old code fetched this same array item twice; one lookup suffices.
				cJSON* nu_output_branch = cJSON_GetArrayItem(nnnu_output_arr_json, num1);//branch json

				nuop_arr_pointer[num1] = (nnnu_output*)malloc(sizeof(nnnu_output));//allocate the branch
				nnnu_output* branch_creating = nuop_arr_pointer[num1];

				cJSON* nu_output_branch_layer = cJSON_GetObjectItem(nu_output_branch, "layer");//target layer json
				branch_creating->location_layer = (unsigned int)nu_output_branch_layer->valueint;

				cJSON* nu_output_branch_layer_num = cJSON_GetObjectItem(nu_output_branch, "num");//target index json
				branch_creating->location_num = (unsigned int)nu_output_branch_layer_num->valueint;

				cJSON* nu_output_branch_weight = cJSON_GetObjectItem(nu_output_branch, "weight");//weight json
				branch_creating->weight = nu_output_branch_weight->valueint;
				if (init_rand_t_border)
				{
					//random perturbation in (-border, border)
					branch_creating->weight += (rand() % (init_rand_t_border * 2)) - init_rand_t_border;
				}

				cJSON* nu_output_branch_bias = cJSON_GetObjectItem(nu_output_branch, "bias");//bias json
				branch_creating->bias = nu_output_branch_bias->valueint;
			}
			nnnu_layer_arr[num0] = nnnu_creating;//store the finished neuron
		}
	}

	//Output layer: its neurons only receive — no threshold, no output branches.
	cJSON* NNnu_layer = cJSON_GetArrayItem(NNarr, nn_layer - 1);//layer json
	cJSON* NNnu_layer_nu_arr = cJSON_GetObjectItem(NNnu_layer, "neurals");//neuron array json
	const unsigned int layer_num = cJSON_GetArraySize(NNnu_layer_nu_arr);//neuron count
	nn_layer_num_arr[nn_layer - 1] = layer_num;
	nn_layer_cont_arr[nn_layer - 1] = (nnnu__**)malloc(sizeof(nnnu__*) * layer_num);
	nnnu__** nnnu_layer_arr = nn_layer_cont_arr[nn_layer - 1];
	for (int num0 = 0; num0 < layer_num; num0++)//each output neuron
	{
		//FIX: removed a dead cJSON_GetArrayItem(NNnu_layer_nu_arr, layer_num) call
		//that indexed one past the end of the array and ignored the result.
		nnnu_layer_arr[num0] = (nnnu__*)malloc(sizeof(nnnu__));
		nnnu__* nu_creating = nnnu_layer_arr[num0];
		nu_creating->get_t = 0;
		nu_creating->layer = nn_layer;
		nu_creating->layer_num = num0 + 1;
		nu_creating->nuop_len = 0;
		nu_creating->threshold = 0;
		nu_creating->forward_t = 0;
		nu_creating->nuop_arr_p = 0;//marks "no branches" for the rest of the code
	}

	cJSON_Delete(NNstr);//frees the whole parsed tree, including NNarr
	free((void*)cont);//contract: the caller's input string is consumed here
	return;
}

//Transform `value` through a branch's weight/bias and add the result to the
//target neuron's captured total. Out-of-range targets are silently ignored.
void NN::nnnu_trans_add(nnnu_output* info, int value)
{
	//Bounds-check the target location before touching anything.
	if (info->location_layer > nn_layer)
		return;
	if (info->location_num > (*nn_layer_num)[info->location_layer - 1])
		return;
	//Clamp the incoming signal to the calculation border.
	if (value > nnnu_BORDER_of_get_t_calculation)
		value = nnnu_BORDER_of_get_t_calculation;
	else if (value < -nnnu_BORDER_of_get_t_calculation)
		value = -nnnu_BORDER_of_get_t_calculation;
	nnnu__* target = nnnu_GET_nu_inner(info->location_layer, info->location_num);
	//Weighted, biased and scaled back down by the resolution.
	target->get_t += (int)((info->weight * value + info->bias) / ___resolution_int);
	//Keep the accumulated total inside the collection border.
	if (target->get_t > nnnu_BORDER_of_get_t_collection)
		target->get_t = nnnu_BORDER_of_get_t_collection;
	else if (target->get_t < -nnnu_BORDER_of_get_t_collection)
		target->get_t = -nnnu_BORDER_of_get_t_collection;
}

//Feed raw bytes into the first layer. If the data is shorter than the layer
//(or longer), the excess on either side is simply ignored.
void NN::input(const unsigned char* input_arr, int len)
{
	const int first_layer_size = (int)(*nn_layer_num)[0];
	for (int idx = 0; idx < first_layer_size && idx < len; idx++)
	{
		//Scale each byte up by the resolution before storing it as captured value.
		nnnu_GET_nu_inner(1, idx + 1)->get_t = input_arr[idx] * ___resolution_int;
	}
}

//Activation function: scales the input by a coefficient (2 - |x|/255^2)^2,
//capped at 2.0 — amplifies mid-range magnitudes, damps values near the border.
int NN::nu_get_Activation(int get_i)
{
	//Activation coefficient.
	double coeff = 2.0 - (abs(get_i) / (___resolution_float * ___resolution_float));
	coeff *= coeff;
	if (coeff > 2.0)
		coeff = 2.0;
	return (int)(get_i * coeff);
}

//Process all neurons: a neuron whose clamped captured value exceeds its
//threshold fires, sending its activated value down each output branch.
//collect_evolution_data / collect_refactor_data are currently unused
//placeholders for the (not yet implemented) evolution/refactor passes.
void NN::nnnu_reaction(bool  collect_evolution_data, bool collect_refactor_data)
{
	for (int num1 = 0; num1 < nn_layer - 1; num1++)//every layer except the output layer
	{
		nnnu__** nu_layer_handing = ((*nn_layer_p)[num1]);
		unsigned int* nn_layer_max = *nn_layer_num;
		for (int num2 = 0; num2 < nn_layer_max[num1]; num2++)//each neuron in the layer
		{
			nnnu__* nnnu_handing = nu_layer_handing[num2];
			int get_i = nnnu_handing->get_t;
			if (get_i > nnnu_BORDER_of_get_t_calculation) get_i = nnnu_BORDER_of_get_t_calculation;
			if (get_i < -nnnu_BORDER_of_get_t_calculation) get_i = -nnnu_BORDER_of_get_t_calculation;
			//FIX: cast threshold to int. The previous signed/unsigned comparison
			//promoted a negative get_i to a huge unsigned value, wrongly firing
			//the neuron on strongly negative input.
			if (get_i > (int)nnnu_handing->threshold)//fire when above the threshold
			{
				int get_activ = nu_get_Activation(get_i);
				for (int num3 = 0; num3 < nnnu_MAX_output && num3 < nnnu_handing->nuop_len; num3++)//each output branch
				{
					nnnu_trans_add((*nnnu_handing->nuop_arr_p)[num3], get_activ);
				}
			}
		}
	}
}

//输出结果
void NN::output(int* output_arr_p)
{
	nnnu__** nn_nus = (*nn_layer_p)[nn_layer - 1];
	for (int num1 = 0; num1 < (*nn_layer_num)[nn_layer - 1]; num1++)
	{
		int pre_recv = nn_nus[num1]->get_t;
		if (pre_recv > nnnu_BORDER_of_get_t_calculation) pre_recv = nnnu_BORDER_of_get_t_calculation;
		if (pre_recv < -nnnu_BORDER_of_get_t_calculation) pre_recv = -nnnu_BORDER_of_get_t_calculation;
		output_arr_p[num1] = pre_recv / ___resolution_int;
	}
	return;
}

//Total number of layers in the network.
int NN::get_layer_num()
{
	return (int)nn_layer;
}

//Number of neurons in layer `layern` (0-based layer index).
int NN::get_layer_nus_num(int layern)
{
	return (int)(*nn_layer_num)[layern];
}


//Fetch a copy of a neuron from outside the class (both indices are 1-based).
nnnu__ NN::get_nu(unsigned int layer, unsigned int num)
{
	//Same lookup the internal accessor macro performs.
	return *nnnu_GET_nu_inner(layer, num);
}

//刷新(置零所有 get_t  forward_t)
void NN::reflesh()
{
	for (int num1 = 0; num1 < nn_layer; num1++)//每个神经元层
	{
		nnnu__** nu_layer_handing = ((*nn_layer_p)[num1]);//提取神经元结构体指针数组
		for (int num2 = 0; num2 < (*nn_layer_num)[num1]; num2++)//神经元层中每个神经元
		{
			nnnu__* nnnu_handing = nu_layer_handing[num2];//提取神经元
			nnnu_handing->get_t = 0;// 清空/置零 捕获总值
			nnnu_handing->forward_t = 0;// 清空/置零 趋向值
		}
	}
	return;
}


//Back-propagation step for one output branch.
//Returns the tendency contribution passed back to the owning neuron; res_p
//receives the suggested parameter deltas: [0] = weight delta, [1] = bias delta.
//get_t: the owning neuron's captured value; weight_self/bias_self: current
//branch parameters; loss_aim: the downstream neuron's tendency (signed target
//direction); loss_result: current loss (already attenuated on a correct match).
int NN::BP_calculation(int get_t, int weight_self, int bias_self, int loss_aim, double loss_result, int* res_p)
{
	res_p[0] = 0;
	res_p[1] = 0;
	if (loss_aim != 0)
	{
		if (get_t > 0)
		{
			if (get_t > nnnu_BORDER_of_get_t_calculation)get_t = nnnu_BORDER_of_get_t_calculation;
			double ipxp = get_t / (___resolution_float * ___resolution_float);//input normalized to [0,1]
			double ipxn = 1. - ipxp;//complement of the normalized input
			double loss_attenuation = loss_aim * .5 / (___resolution_float * ___resolution_float);//base of the loss attenuation factor
			loss_attenuation *= loss_attenuation;
			double loss_engine_coeffcient = (double)loss_aim / ((1. + loss_attenuation) * ___resolution_float);
			double universal_attenuation = 1.5 - (ipxp * ipxp + ipxn * ipxn);//damps updates at both extremes
			res_p[0] = nnnu_step_length * (loss_result * universal_attenuation * ipxp * ipxp * loss_engine_coeffcient);//weight delta grows with large inputs
			res_p[1] = nnnu_step_length * (loss_result * universal_attenuation * ipxn * ipxn * loss_engine_coeffcient);//bias delta grows with small inputs
			//return loss_engine_coeffcient * ___resolution_int + nnnu_inheritance_coeffcient * (1. - loss_attenuation);
			return nnnu_step_length * weight_self * (loss_engine_coeffcient / ___resolution_float);
			//small get_t shifts the bias, large get_t shifts the weight; mid-range
			//values pass the loss on strongly, extremes attenuate its propagation
		}
		else
		{
			//Neuron did not contribute (non-positive input): return the
			//inheritance rollback constant instead of a computed tendency.
			return nnnu_inheritance_coeffcient;
		}
	}
	return 0;
}

//Update pass (weights, biases etc.); returns the normalized loss.
//result: expected output values — ownership is taken, the array is freed here.
//result_matching: true when the prediction was correct; divides the propagated
//loss by nnnu_true_matching_attenuation_coefficient so correct samples perturb
//the parameters less.
double NN::update(int* result, bool result_matching)
{
	//Bias slowly follows the weight, driven by whether the upstream neuron ran high or low.

	//Output layer: tendency = expected value - (clamped) received value.
	nnnu__** nn_opl = ((*nn_layer_p)[nn_layer - 1]);
	int output_nus_num = (*nn_layer_num)[nn_layer - 1];
	double loss_t = 0;
	for (int num9 = 0; num9 < output_nus_num; num9++)
	{
		nnnu__* nu_handing = nn_opl[num9];
		int get_w = nu_handing->get_t;
		if (get_w > nnnu_BORDER_of_get_t_calculation)		get_w = nnnu_BORDER_of_get_t_calculation;//upper clamp
		if (get_w < -nnnu_BORDER_of_get_t_calculation)		get_w = -nnnu_BORDER_of_get_t_calculation;//lower clamp
		nu_handing->forward_t = result[num9] * ___resolution_int - get_w;
		if (nu_handing->forward_t > nnnu_BORDER_of_forward)
		{
			nu_handing->forward_t = nnnu_BORDER_of_forward;
		}
		else if (nu_handing->forward_t < -nnnu_BORDER_of_forward)
		{
			nu_handing->forward_t = -nnnu_BORDER_of_forward;
		}
		loss_t += abs(nu_handing->forward_t);//accumulate absolute error
	}
	loss_t /= (output_nus_num * ___resolution_float * ___resolution_float);//normalize the loss

	for (int num1 = nn_layer - 2; num1 > -1; num1--)//remaining layers, back to front
	{
		nnnu__** nn_layer_nus_arr = ((*nn_layer_p)[num1]);
		for (int num2 = (*nn_layer_num)[num1] - 1; num2 > -1; num2--)//each neuron
		{
			nnnu__* nu_handing = nn_layer_nus_arr[num2];
			int get_w = nu_handing->get_t;
			if (get_w > nnnu_BORDER_of_get_t_collection)		get_w = nnnu_BORDER_of_get_t_collection;//upper clamp
			if (get_w < -nnnu_BORDER_of_get_t_collection)		get_w = -nnnu_BORDER_of_get_t_collection;//lower clamp
			for (int num3 = 0; num3 < nu_handing->nuop_len; num3++)//each output branch
			{
				nnnu_output* nuop_handing = (*(nu_handing->nuop_arr_p))[num3];
				//Weight/bias update: BP suggestion plus random jitter, then clamped.
				int aft[2] = { 0,0 };
				nu_handing->forward_t += BP_calculation(nu_handing->get_t, nuop_handing->weight, nuop_handing->bias, nnnu_GET_nu_inner(nuop_handing->location_layer, nuop_handing->location_num)->forward_t, result_matching ? loss_t/ nnnu_true_matching_attenuation_coefficient : loss_t, aft);
				nuop_handing->weight += aft[0] + (rand() % (nnnu_BORDER_of_weight_rand_float * 2)) - nnnu_BORDER_of_weight_rand_float;
				nuop_handing->bias += aft[1] + (rand() % (nnnu_BORDER_of_bias_rand_float * 2)) - nnnu_BORDER_of_bias_rand_float;
				if (nuop_handing->weight > nnnu_BORDER_of_weight)	nuop_handing->weight = nnnu_BORDER_of_weight;
				if (nuop_handing->weight < -nnnu_BORDER_of_weight)	nuop_handing->weight = -nnnu_BORDER_of_weight;
				if (nuop_handing->bias > nnnu_BORDER_of_bias)	nuop_handing->bias = nnnu_BORDER_of_bias;
				if (nuop_handing->bias < -nnnu_BORDER_of_bias)	nuop_handing->bias = -nnnu_BORDER_of_bias;
			}
			if (nu_handing->forward_t != 0)	nu_handing->forward_t /= nu_handing->nuop_len;//average the tendency over the branches
			if (nu_handing->forward_t > nnnu_BORDER_of_forward) nu_handing->forward_t = nnnu_BORDER_of_forward;//upper tendency clamp
			if (nu_handing->forward_t < -nnnu_BORDER_of_forward) nu_handing->forward_t = -nnnu_BORDER_of_forward;//lower tendency clamp
		}
	}
	free(result);//ownership contract: the caller's expected-result array is consumed
	return loss_t;
}

//Evolution (adjust thresholds etc.) — intentionally a no-op for now.
void NN::evolution()
{
	//Idea: raise or lower each neuron's threshold based on the distribution of
	//low vs. high received values; a CNN judging direction and step size might
	//work well here.
}

//Refactoring (add/remove neurons etc.) — intentionally a no-op for now.
void NN::refactor()
{
	//Idea: add or remove neurons based on a usefulness index computed from
	//output volume, target thresholds and similar statistics.
}

//Serialize the network to a string (JSON format, same schema build() reads).
string NN::save()
{
	cJSON* json_nn_arr = cJSON_CreateArray();//array of layers
	for (int num1 = 0; num1 < nn_layer - 1; num1++)//every layer except the output layer
	{
		int layer_len = (*nn_layer_num)[num1];//neuron count of this layer
		nnnu__** layer_nus_arr = ((*nn_layer_p)[num1]);//this layer's neuron pointers
		cJSON* json_nn_layer_nu_arr = cJSON_CreateArray();//json array of neurons
		for (int num2 = 0; num2 < layer_len; num2++)//each neuron
		{
			nnnu__ nu_handing = *(layer_nus_arr[num2]);
			cJSON* json_nn_nu_output_arr = cJSON_CreateArray();//json array of output branches
			for (int num3 = 0; num3 < nu_handing.nuop_len; num3++)//each output branch
			{
				nnnu_output* op_handing = (*(nu_handing.nuop_arr_p))[num3];
				cJSON* json_nn_nu_output_obj = cJSON_CreateObject();
				cJSON_AddNumberToObject(json_nn_nu_output_obj, "layer", op_handing->location_layer);//target layer
				cJSON_AddNumberToObject(json_nn_nu_output_obj, "num", op_handing->location_num);//target index
				cJSON_AddNumberToObject(json_nn_nu_output_obj, "weight", op_handing->weight);
				cJSON_AddNumberToObject(json_nn_nu_output_obj, "bias", op_handing->bias);
				cJSON_AddItemToArray(json_nn_nu_output_arr, json_nn_nu_output_obj);//array takes ownership
			}
			cJSON* json_nn_nu_obj = cJSON_CreateObject();//neuron object
			cJSON_AddItemToObject(json_nn_nu_obj, "output", json_nn_nu_output_arr);
			cJSON_AddNumberToObject(json_nn_nu_obj, "layer_num", num2);
			cJSON_AddNumberToObject(json_nn_nu_obj, "threshold", nu_handing.threshold);
			cJSON_AddItemToArray(json_nn_layer_nu_arr, json_nn_nu_obj);
		}
		cJSON* json_nn_layer_obj = cJSON_CreateObject();//layer object
		cJSON_AddItemToObject(json_nn_layer_obj, "neurals", json_nn_layer_nu_arr);
		cJSON_AddNumberToObject(json_nn_layer_obj, "layer", num1 + 1);
		cJSON_AddItemToArray(json_nn_arr, json_nn_layer_obj);
	}

	//Output layer: only the position is stored (no branches, no threshold —
	//build() re-creates output neurons with threshold 0 and no branches).
	int layer_len = (*nn_layer_num)[nn_layer - 1];
	cJSON* json_nn_layer_nu_arr = cJSON_CreateArray();
	for (int num2 = 0; num2 < layer_len; num2++)//each output neuron
	{
		//FIX: the old code also created an "output" array here and neither
		//attached nor deleted it, leaking one cJSON array per output neuron.
		cJSON* json_nn_nu_obj = cJSON_CreateObject();
		cJSON_AddNumberToObject(json_nn_nu_obj, "layer_num", num2);
		cJSON_AddItemToArray(json_nn_layer_nu_arr, json_nn_nu_obj);
	}
	cJSON* json_nn_layer_obj = cJSON_CreateObject();
	cJSON_AddItemToObject(json_nn_layer_obj, "neurals", json_nn_layer_nu_arr);
	cJSON_AddNumberToObject(json_nn_layer_obj, "layer", nn_layer);
	cJSON_AddItemToArray(json_nn_arr, json_nn_layer_obj);


	cJSON* json_nn = cJSON_CreateObject();//root object
	cJSON_AddItemToObject(json_nn, "layers", json_nn_arr);//root takes ownership of everything
	//FIX: cJSON_Print returns a heap buffer the caller must release; the old
	//code copied it into the string and leaked the buffer.
	char* json_text = cJSON_Print(json_nn);
	string result = json_text ? json_text : "";
	free(json_text);//cJSON's default allocator is malloc; use cJSON_free if custom hooks are installed
	cJSON_Delete(json_nn);//frees the whole tree
	return result;
}

//释放所有占用资源(将删除所存神经网络，如需，先保存)
void NN::free_all_source()
{
	//for (int num1 = 0; num1 < nn_layer; num1++)//每个神经元层
	//{
	//	for (int num2 = 0; num2 < (*nn_layer_num)[num1]; num2++)//每个神经元
	//	{
	//		free(((*nn_layer_p)[num1])[num2]);
	//	}
	//	free(nn_layer_p[num1]);
	//}
	//free(nn_layer_p);
	return;
}


//Global network instance shared by load_nn / train / save_free_quit below.
NN main_nn;


//Transcode a UTF-8 string to the active ANSI code page via a UTF-16
//intermediate. The caller must free() the returned buffer.
char* UTF8toANSI(const char* str)
{
	//UTF-8 -> UTF-16
	int textlen = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0);
	wchar_t* turn = (wchar_t*)malloc((textlen + 1) * sizeof(wchar_t));
	memset(turn, 0, (textlen + 1) * sizeof(wchar_t));
	MultiByteToWideChar(CP_UTF8, 0, str, -1, turn, textlen);
	//UTF-16 -> ANSI
	//FIX: removed the old dead "char* result = { 0 };" initializer that was
	//immediately overwritten by the allocation below.
	textlen = WideCharToMultiByte(CP_ACP, 0, turn, -1, NULL, 0, NULL, NULL);
	char* result = (char*)malloc((textlen + 1) * sizeof(char));
	memset(result, 0, sizeof(char) * (textlen + 1));
	WideCharToMultiByte(CP_ACP, 0, turn, -1, result, textlen, NULL, NULL);
	free(turn);
	return result;
}

//Transcode an ANSI string to UTF-8 via a UTF-16 intermediate.
//The caller must free() the returned buffer.
char* ANSItoUTF8(const char* str)
{
	//ANSI -> UTF-16
	int wide_len = MultiByteToWideChar(CP_ACP, 0, str, -1, NULL, 0);
	wchar_t* wide_buf = (wchar_t*)malloc((wide_len + 1) * sizeof(wchar_t));
	memset(wide_buf, 0, (wide_len + 1) * sizeof(wchar_t));
	MultiByteToWideChar(CP_ACP, 0, str, -1, wide_buf, wide_len);
	//UTF-16 -> UTF-8
	int out_len = WideCharToMultiByte(CP_UTF8, 0, wide_buf, -1, NULL, 0, NULL, NULL);
	char* out_buf = (char*)malloc((out_len + 1) * sizeof(char));
	memset(out_buf, 0, sizeof(char) * (out_len + 1));
	WideCharToMultiByte(CP_UTF8, 0, wide_buf, -1, out_buf, out_len, NULL, NULL);
	free(wide_buf);
	return out_buf;
}

//Translate the network's output into a CSV fragment:
//"<first output value>,<label>", where the label is "蓝" when the first
//output value is positive and "无" otherwise.
string get_output_trans()
{
	int last_layer_nus_num = main_nn.get_layer_nus_num(main_nn.get_layer_num() - 1);
	int* output_arr = (int*)malloc(sizeof(int) * last_layer_nus_num);
	main_nn.output(output_arr);
	string ret;
	//Output values are clamped to [-255, 255] by NN::output, so "-255\0" fits.
	//FIX: removed an unused local counter and a memset that duplicated the
	//brace initialization below.
	char nuop_num[5] = { 0,0,0,0,0 };
	_itoa_s(output_arr[0], nuop_num, 10);
	ret += nuop_num;
	ret += ",";
	if (output_arr[0] > 0)
	{
		ret += "蓝";
	}
	else
	{
		ret += "无";
	}
	free(output_arr);
	return ret;
}

//Reverse-translate a textual result into an expected-output array: every
//slot defaults to -255; the first slot becomes 255 when the label is "蓝".
//The caller owns `output` and must free it (update() consumes it).
void de_trans_output(string result, int* output)
{
	const int oplen = main_nn.get_layer_nus_num(main_nn.get_layer_num() - 1);
	//Default every expected output to "inactive".
	for (int idx = 0; idx < oplen; idx++)
	{
		output[idx] = -255;
	}
	//The first slot encodes the label: positive for "蓝", negative otherwise.
	output[0] = (result == "蓝") ? 255 : -255;
}

//Load the neural network from the default JSON file into main_nn.
void load_nn()
{
	FILE* nn_json = 0;
	fopen_s(&nn_json, NN_filename, "r");
	if (!nn_json)//FIX: the old code dereferenced a NULL FILE* when the open failed
		return;
	fseek(nn_json, 0, SEEK_END);
	long flen = ftell(nn_json);
	//FIX: allocate one extra byte and NUL-terminate at the actual read count —
	//cJSON_Parse expects a C string, and text-mode fread may return fewer than
	//flen bytes, so the old buffer was never terminated.
	char* nn_str = (char*)malloc((flen + 1) * sizeof(char));
	fseek(nn_json, 0, SEEK_SET);
	size_t got = fread(nn_str, 1, flen, nn_json);
	nn_str[got] = '\0';
	fclose(nn_json);
	main_nn.build(nn_str, nnnu_BORDER_of_init_rand_weight_t);//build() frees nn_str
	return;
}

//Data format: the first row is a header (not trained on); the last column of
//each row is the expected result used to judge the loss.
//Trains the model. training_data: CSV text; update: apply parameter updates;
//upd_per_evo: evolve once every X updates (0 = never; requires update==true);
//evo_per_ref: refactor once every X evolutions (0 = never; requires evolution).
//Returns a CSV report: each input row plus output, result, loss, accuracy and
//the update/evolution/refactor counters.
string train(string training_data, bool update, unsigned char upd_per_evo, unsigned char evo_per_ref)
{
	training_data = training_data.substr(0, training_data.find_last_of('\n') + 1);//drop trailing garbage after the last newline
	int width = -1;//cells per row, minus one
	int pos = training_data.find('\n');
	int upd_time = 0, evo_time = 0, ref_time = 0;
	float accurate = 0.0;
	string ret;
	//Read the header row to count the columns (the header itself is not trained on).
	if (pos > 0)
	{
		string first_line = training_data.substr(0, pos);
		ret += first_line.c_str();
		ret += ",neural_1_output,result,lost,accurate,update_time,evolution_time,refactor_time\n";
		int cpos = first_line.find(',');
		while (cpos != string::npos)
		{
			cpos = first_line.find(',');
			first_line = first_line.substr(cpos + 1);
			width++;
		}
		training_data = training_data.substr(pos + 1);
	}
	//Consume the remaining rows one by one.
	for (int num1 = 0;; num1++)
	{
		int line_end = training_data.find('\n');
		if (line_end <= 0)
			break;
		string line = training_data.substr(0, line_end);//current row, without the '\n'
		ret += line.c_str();//echo the raw row into the report
		unsigned char* data_line = (unsigned char*)malloc(sizeof(unsigned char) * (width));
		for (int num2 = 0; num2 < width; num2++)//parse the `width` feature cells
		{
			int uint_end = line.find(',');
			if (uint_end == string::npos)
			{
				break;
			}
			string uint = line.substr(0, uint_end);
			line = line.substr(uint_end + 1);
			int ipv = abs(atoi(uint.c_str()));
			data_line[num2] = ipv > 255 ? 255 : ipv;//clamp at 255 to avoid overflow
		}
		string res_pre = line.c_str();//remainder after the last comma = expected result
		main_nn.reflesh();
		//FIX: pass width, not width + 1 — data_line holds exactly `width` cells,
		//so the old length let input() read one byte past the end of the buffer.
		main_nn.input(data_line, width);
		free(data_line);//FIX: this buffer was previously leaked once per row
		main_nn.nnnu_reaction(upd_per_evo, evo_per_ref);

		string opt = get_output_trans();
		ret += ',';
		ret += opt.c_str();

		double loss = 0;
		if (update)
		{
			int* det_res = (int*)malloc(sizeof(int) * (main_nn.get_layer_nus_num(main_nn.get_layer_num() - 1) + 1));
			de_trans_output(res_pre, det_res);//ownership of det_res passes to update()
			string match_acc = opt.substr(opt.find_last_of(',') + 1);//predicted label
			if (res_pre == "蓝" && match_acc == "蓝")
			{
				loss = main_nn.update(det_res, true);//correct positive
				accurate += 1.0;
			}
			else if (match_acc == "无" && !(res_pre == "蓝"))
			{
				loss = main_nn.update(det_res, true);//correct negative
				accurate += 1.0;
			}
			else
			{
				loss = main_nn.update(det_res, false);//mismatch: full loss
			}

			upd_time++;
			if (upd_per_evo)
			{
				if (upd_time % upd_per_evo == 0)
				{
					main_nn.evolution();
					evo_time++;
					if (evo_per_ref)
					{
						if (evo_time % evo_per_ref == 0)
						{
							main_nn.refactor();
							ref_time++;
						}
					}
				}
			}
		}
		char loss_str[16];
		memset(loss_str, '\0', 16);
		_gcvt_s(loss_str, loss, 6);
		ret += ',';
		ret += loss_str;

		char accurate_str[16];
		memset(accurate_str, '\0', 16);
		_gcvt_s(accurate_str, accurate / (num1 + 1.0), 6);//running accuracy
		ret += ',';
		ret += accurate_str;

		ret += ',';
		char upd_time_str[data_MAX_collect_time_BYTE];
		_itoa_s(upd_time, upd_time_str, 10);
		ret += upd_time_str;
		ret += ',';
		char evo_time_str[data_MAX_collect_time_BYTE];
		_itoa_s(evo_time, evo_time_str, 10);
		ret += evo_time_str;
		ret += ',';
		char ref_time_str[data_MAX_collect_time_BYTE];
		_itoa_s(ref_time, ref_time_str, 10);
		ret += ref_time_str;
		ret += '\n';
		if (line_end + 1 == training_data.length())
			break;
		training_data = training_data.substr(line_end + 1);
		if (training_data.length() == 0)
			break;
	}
	return ret;
}

//Train on the default data file and write the report to the default result
//file. Params: update = apply parameter updates; evolution = evolve once per
//X updates (needs update==true; 0 = never); refactor = refactor once per X
//evolutions (needs evolution enabled; 0 = never).
void train_regu(bool update, unsigned char evolution, unsigned char refactor)
{
	FILE* data_input_file = 0;
	fopen_s(&data_input_file, DATA_filename, "r");
	if (!data_input_file)//FIX: the old code dereferenced a NULL FILE* on a failed open
		return;
	fseek(data_input_file, 0, SEEK_END);
	long flen = ftell(data_input_file);
	fseek(data_input_file, 0, SEEK_SET);
	char* str = (char*)malloc(sizeof(char) * (flen + 1));
	//FIX: NUL-terminate at the actual read count — text-mode fread may return
	//fewer than flen bytes, and UTF8toANSI expects a C string; the old code
	//never terminated the buffer.
	size_t got = fread(str, 1, flen, data_input_file);
	str[got] = '\0';
	char* nstr = UTF8toANSI(str);//the data file is stored as UTF-8
	FILE* result_log_file = 0;
	fopen_s(&result_log_file, RESULT_filename, "w");
	string result = train(nstr, update, evolution, refactor);
	if (result_log_file)
	{
		fwrite(result.c_str(), 1, result.length(), result_log_file);
		fclose(result_log_file);
	}
	free(str);
	free(nstr);
	fclose(data_input_file);
	return;
}

//Save the network to its default file, then release all of its resources.
void save_free_quit()
{
	ofstream nn_file(NN_filename);//open the save file
	nn_file << main_nn.save();//serialize before releasing anything
	main_nn.free_all_source();
	nn_file.close();
}


int main()
{
	load_nn();//load the network from disk
	train_regu(true, 0, 0);//one regular training pass (no evolution, no refactoring)
	save_free_quit();//save, release, quit
	return 0;
}
