﻿/*
* 版权所有 东海仙人岛 2025年3月 
* B站：https://space.bilibili.com/627167269
* 功能需求：
* 手写数组输入功能，可视化展示机器学习神经网络的结果，模拟神经元链接，动态展示识别过程的变化
* 设计方案：
* 界面显示，采用cvui; 权重文件的读取，采用opencv 4.10.0
 * 网络结构，输入层 28*28
* 激活函数 
* 使用说明：
* 权重文件，采用MNIST训练的数据集
*左键书写，右键擦除，esc按键退出.左侧的复选框可以显示或隐藏神经元连线。
*2025-3-8 更新记录：位于之前用32为float数，导致负无穷小和无穷大的数判断出现问题，修改为64位数参与计算。
*/
#include<iostream>
#include <torch/torch.h>
#include <opencv2/opencv.hpp>
#include <string>
#include <cstring>
#include <fstream>
#include <opencv2/core/utils/logger.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "CThread.h"
#include <filesystem>
#include <torch/data/datasets/tensor.h>  // 注意：TensorDataset 定义在这里

using namespace std;
using namespace cv;
using namespace cv::ml;

// One (and only one) of your C++ files must define CVUI_IMPLEMENTATION
// before the inclusion of cvui.h to ensure its implementation is compiled.
#define CV_TERMCRIT_ITER    1
#define CV_TERMCRIT_NUMBER  CV_TERMCRIT_ITER
#define CV_TERMCRIT_EPS     2
#define CVUI_IMPLEMENTATION

#include "cvui.h"
#include "cv_puttextzh.h"

#define WINDOW_NAME "机器学习手写数字输入可视化"
// A drawable grid cell: screen position plus its current value.
struct draw_s {
	int x;   // x position on the frame, in pixels
	int y;   // y position on the frame, in pixels
	float v; // cell value (handwriting cells use 0 = empty, 255 = inked)
};
// A drawable cell that also remembers its previous value; used for the
// hidden-layer weight table (g_hidLayerWeights).
struct draw_sHistory {
	int x;      // x position on the frame, in pixels
	int y;      // y position on the frame, in pixels
	float v;    // current value
	float vOld; // previous value, kept so changes can be detected/shown
};
// Pen color palette: RGBA components adjusted via the UI trackbars each frame.
uchar doubleColorValue_R = 31;
uchar doubleColorValue_G = 131;
uchar doubleColorValue_B = 231;
uchar doubleColorValue_A = 128;
unsigned int  bdColor;   // border color, packed from the palette in main()
unsigned int  filColor;  // fill color, packed from the palette in main()
// Handwriting pad: 28x28 cells.
draw_s g_inputLayerDraw[28*28];
cv::Rect g_inRect = { 400,480,7,7 };  // pad top-left position and per-cell size
// Flattened (784-cell) display of the handwriting pad.
draw_s g_inputLayerDraw784[28 * 28];
// Hidden-layer positions and values: 64 results.
draw_s g_hidLayerDraw[64];
draw_sHistory g_hidLayerWeights[65][10];
// Output-layer positions and values: 10 results (one per digit).
draw_s g_outputLayerDraw[10];
int g_mouse_x;
int g_mouse_y;
cv::String g_ResultString = "";
int g_gradw = 30;  // output slot width, in pixels
int g_gradh = 50;  // output slot height, in pixels

uint8_t g_mouseKeyleftRight = 0;  // 255 = left button held (write), 0 = right button (erase)
// Output buffer (1568 = 784*2 inputs, plus 10 outputs).
cv::AutoBuffer<double> _buf(1568 + 10);
// Result/output matrix.
Mat outputs;


// Compute device; CUDA availability is checked when training starts.
torch::Device device(torch::kCPU); // defaults to CPU
//cv::String trainEPOCH = "10";
cv::String labelhw = "";
const std::string weight_file = "../weights/mnist_cnn_weights.pt"; // weight file path

const int batch_size = 64;
const double learning_rate = 0.01;
const int momentum = 0.5;
/**/
// Convert an integer label column into one-hot form (defined below).
Mat one_hot(Mat label, int classes_num);
vector<int> layer_sizes= { 784,64,10 };;
double min_val, max_val, min_val1, max_val1;
vector<Mat> weights ;
RNG rng;
int paramstermCrittype = 3;
bool trained;
int max_lsize=784;
int g_train_cycle = 0;       // number of images trained so far (shown in the UI)
double g_lost = 0;           // most recently reported loss value
float g_each_number_error[10] ;
float g_each_number_error2[10];
int g_train_count = 1;       // number of epochs requested via the UI counter
cv::String trainresult = " ";

int layer_count() { return (int)layer_sizes.size(); }
int g_train_sleep_time = 1000;  // per-batch worker sleep (ms) so the UI refresh is visible

int g_thickness = 2;
float tempW;
float filter_step = 0.0001;
//-----------------------------------thread
CMutex g_metux;
void startTrain();
torch::Tensor g_ts_conv77;    // stage-2 conv output (64x7x7), copied to CPU for display
torch::Tensor g_ts_conv1414;  // stage-1 conv output (32x14x14), copied to CPU for display
// 3-D arrays holding the conv outputs for rendering in hidLayerDraw().
float conv_array[64][7][7];
float conv_array2[32][14][14];

// CNN for MNIST: two convolution stages (28x28 -> 14x14 -> 7x7) followed by a
// fully connected head that produces 10 class logits.
struct Net : torch::nn::Module {
	torch::nn::Sequential conv_layers1{ nullptr }, conv_layers2{ nullptr }, fc_layers{ nullptr };

	Net() {
		// Stage 1: 1 -> 32 feature maps. A 3x3 conv with padding=1 keeps the
		// 28x28 size; BatchNorm + ReLU follow, then a 2x2 max-pool halves the
		// spatial size to 14x14.
		conv_layers1 = torch::nn::Sequential(
			torch::nn::Conv2d(torch::nn::Conv2dOptions(1, 32, 3).padding(1)),
			torch::nn::BatchNorm2d(32),
			torch::nn::ReLU(),
			torch::nn::MaxPool2d(2));

		// Stage 2: 32 -> 64 feature maps. Same 3x3/padding=1 pattern keeps
		// 14x14, and the 2x2 max-pool brings it down to 7x7.
		conv_layers2 = torch::nn::Sequential(
			torch::nn::Conv2d(torch::nn::Conv2dOptions(32, 64, 3).padding(1)),
			torch::nn::BatchNorm2d(64),
			torch::nn::ReLU(),
			torch::nn::MaxPool2d(2));

		// Classifier: flatten the 64 7x7 maps (64*7*7 features), map to 128
		// units with ReLU, apply Dropout against overfitting, then map to the
		// 10 digit classes.
		fc_layers = torch::nn::Sequential(
			torch::nn::Linear(64 * 7 * 7, 128),
			torch::nn::ReLU(),
			torch::nn::Dropout(0.5),
			torch::nn::Linear(128, 10));

		// Register submodules so parameters()/save()/load() see them.
		register_module("conv_layers1", conv_layers1);
		register_module("conv_layers2", conv_layers2);
		register_module("fc_layers", fc_layers);
	}

	// Shapes observed at runtime:
	//   input         [1, 1, 28, 28]
	//   after stage 1 [1, 32, 14, 14]
	//   after stage 2 [1, 64, 7, 7]
	//   flattened     [1, 3136]
	//   output        [1, 10]
	torch::Tensor forward(torch::Tensor x) {
		auto features = conv_layers2->forward(conv_layers1->forward(x));
		// Flatten every sample of the batch to one row before the FC head.
		return fc_layers->forward(features.view({ features.size(0), -1 }));
	}
};

// Worker-thread wrapper (project CThread) that runs one training session so
// the UI thread stays responsive.
class TrainThread : public CThread
{
public:
	TrainThread(const std::string& strName)
		: m_strThreadName(strName)
	{
		cout << "train thread start." << endl;
	}

	~TrainThread()
	{
	}

public:
	// Thread entry point: runs startTrain() once. The task has no while loop,
	// so it executes a single time; exit and restart to train again.
	virtual void Run()
	{
		startTrain();
	}
private:
	std::string m_strThreadName;
};

//拟合函数 y=255⋅((x−1​)/9)^2
// Piecewise-linear fit (approximating y = 255*((x-1)/9)^2) that maps a
// convolution activation to a grayscale intensity: activations <= 0 render
// white (255) and large activations fade to black (0).
// Returns the gray level packed as 0x01RRGGBB (R = G = B = gray; the top
// byte is a fixed alpha of 1, matching the original packing).
// Fixes vs. the original: removed four dead-store locals that were
// initialized to 255 and then unconditionally overwritten, and dropped the
// redundant lower-bound tests in the if/else ladder (each branch already
// excludes the previous range). Behavior is unchanged, including NaN input
// falling through to black.
inline unsigned int  colorMaker(float conv_value)
{
	float t;
	if (conv_value <= 0)
		t = 255;                            // fully white
	else if (conv_value < 1)
		t = 255.0 - (35.0 * conv_value);    // ~255..220
	else if (conv_value < 2)
		t = 273.0 - (71.0 * conv_value);    // ~202..131
	else if (conv_value < 3)
		t = 309.0 - (89 * conv_value);      // ~131..42
	else if (conv_value < 12)
		t = 76.0 - (5 * conv_value);        // ~61..16
	else
		t = 0;                              // fully black (NaN also lands here)

	const unsigned int gray = (unsigned int)(t);
	// Pack identical R/G/B bytes and the fixed alpha byte of 1.
	return gray | (gray << 8) | (gray << 16) | (1u << 24);
}
//绘制两个卷积层 64
void hidLayerDraw(cv::Mat& frame)
{
	//第一次卷积
	int gradwh = 40;
	for (int i = 0; i < 32; i++)
	{
		int x = 225 + (i % 16) * (gradwh + 2);
		int y = 296 + 42 * (i / 16);
		cvui::rect(frame, x, y, gradwh, gradwh, 0xaaaaaa, 0xffa0a0a0);
		for (int h = 0; h < 14; ++h) {
			//cout  << endl;
			for (int w = 0; w < 14; ++w) {
				//cout << conv_array[i][h][w] << "  ";
				unsigned int c = colorMaker(conv_array2[i][h][w]);
				cvui::rect(frame, x + (3 * w), y + (3 * h), 3, 3, c, c);
			}
			//cout << endl;
		}
	}
	//第二层卷积
	gradwh = 28;
	for (int i = 0; i < 64; i++)
	{
		int x = 208 + (i % 32) * (gradwh + 2);
		int y = 228 + 30 * (i / 32);
		cvui::rect(frame, x, y, gradwh, gradwh, 0xaaaaaa, 0xffa0a0a0);
		for (int h = 0; h < 7; ++h) {
			for (int w = 0; w < 7; ++w) {
				unsigned int c = colorMaker(conv_array[i][h][w]);
				cvui::rect(frame, x + (4 * w), y + (4 * h), 4, 4, c, c);
			}
		}
	}
}

// Train the CNN on MNIST (runs on the TrainThread worker).
// Resumes from weight_file when it exists (also reading the stored epoch
// count and finish timestamp), trains for g_train_count epochs while
// publishing per-batch state to the globals the UI thread renders (input
// grid, conv activations, output slot, loss), then writes the weights plus
// metadata back to weight_file.
void startTrain()
{
#if USECUDA_BOOL_CONST
	if (!torch::cuda::is_available()) {
		std::cerr << "CUDA is not available. Switching to CPU..." << std::endl;
		device = torch::Device(torch::kCPU); // fall back to CPU when no GPU is present
	}
	else {
		std::cerr << "CUDA is available...." << std::endl;
		device = torch::Device(torch::kCUDA); // use the GPU
	}
	std::cout << "USECUDA_BOOL_CONST is true" << std::endl;
#else
	std::cout << "USECUDA_BOOL_CONST is false" << std::endl;
#endif
	try {
		// Load the dataset and set up normalization + stacking transforms.
		if (std::filesystem::exists("../data/mnist")) {
			std::cout << "数据集文件存在 ../data/mnist" << std::endl;
		}
		else {
			std::cout << "../data/mnist MNIST文件不存在，退出。" << std::endl;
			return;
		}
		auto train_dataset = torch::data::datasets::MNIST("../data/mnist")
			.map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
			.map(torch::data::transforms::Stack<>());

		auto train_loader = torch::data::make_data_loader<torch::data::samplers::RandomSampler>(
			std::move(train_dataset), batch_size);
		// Print the batch size.
		std::cout << "Batch size: " << batch_size << std::endl;

		// Instantiate the model; the optimizer is created after loading weights.
		auto model = std::make_shared<Net>();
		// Load pre-trained weights when available.

		// Check whether the weight file exists.
		int64_t epoch_count = 0;
		if (std::filesystem::exists(weight_file)) {
			std::cout << "权重文件存在，基于之前的权重继续训练。" << weight_file << std::endl;
			// Create an input archive and read the serialized data from disk.
			torch::serialize::InputArchive archive;
			archive.load_from(weight_file);

			// Load the model weights.
			model->load(archive);
			std::cout << "Model weights loaded successfully!" << std::endl;

			// Read the stored cumulative epoch count (optional metadata).
			try {
				torch::Tensor epoch_tensor;
				archive.read("epoch_count", epoch_tensor);
				epoch_count = epoch_tensor.item<int64_t>();
				std::cout << "[Weight info]:Training Epoch Count: " << epoch_count << std::endl;
			}
			catch (const c10::Error& e) {
				std::cerr << "Warning: 'epoch_count' not found!" << std::endl;
			}

			// Read the stored training-finished timestamp (ASCII codes in an int64 tensor).
			try {
				torch::Tensor time_tensor;
				archive.read("finished_time", time_tensor);
				auto time_data = time_tensor.data_ptr<int64_t>();
				std::string finished_time;
				for (size_t i = 0; i < time_tensor.size(0); ++i) {
					finished_time += static_cast<char>(time_data[i]);
				}
				std::cout << "Training Finished Time: " << finished_time << std::endl;
			}
			catch (const c10::Error& e) {
				std::cerr << "[Weight info]:Warning: 'finished_time' not found !" << std::endl;
			}
		}
		else {
			std::cout << "权重文件不存在,训练结束后会生成新的 " << weight_file << std::endl;
		}

		model->to(device); // move the model to the selected device
		torch::optim::SGD optimizer(model->parameters(), torch::optim::SGDOptions(learning_rate).momentum(momentum));

		// Training loop.
		size_t batch_index = 0;
		for (int epoch = 0; epoch < g_train_count ; ++epoch) {
			model->train();
			for (auto& batch : *train_loader) {
				auto data = batch.data.to(device); // move inputs to the device
				auto target = batch.target.to(device); // move labels to the device
				//std::cout << "Tensor [data] Size :" << data.sizes() << " Type: " << data.dtype()
				//	 << "Device: " << data.device() << std::endl;

				// Take the first sample of the batch and flatten it (for the UI display).
				torch::Tensor flat_tensor = data[0].view(-1).clone().to(torch::kCPU);

				torch::Tensor flat_target = target[0].view(-1).clone().to(torch::kCPU);
				// Copy the flattened tensors into std::vectors.
				std::vector<float> flat_array(flat_tensor.data_ptr<float>(), flat_tensor.data_ptr<float>() + flat_tensor.numel());
				std::vector<INT64> flat_targetarry(flat_target.data_ptr<INT64>(), flat_target.data_ptr<INT64>() + flat_target.numel());
				// Threshold the normalized pixels into the 0/255 handwriting grid.
				for (int i = 0; i < 784; i++)
				{
					g_inputLayerDraw[i].v = (flat_array[i]>1)?255:0;
				}
				// Refresh the 0-9 output slots from the sample's label.
			
				memset(g_outputLayerDraw, 0, sizeof(g_outputLayerDraw));
				g_outputLayerDraw[flat_targetarry[0]].v =1 ;// light the slot of the true label
				Sleep(g_train_sleep_time); // pause the worker so the UI refresh is visible

				optimizer.zero_grad();
				//auto output = model->forward(data); //Tensor[output] Size :[64, 10] Type float Device: cpu 
				//std::cout << "Tensor[output] Size :" << output.sizes() << " Type " << output.dtype()
				//	<< " Device: " << output.device() << std::endl;
				// The forward pass is split into stages so each conv output can be captured.
				auto c1= model->conv_layers1->forward(data);
				auto c2 = model->conv_layers2->forward(c1);
				auto x = c2.view({ c2.size(0), -1 });// flatten the conv output with view
				auto f = model->fc_layers->forward(x);
				auto parameters = model->named_parameters();

				g_ts_conv1414 = c1.clone().to(torch::kCPU);
				g_ts_conv77 = c2.clone().to(torch::kCPU);
				// Copy the conv results into the 3-D display arrays.

				int n = g_ts_conv77.numel();

				if (n > 0) {
					g_ts_conv77 = g_ts_conv77.to(torch::kFloat32);
					//cout << g_ts_conv77[0][0].sizes() << endl;
					//std::cout << "Tensor dtype:" << g_ts_conv77.dtype() <<"dev type"<< g_ts_conv77.device() << std::endl;
					//std::cout << "max:" << g_ts_conv77.max()  << "min " << g_ts_conv77.min() << "mean " << g_ts_conv77.mean() << std::endl;

					auto accessor = g_ts_conv77.accessor<float, 4>();  // 4-D accessor: [batch, channel, height, width]
					for (int c = 0; c < 64; ++c) {
						for (int h = 0; h < 7; ++h) {
							for (int w = 0; w < 7; ++w) {
								conv_array[c][h][w] = accessor[0][c][h][w];  // batch index 0
							}
						}
					}
				}
				if (g_ts_conv1414.numel() > 0) {
					g_ts_conv1414 = g_ts_conv1414.to(torch::kFloat32);

					auto accessor2 = g_ts_conv1414.accessor<float, 4>();  // 4-D accessor: [batch, channel, height, width]
					for (int c = 0; c < 32; ++c) {
						for (int h = 0; h < 14; ++h) {
							for (int w = 0; w < 14; ++w) {
								conv_array2[c][h][w] = accessor2[0][c][h][w];  // batch index 0
							}
						}
					}
				}
				// Placeholder for parameter inspection/debugging.
				for (auto& parameter : parameters)
				{
					//std::cout << parameter << std::endl;
				}
				auto loss = torch::nn::functional::cross_entropy(f, target);
				loss.backward();
				optimizer.step();

				// Report loss every 10 batches and publish it for the UI.
				if (++batch_index % 10 == 0) {
					std::cout << "Epoch [" << epoch + 1 << "] Batch [" << batch_index
						<< "] Loss: " << loss.item<double>() << std::endl;
					g_lost = loss.item<double>();
					
				}
				g_train_cycle = batch_index * (epoch + 1)* batch_size;
			}
			std::cout << "train time ： " << epoch + 1 << std::endl;
		}

		// Save the weights.
		model->to(torch::kCPU);// move back to CPU for compatibility with CPU-only runs
		// Build metadata: cumulative epoch count and finish timestamp.
		// Create an output archive and serialize the model parameters.
		torch::serialize::OutputArchive archive;
		model->save(archive);

		// Write the cumulative epoch count as an int64 tensor.
		std::cout << "本次完成训练次数： " << g_train_count << std::endl;
		std::cout << "之前训练次数： " << epoch_count << std::endl;
		trainresult = "训练结束,训练轮数" + to_string(epoch_count);
		archive.write("epoch_count", torch::tensor(static_cast<int64_t>(g_train_count + epoch_count))); // cumulative total

		// Get the current time and format it as a string.
		std::time_t now = std::time(nullptr);
		char time_buffer[80];
		std::strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
		std::string finished_time_str(time_buffer);
		// Encode the string as an int64 array (one ASCII code per element).
		std::vector<int64_t> finished_time_ascii;
		for (char ch : finished_time_str) {
			finished_time_ascii.push_back(static_cast<int64_t>(ch));
		}
		archive.write("finished_time", torch::tensor(finished_time_ascii, torch::TensorOptions(torch::kInt64)));

		// Persist the archive to disk.
		archive.save_to(weight_file);
		std::cout << "Model weights and metadata saved to " << weight_file << std::endl;

	}
	catch (const std::exception& e) {
		std::cerr << "Runtime error: " << e.what() << std::endl;
		return;
	}
}


//将标签数据改为one-hot型
Mat one_hot(Mat label, int classes_num)
{
	int rows = label.rows;
	Mat one_hot = Mat::zeros(rows, classes_num, CV_32FC1);
	for (int i = 0; i < label.rows; i++)
	{
		int index = label.at<int32_t>(i, 0);
		if (index >= 0 && index < classes_num) {
			one_hot.at<float>(i, index) = 1.0;
		}
		else {
			cerr << "Invalid label index at row " << i << ": " << index << endl;
		}
	}
	return one_hot;
}

//鼠标按键移动处理函数
void mouseAction(cv::Mat &frame)
{
	cv::Rect rectangleL(130, 10, 20, 20);
	cv::Rect rectangleR(150, 10, 20, 20);
	cvui::rect(frame, rectangleL.x, rectangleL.y, rectangleL.width, rectangleL.height, 0xaaaaaa, 0xdff000000);
	cvui::rect(frame, rectangleR.x, rectangleR.y, rectangleR.width, rectangleR.height, 0xaaaaaa, 0xdff000000);

	g_mouse_x = cvui::mouse().x;
	g_mouse_y = cvui::mouse().y;
	cvui::printf(frame, 10, 10, "(%d,%d)", cvui::mouse().x, cvui::mouse().y);
	// Did any mouse button go down? 按下的时刻调用一次
	if (cvui::mouse(cvui::DOWN)) {
		// Position the rectangle at the mouse pointer.
		//cvui::text(frame, 10, 70, "<-");
	}

	// Is any mouse button down (pressed)? //按下之后一直回调，适合按下之后鼠标书写
	if (cvui::mouse(cvui::IS_DOWN)) {
		// Adjust rectangle dimensions according to mouse pointer
	}

	// Did any mouse button go up?
	if (cvui::mouse(cvui::UP)) {
		// Hide the rectangle
	}

	// Was the mouse clicked (any button went down then up)?
	if (cvui::mouse(cvui::CLICK)) {
		//cvui::text(frame, 10, 70, " clicked!");
	}
	if (cvui::mouse(WINDOW_NAME, cvui::LEFT_BUTTON, cvui::IS_DOWN))
	{
		//cvui::text(frame, 10, 70, "<-");
		g_mouseKeyleftRight = 255;
		cvui::rect(frame, rectangleL.x, rectangleL.y, rectangleL.width, rectangleL.height, bdColor, filColor);
	}
	if (cvui::mouse(WINDOW_NAME, cvui::RIGHT_BUTTON, cvui::IS_DOWN))
	{
		//cvui::text(frame, 10, 70, "->");
		g_mouseKeyleftRight = 0;
		cvui::rect(frame, rectangleR.x, rectangleR.y, rectangleR.width, rectangleR.height, 0xaaaaaa, 0xdaaaa0000);
	}
}

//绘制手写板
void inputLayerDraw(cv::Mat& frame)
{
	//g_inputLayerDraw
	for (int j = 0; j < 28; j++)
		for (int i = 0; i < 28; i++)
		{
			if (g_inputLayerDraw[j * 28 + i].v == 255) {
				cvui::rect(frame, g_inRect.x + i * g_inRect.width, g_inRect.y + g_inRect.height * j, g_inRect.width, g_inRect.height, bdColor, filColor); //手写
			}
			else {
				cvui::rect(frame, g_inRect.x + i * g_inRect.width, g_inRect.y + g_inRect.height * j, g_inRect.width, g_inRect.height, 0xaaaaaa, 0xffa0a0a0);//画背景
			}
		}
	//cout << "thread main ----------->" << endl;
}

//绘制手写展开区
void inputExpansionLayerDraw(cv::Mat& frame) //
{
	int count = 0;
	int gradwh = 7;
	for (int j = 0; j < 7; j++)
		for (int i = 0; i < 120; i++,count++)
		{
			//cvui::rect(frame, 80 + i * (gradwh+2),  360 + (gradwh+2) * j, gradwh, gradwh, 0xaaaaaa, 0xffa0a0a0);
			if (g_inputLayerDraw[count].v == 255) {
				
				//0xaa0000  0x00880000手写笔记颜色，数越小，颜色越深 //acolor
				cvui::rect(frame, 80 + i * (gradwh + 2), 394 + (gradwh + 4) * j , gradwh, gradwh, bdColor, filColor);
			}
			else {
				cvui::rect(frame, 80 + i * (gradwh + 2), 394 + (gradwh + 4) * j, gradwh, gradwh, 0xaaaaaa, 0xffa0a0a0);//画背景
			}
			g_inputLayerDraw784[count].x = 80 + i * (gradwh + 2);
			g_inputLayerDraw784[count].y = 394 + (gradwh + 4) * j;
			//g_inputLayerDraw784[count].v = g_inputLayerDraw[count].v;
			if (count >= 784 -1)break;
		}
}

//输出层
void outputLayerDraw(cv::Mat& frame)
{
	//int gradwh = 30;
	int x = 313, y = 108;
	int x2 = 191, y2 = 300;
	cv::Point2i pt1, pt2,pt3,pt4;
	cv::Scalar color1, color2;
	for (int i = 0; i < 10; i++)
	{
		cvui::rect(frame, x + i * (g_gradw + 15), y , g_gradw, g_gradh, 0xaaaaaa, 0xffa0a0a0);
		cvui::text(frame, x + i * (g_gradw + 15)+5, y-20, to_string(i), 0.7, 1);
		g_outputLayerDraw[i].x = x + i * (g_gradw + 15);
		g_outputLayerDraw[i].y = y;
		pt1 = cv::Point2i(x2, y2 + (8 * i + 8));
		pt2 = cv::Point2i( x2 + 100 * (2 - g_each_number_error[i]), y2 + (8 * i + 8));
		color1 = cv::Scalar(132, 135, 240);
		color2 = cv::Scalar(246, 130, 50);
		pt3 = cv::Point2i(x2+420, y2 + (8 * i + 8));
		pt4 = cv::Point2i(x2 +420 -100 * (2 - g_each_number_error2[i]), y2 + (8 * i + 8));
		//画出表示每个数字误差的矩形 gdata[k] _df.at<double>(0, d)
		//cvui::rect(frame, x2  , y2 +(8*i +8), 100 * (2-g_each_number_error[i]), 4, 0xaaaaaa, 0xffa0a0a0);
		//cvui::rect(frame, x2 +400, y2 + (8 * i + 8), 100 * (2 - g_each_number_error2[i]), 4, 0xaaaaaa, 0xffa0a0a0);
		//cv::line(frame, pt1, pt2, color1, 2, 8, 0);
		//cv::line(frame, pt3, pt4, color2, 2, 8, 0);
	}
}
// UI entry point: creates the cvui window and, every ~100 ms, repaints the
// handwriting pad, flattened input strip, conv activation tiles, output
// slots, color/speed trackbars and labels; forwards mouse input; "Start"
// clears state and launches training on the worker thread.
// Keys: ESC or Shift+Q quits, Shift+C clears; closing the window also exits.
int main(int argc, const char* argv[])
{
	cout << "@ 2025 DongHai XianRen\n控制台，运行时请勿关闭" << endl;
	bool Filter_0_001 = false;
	bool Filter_0_0001 = true;
	bool Filter_0_00001 = false;
	
	bool windowsShow = true;

	TrainThread trainthread("ThreadTrain");

	//outputs = Mat(1, 10, CV_32F, buf + 1568);
	outputs.create(1, 10, CV_64F);

	// Training-related value ranges.
	 
	 //min_val = max_val = min_val1 = max_val1 = 0.;
	 max_val = 0.95; min_val = -max_val;
	 max_val1 = 0.98; min_val1 = -max_val1;

	utils::logging::setLogLevel(utils::logging::LOG_LEVEL_ERROR); // only log errors
	// Create a frame where components will be rendered to.
	cv::Mat frame = cv::Mat(720, 1280, CV_8UC3);
	memset(g_outputLayerDraw, 0, sizeof(g_outputLayerDraw));

	// Init cvui and tell it to create a OpenCV window, i.e. cv::namedWindow(WINDOW_NAME).
	cvui::init(WINDOW_NAME);

	while (windowsShow) {
		// Pack the trackbar-selected RGBA palette into the shared pen colors.
		// NOTE(review): uchar promotes to int, so doubleColorValue_A >= 128
		// shifted left by 24 overflows signed int — consider an unsigned cast.
		unsigned int r, g, b,a;// = (unsigned int)g_inputLayerDraw784[count].v;
		r = doubleColorValue_B;
		g = doubleColorValue_G << 8;
		b = doubleColorValue_R << 16;
		a = doubleColorValue_A << 24;
		bdColor = r | g | b | a;
		filColor = bdColor;
		//filColor |= 0xff00000000;

		// Fill the frame with a nice color
		frame = cv::Scalar(255, 255, 255);

		// Render UI components to the frame
		//cvui::text(frame, 226, 13, "Back Propagation Visualization可视化",1,1);
		cvZH::putTextZH(frame,"机器学习反向传播可视化",	cv::Point(226, 13),	CV_RGB(0, 0, 0),30);
		//cvui::text(frame, 110, 120, "cvui is awesome!",1,1);
		//int x = ;
		
		// Pen color trackbars (A/R/G/B components of the palette).
		cvui::text(frame, 890, 500, "A", 0.6, 1);
		cvui::trackbar(frame, 895, 485, 150, &doubleColorValue_A, (uchar)0, (uchar)255, 0, "%.0Lf");
		cvui::text(frame, 890, 540, "R", 0.6, 1);
		cvui::trackbar(frame, 895, 525, 150, &doubleColorValue_R, (uchar)0, (uchar)255, 0, "%.0Lf");
		cvui::text(frame, 890, 580, "G", 0.6, 1);
		cvui::trackbar(frame, 895, 565, 150, &doubleColorValue_G, (uchar)0, (uchar)255, 0, "%.0Lf");
		cvui::text(frame, 890, 620, "B", 0.6, 1);
		cvui::trackbar(frame, 895, 605, 150, &doubleColorValue_B, (uchar)0, (uchar)255, 0, "%.0Lf");
		// Training speed: per-batch sleep time of the worker thread (ms).
		cvui::text(frame, 608, 580, "Speed", 0.5, 1);
		unsigned int options = cvui::TRACKBAR_DISCRETE | cvui::TRACKBAR_HIDE_SEGMENT_LABELS;
		cvui::trackbar(frame, 671, 565, 150, &g_train_sleep_time, (int)0, (int)2000, 20, "%.0Lf", options, (int)10);

		// Mouse handling and the main drawing passes.
		mouseAction(frame);
		inputLayerDraw(frame);
		inputExpansionLayerDraw(frame);
		//hidLayerDraw(frame);
		outputLayerDraw(frame);
		hidLayerDraw(frame);

		// Show the prediction result string.
		cvui::text(frame, 777, 87, g_ResultString, 0.7, 1);
		
		// Overlay a fill bar on each output slot proportional to its value.
		for (int i = 0; i < 10; i++)
		{
			//cout << i << "= " << g_outputLayerDraw[i].v << endl;
			//cout << g_outputLayerDraw[i].x << " " << g_outputLayerDraw[i].y << " " << g_gradh * g_outputLayerDraw[i].v + 1 << endl;
			if(g_outputLayerDraw[i].x > 0 && g_outputLayerDraw[i].y >0)
			cvui::rect(frame, g_outputLayerDraw[i].x, g_outputLayerDraw[i].y, g_gradw, g_gradh * g_outputLayerDraw[i].v + 1, bdColor, filColor); //手写
		}

		// "Start" button: clear all display state and launch training.
		if (cvui::button(frame, 666, 648, "Start")) {
			memset(g_inputLayerDraw, 0, sizeof(g_inputLayerDraw));
			memset(g_outputLayerDraw, 0, sizeof(g_outputLayerDraw));
			memset(g_hidLayerDraw, 0, sizeof(g_hidLayerDraw));
			memset(g_inputLayerDraw784, 0, sizeof(g_inputLayerDraw784));
			g_ResultString = "";

			// Start training on the worker thread (calls startTrain()).
			//startTrain();
			
			trainthread.Start();
			//canmessageThread canthread("canthread");
			//canthread.Start();
		}
		if (cvui::button(frame, 1124, 8, "&Quit")) {
			break;
		}
		//if (cvui::button(frame, 57, 166, "Link")) {
		//	linkOut2hid(frame);
		//}
		// Filter-step checkboxes (currently disabled).
		/*cvui::text(frame, 36, 153, "Filter:", 0.4, 1);
		cvui::checkbox(frame, 37, 176, "0.001", &Filter_0_001);
		if (Filter_0_001) { Filter_0_0001 = Filter_0_00001 = false; filter_step = 0.001;};
		cvui::checkbox(frame, 37, 206, "0.0001", &Filter_0_0001);
		if (Filter_0_0001) { Filter_0_001 = Filter_0_00001 = false; filter_step = 0.0001; }
		cvui::checkbox(frame, 37, 236, "0.00001", &Filter_0_00001);
		if (Filter_0_00001) { Filter_0_001 = Filter_0_0001 = false; filter_step = 0.00001; }*/
		// Copyright and static labels.
		cvui::text(frame, 1004, 677, "@ 2025 DongHai XianRen", 0.4, 1);
		cvZH::putTextZH(frame, "卷积1: 32 14*14", cv::Point(37, 311), CV_RGB(0, 0, 0), 20);
		cvZH::putTextZH(frame, "卷积2: 64 7*7", cv::Point(41, 225), CV_RGB(0, 0, 0), 20);
		cvZH::putTextZH(frame, "全连接1: 1, 3136", cv::Point(53, 185), CV_RGB(0, 0, 0), 20);
		//cvZH::putTextZH(frame, "全连接2: 1, 10", cv::Point(53, 165), CV_RGB(0, 0, 0), 20);
		cvZH::putTextZH(frame, "输出层 10", cv::Point(153, 107), CV_RGB(0, 0, 0), 20);
		cvZH::putTextZH(frame, "手写板 28*28", cv::Point(444, 688), CV_RGB(0, 0, 0), 20);
		// Training progress: images trained / total.
		//cvui::text(frame, 51, 526, "Trained pic: " + to_string(g_train_cycle) + "/60000",0.5,1);
		cvZH::putTextZH(frame, "训练图片个数/总数" , cv::Point(51, 526), CV_RGB(0, 0, 0), 20);
		cvui::text(frame, 240, 533,  to_string(g_train_cycle) + "/60000*"+to_string(g_train_count), 0.5, 1);
		//cvui::text(frame, 51, 546, "  Deviation: " + to_string(g_lost) , 0.5, 1);
		cvZH::putTextZH(frame, "损失率lost", cv::Point(51, 546), CV_RGB(0, 0, 0), 20);
		cvui::text(frame, 161, 553,  to_string(g_lost), 0.5, 1);

		cvZH::putTextZH(frame, "训练轮数", cv::Point(58, 501), CV_RGB(0, 0, 0), 20);
		cvui::counter(frame, 229, 501, &g_train_count);
		cvZH::putTextZH(frame, trainresult.c_str(), cv::Point(64, 602), CV_RGB(0, 0, 0), 20);

		// This function must be called *AFTER* all UI components. It does
		// all the behind the scenes magic to handle mouse clicks, etc.
		cvui::update();
		cvui::imshow(WINDOW_NAME, frame);

		// NOTE(review): 27 = ESC, 81 = 'Q', 67 = 'C' — the letter shortcuts
		// only match uppercase key codes (Shift held); lowercase 'q'/'c' (113/99)
		// are not handled.
		int keyvalue = cv::waitKey(100);
		if (keyvalue == 27 || keyvalue == 81) { // ESC or Q quits
			break;
		} 
		if (keyvalue == 67) { // C clears all drawing state
			memset(g_inputLayerDraw, 0, sizeof(g_inputLayerDraw));
			memset(g_outputLayerDraw, 0, sizeof(g_outputLayerDraw));
			memset(g_hidLayerDraw, 0, sizeof(g_hidLayerDraw));
			memset(g_inputLayerDraw784, 0, sizeof(g_inputLayerDraw784));
			g_ResultString = "";
		}
		// Exit when the user closes the window (property query fails).
		if (getWindowProperty(WINDOW_NAME, WND_PROP_AUTOSIZE) != 1)
		{
			break;
		}	
	}
	destroyAllWindows();
	return 0;
}