﻿#include "unstackplate.h"
#include "../camera/camera.h"
#include "systemconfiguration.h"
#include <QFile>
#include <QPixmap>
#include "component_log.h"
#include "componentmanager.h"
#include "algorithm/AlgorithmManager.h"
#include "../../HttpServer/httpsend.h"


#define VIS_API ("/api/recognization/file")


UnstackPlate::UnstackPlate(const QString& id) : m_id(id)
{
}

// No owned raw resources to release here; members clean themselves up.
UnstackPlate::~UnstackPlate() = default;

// Initialize the unstack plate: load its configuration, bring up the bound
// camera, load + initialize the vision algorithm, and cache jig settings.
// On failure, `msg` receives a user-facing (Chinese) description and the
// function returns false. Returns true when everything is ready.
bool UnstackPlate::Init(QString& msg)
{
	auto cfg = syscfg->GetUnstackPlateCfg(m_id);
	if (cfg.id.isEmpty()) {
		msg = QString("未找到拆垛托盘%1的配置参数").arg(m_id);
		log_error("Get Plate [{}] config failed", m_id);
		return false;
	}

	// Fetch the camera configuration referenced by the plate and initialize it.
	auto camercfg = syscfg->GetCameraSetting(cfg.camera);
	if (camercfg.id != cfg.camera) {
		// BUG FIX: the format string used %1 twice, so .arg(cfg.camera) had no
		// placeholder left to fill and the camera id was dropped from the message.
		msg = QString("拆垛托盘%1的相机%2配置异常").arg(m_id).arg(cfg.camera);
		log_error("{}", msg);
		return false;
	}
	m_cameraId = camercfg.id;
	m_camera = Camera::GetCameraDevice(camercfg.type);
	if (m_camera == nullptr) {
		msg = QString("拆垛托盘%1获取相机实例失败，相机类型：%2").arg(m_id).arg(camercfg.type);
		log_error("{}", msg);
		return false;
	}
	CameraParam param = {};
	param.id = camercfg.id;
	param.ip = camercfg.ip;
	param.path = camercfg.picPath;
	int ret = m_camera->CameraInit(param);
	if (ret != 0) {
		msg = QString("拆垛托盘%1相机初始化失败，相机：%2").arg(m_id).arg(cfg.camera);
		log_error("{}", msg);
		return false;
	}

	// Look up the vision algorithm object by the name configured on the camera.
	auto algm = qobject_cast<AlgorithmManager*>(component->GetComponent(Component::component_algorithm));
	if (algm == nullptr) {
		// ROBUSTNESS: qobject_cast returns nullptr when the component is missing
		// or of the wrong type; the old code would have dereferenced it.
		msg = QString("拆垛托盘%1获取算法管理组件失败").arg(m_id);
		log_error("{}", msg);
		return false;
	}
	m_alg = algm->GetVisAlgObjectByName(camercfg.alg);
	if (m_alg == nullptr) {
		msg = QString("拆垛托盘%1相机算法加载失败，算法：%2").arg(m_id).arg(camercfg.alg);
		log_error("{}", msg);
		return false;
	}
	unstack::visAlgParam algP = {};
	algP.calibration = camercfg.biaoding;   // camera calibration data ("biaoding" = calibration)
	algP.dir = cfg.dir;
	algP.grabDir = 0;
	algP.maxNum = cfg.max;
	int algRet = m_alg->Init(algP);
	if (algRet != 0) {
		msg = QString("拆垛托盘%1相机算法初始化失败，ret：%2").arg(m_id).arg(algRet);
		log_error("{}", msg);
		return false;
	}

	// Full URL of the deep-learning recognition endpoint for this camera.
	m_visPath = camercfg.visPath + VIS_API;

	// Cache all jig settings by id for quick lookup during recognition.
	auto jigs = syscfg->GetJigSetting();
	for (auto& jig : jigs) {
		m_jigMap.insert(jig.id, jig);
	}

	return true;
}
// JSON-mapped rectangle returned by the recognition service: one detected box,
// given as four corner points P1..P4 plus a detection id and confidence score.
// structJ/mapping/mappingt are project serialization macros; mappingt binds a
// field to an explicit JSON key (e.g. x1 <-> "P1X").
// NOTE(review): prop() lists fields as x1..x4 then y1..y4, which differs from
// the interleaved mapping order above — presumably prop order is not
// significant for (de)serialization; confirm against the macro definition.
structJ(RecRect,
	mapping(int, id);
	mappingt(int, x1, "P1X");
	mappingt(int, y1, "P1Y");
	mappingt(int, x2, "P2X");
	mappingt(int, y2, "P2Y");
	mappingt(int, x3, "P3X");
	mappingt(int, y3, "P3Y");
	mappingt(int, x4, "P4X");
	mappingt(int, y4, "P4Y");
	mapping(float, score);
	prop(&id, &x1, &x2, &x3, &x4, &y1, &y2, &y3, &y4, &score);
);

// Top-level JSON reply from the recognition service: the list of detected
// boxes plus the path of the annotated result image saved by the service.
json_struct(RecResult,
	mapping(QList<RecRect>, data);
mapping(QString, path);
prop(&data, &path);
)

// Run one full visual-recognition cycle:
//   1. resolve the jig info for the requested jig id,
//   2. snap an rgb + depth picture,
//   3. POST the rgb image to the deep-learning service and parse the boxes,
//   4. feed boxes + images into the vision algorithm to compute grab poses.
// Side effects: updates m_pic (rgb/depth/vis/alg picture paths).
// On failure, result.msg carries the reason and false is returned.
bool UnstackPlate::TakeVisualResult(const VisualParam& param, VisualResult& result)
{
	log_trace("Start snap");
	unstack::VisAlgInput input = {};
	// Resolve the jig (gripper) geometry from the cached settings.
	if (m_jigMap.contains(param.jig)) {
		input.jig.length = m_jigMap[param.jig].size.length;
		input.jig.rob = m_jigMap[param.jig].rob;
		input.jig.type = m_jigMap[param.jig].type;
		input.jig.width = m_jigMap[param.jig].size.width;
	} else {
		result.msg = QString("未匹配到夹具%1的信息").arg(param.jig);
		log_error("{}", result.msg);
		return false;
	}
	input.sku = param.sku;
	input.taskNum = param.taskNum;

	// 1. Take the picture (rgb + depth file paths are produced by the camera).
	QString rgb, depth;
	if (!m_camera->SnapPicture(rgb, depth)) {
		result.msg = "拍照异常";
		log_error("{}", result.msg);
		return false;
	}
	m_pic.rgbPath = rgb;
	m_pic.depthPath = depth;
	log_trace("rgb:{},depth:{}", rgb, depth);
	input.rgb = rgb;
	input.depth = depth;
	// 2. Color-image recognition via the deep-learning HTTP service.
	QByteArray ans;
	if (!PostDeepLearning(rgb, ans)) {
		result.msg = "视觉服务获取结果失败";
		log_error("{}", result.msg);
		return false;
	}
	log_trace("recogresult:{}", ans);
	RecResult recog = {};
	recog.fromJson(ans);
	m_pic.visPath = recog.path();
	// Convert each recognized rectangle into the algorithm's box format.
	for (auto& iter : recog.data()) {
		unstack::BoxRect temp = {};
		temp.id = iter.id();
		temp.x1 = iter.x1();
		temp.x2 = iter.x2();
		temp.x3 = iter.x3();
		temp.x4 = iter.x4();
		temp.y1 = iter.y1();
		temp.y2 = iter.y2();
		temp.y3 = iter.y3();
		temp.y4 = iter.y4();
		input.boxs.append(temp);
	}
	// 3. Algorithm computation of the grab position.
	unstack::VisAlgOutput output = {};
	bool ret = m_alg->Caculate(input, result.grabPos, output);
	// Persist the algorithm's annotated image (if any) next to the rgb image.
	if (!output.rgb.isEmpty()) {
		QPixmap image;
		if (image.loadFromData(output.rgb, "png")) {
			// BUG FIX: the old code called rgb.replace(...) which mutated the
			// local `rgb` in place (it only worked because QString copy-on-write
			// had already detached the copies stored above). Derive the path on
			// a copy instead.
			QString path = rgb;
			path.replace("color", "alg");
			image.save(path, "png");
			m_pic.algPath = path;
		} else {
			log_error("算法图片保存失败");
			m_pic.algPath = "";
		}
	} else {
		m_pic.algPath = "";
	}
	if (!ret) {
		result.msg = output.msg;
		log_error("{}", result.msg);
		return false;
	}
	result.num = output.num;
	result.jigInSku = output.jigInSku;
	log_trace("visual end");
	return true;
}
// Test-interface helper: trigger one camera snapshot and return the resulting
// rgb/depth image paths through the out parameters.
bool UnstackPlate::SnapPicture(QString& rgb, QString& depth)
{
	const bool ok = m_camera->SnapPicture(rgb, depth);
	if (!ok) {
		log_error("拍照异常");
	}
	return ok;
}

// Send a color image to the recognition service and return both the raw JSON
// reply (`ans`) and the annotated-image path parsed from it (`visPath`).
bool UnstackPlate::RgbRcognize(const QString& rgb, QString& visPath, QByteArray& ans)
{
	// Color-image recognition via the deep-learning service.
	const bool posted = PostDeepLearning(rgb, ans);
	if (!posted) {
		return false;
	}
	log_trace("recogresult:{}", ans);
	// Extract the annotated-image path from the JSON reply.
	RecResult recog = {};
	recog.fromJson(ans);
	visPath = recog.path();
	return true;
}

// JSON payload returned by the Caculate test interface: the computed grab
// pose, the number of boxes, and the jig pose relative to the SKU.
structJ(TestResult,
	mapping(PositionJ, grab);
mapping(int, num);
mapping(PositionJ, jigInSku);
prop(&grab, &num, &jigInSku);
	)

// Test-interface variant of the recognition pipeline: instead of taking a live
// picture and calling the HTTP service, it consumes pre-captured image paths
// and a pre-recorded recognition reply (param.vis), runs the algorithm, and
// serializes the outcome to JSON in `result`. The algorithm's annotated image
// (if any) is returned raw through `image`.
bool UnstackPlate::Caculate(const TestAlgParam& param, QString& result, QByteArray& image)
{
	log_trace("Start snap");
	unstack::VisAlgInput input = {};
	// Resolve the jig (gripper) geometry from the cached settings.
	if (m_jigMap.contains(param.jig)) {
		input.jig.length = m_jigMap[param.jig].size.length;
		input.jig.rob = m_jigMap[param.jig].rob;
		input.jig.type = m_jigMap[param.jig].type;
		input.jig.width = m_jigMap[param.jig].size.width;
	} else {
		result = QString("未匹配到夹具%1的信息").arg(param.jig);
		log_error("{}", result);
		return false;
	}
	input.sku = param.sku;
	input.taskNum = param.taskNum;
	// Parse the recorded recognition reply into detected boxes.
	RecResult recog = {};
	recog.fromJson(param.vis);
	m_pic.visPath = recog.path();
	for (auto& iter : recog.data()) {
		unstack::BoxRect temp = {};
		temp.id = iter.id();
		temp.x1 = iter.x1();
		temp.x2 = iter.x2();
		temp.x3 = iter.x3();
		temp.x4 = iter.x4();
		temp.y1 = iter.y1();
		temp.y2 = iter.y2();
		temp.y3 = iter.y3();
		temp.y4 = iter.y4();
		input.boxs.append(temp);
	}
	// Algorithm computation of the grab position.
	unstack::VisAlgOutput output = {};
	Position grab;
	bool ret = m_alg->Caculate(input, grab, output);
	if (!output.rgb.isEmpty()) {
		image = output.rgb;
	}
	if (!ret) {
		result = output.msg;
		log_error("{}", result);
		return false;
	}
	log_trace("visual end");
	// Serialize the outcome to JSON for the test caller.
	TestResult rest = {};
	rest.grab() = grab.TransToPositonJ();
	rest.jigInSku() = output.jigInSku.TransToPositonJ();
	// NOTE(review): `rest.num = ...` differs from the accessor style used on
	// the two lines above (rest.grab() / rest.jigInSku()); if the mapping()
	// macro only generates accessor functions, this line may not set the
	// serialized field — confirm against the structJ macro definition.
	rest.num = output.num;
	result = rest.toByteArray();
	return true;
}




// POST the rgb image path to the deep-learning recognition service and return
// its raw JSON reply in `result`. On success the reply is also persisted next
// to the image as "<image-basename>.txt" for later offline replay/debugging.
// On failure `result` carries a user-facing error string and false is returned.
bool UnstackPlate::PostDeepLearning(const QString& rgb, QByteArray& result)
{
	QByteArray ans;
	QByteArray data = rgb.toUtf8();

	// BUG FIX: the URL was hard-coded to "http://127.0.0.1:8888/api/recognization/file",
	// ignoring the per-camera endpoint assembled in Init() (m_visPath = visPath + VIS_API).
	const QString& path = m_visPath;

	int ret = HttpSend::SendPostMsgByHttp(path, data, ans, false);
	if (ret == send_ok) {
		result = ans;
		// Persist the reply beside the image: replace the extension with "txt".
		QString f = rgb.left(rgb.lastIndexOf(".") + 1);
		f.append("txt");
		QFile file(f);
		if (file.open(QIODevice::WriteOnly)) {
			file.write(ans);
			file.close();
		} else {
			// Best-effort persistence: recognition still succeeded, just log it.
			log_error("save visual recognize result failed, file = {}", f);
		}
		return true;
	} else {
		result = "视觉服务异常";
		log_error("send visual recognize data failed, ret = {}, ans = {}", ret, ans);
		return false;
	}
}
