/*
Copyright (c) [2019年5月1日] [吴超]
[MBT_studio] is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
		 http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
*/
#include "stdafx.h"
#include "神经网络.h"

#include "节点/插座/list/list物体插座.h"
#include "节点/插座/list/list数值插座.h"
#include "节点/插座/list/list矢量插座.h"

#include "ML/ML.h"
//#include "ML/intern/DDPG/unit.h"


// Constructor: registers this node with its type tag, creates the I/O
// sockets, and allocates the (private) dimension properties.
C_强化学习节点::C_强化学习节点(S_设备环境& ctx) : C_节点基类(DEF_强化学习节点) {
	// Bug fix: was L"物体节点" ("object node") — a copy/paste leftover from
	// the object-node class. This is the reinforcement-learning node.
	m_Name = L"强化学习节点";
	m_Ctx = ctx;

	// One F32 1-D array input socket (L"姿态" = pose/state) and one F32 1-D
	// array output socket (L"动作" = action). The DEF_* macros assign the
	// created socket into `socket`.
	C_插座基类* socket;
	DEF_创建F32_Array1D插座_I(L"姿态");
	DEF_创建F32_Array1D插座_O(L"动作");

	// Network input/output dimension properties; marked private so they are
	// not user-editable in the property panel.
	m_输入数量 = f_alloc_UI32Prop(nullptr, L"输入维度");
	m_输出数量 = f_alloc_UI32Prop(nullptr, L"输出维度");
	m_输入数量.m_私有 = true;
	m_输出数量.m_私有 = true;
}

// Destructor: the node owns no raw resources of its own, so the compiler-
// generated destructor is correct — default it explicitly instead of
// providing an empty body.
C_强化学习节点::~C_强化学习节点() = default;

// Per-frame/graph evaluation hook. Currently a no-op: the body below is a
// disabled DDPG (actor/critic) training prototype kept for reference — its
// dependencies (init_net, init_ex, update_p_and_q, ...) live in the
// commented-out "ML/intern/DDPG/unit.h" include at the top of this file.
// Returns false, i.e. the node reports no output change.
bool C_强化学习节点::f_update() {

	// NOTE(review): the block below trains a small policy network (pnet) and
	// Q-network (qnet) on a 1-D toy state, using an experience-replay buffer,
	// and prints mean reward / loss / replay contents. Left commented out;
	// re-enabling it requires restoring the DDPG includes above.
	/*double total_reward = 0;
	double reward = 0;
	int state_size = 1;
	int action_size = 1;
	uint32 q_sizes[] = { state_size + action_size,8,8,1 };
	uint32 p_sizes[] = { state_size,8,8,action_size };
	int q_len = 3;
	int batch = 100;
	int reset = 0;
	network qnet = init_net(q_len, batch, q_sizes);
	network pnet = init_net(q_len, batch, p_sizes);
	ex r = init_ex(state_size, action_size, 10000);
	double* de = (double*)calloc(qnet.batch * (r.a_size), sizeof(double));
	states s = init_state(state_size, action_size);
	int kai = 1000;
	updatestate(&s, 0);
	for (int j = 0; j < 1000; j++) {
		for (int k = 0; k < kai; k++) {
			for (int i = 0; i < s.size; i++) {
				s.seq[i] = s.n_seq[i];
			}
			get_action(&pnet, s, s.seq);
			updatestate(&s, reset);
			reset = 0;
			reward = (5 - fabs(s.n_seq[0])) / 10.0;
			if (reward < 0) {
				reward = -1;

			}
			total_reward += reward;

			add_ex(&r, s.seq, reward, s.action, s.n_seq);
			if ((5 - fabs(s.n_seq[0])) < 0) {
				s.seq[0] = 0;
				s.n_seq[0] = 0;
				reset = 1;
			}
			update_p_and_q(&pnet, &qnet, r);

		}printf("mean_reward:%f\n", total_reward / kai);
		total_reward = 0;
		printf("qnet_loss:%f\n", qnet.loss);
	}

	Pnetwork_predict(s.seq, &pnet);
	printf("raw_y[ %f]\n", pnet.net3[pnet.len - 1].y[0]);

	for (int i = r.memmaxsize - 1000; i < r.memmaxsize; i++) {
		printf("seq[");
		for (int j = 0; j < r.s_size; j++) {
			printf("%f ", r.mem[i].new_state[j]);
		}printf("]");
		printf("p_seq[");
		for (int j = 0; j < r.s_size; j++) {
			printf("%f ", r.mem[i].p_state[j]);
		}printf("]");
		printf("action[");
		for (int j = 0; j < r.a_size; j++) {
			printf("%f ", r.mem[i].action[j]);
		}printf("]");
		printf("reward[%f]\n", r.mem[i].reward);
	}
	*/

	// No output produced yet — signal "nothing changed" to the graph.
	return false;
}

// Deserialize node state from an open file. This node currently has no
// state of its own to read beyond what the base class persists.
void C_强化学习节点::f_读取(FILE* f) {
	C_节点基类::f_读取(f);
}

// Serialize node state to an open file. Mirrors f_读取: only base-class
// state is persisted for now.
void C_强化学习节点::f_写入(FILE* f) {
	C_节点基类::f_写入(f);
}

// Factory/loader entry point for this node type.
// Bug fix: the original allocated the node with `new`, then returned
// nullptr — leaking the allocation and making every load of this node type
// fail. Return the constructed node instead.
C_节点基类* f_node_加载强化学习节点(S_设备环境& ctx, FILE* f) {
	C_强化学习节点* node = new C_强化学习节点(ctx);
	// TODO(review): no node-specific state is read from `f` yet; the base
	// data is presumably read by the caller via f_读取 — confirm against the
	// other f_node_加载* loaders.
	(void)f;
	return node;
}

// Save entry point for this node type. Currently a stub: no node-specific
// state is written beyond whatever the caller persists via f_写入.
void f_node_保存强化学习节点(C_节点基类* n, FILE* f) {
	// TODO: serialize reinforcement-learning-specific state (network
	// weights, dimensions) once f_update is implemented.
	C_强化学习节点* node = static_cast<C_强化学习节点*>(n);
	(void)node;  // silence unused-variable warnings until the stub is filled in
	(void)f;
}
