// 
// Created By: Xiaofeng.Gu 2021/03/18
// 

#include "CybertronTaskRunnerPoolServer.hpp"
#include "EventHandlerClientNodeConnection.hpp"
#include "cybertron/core/UtilTime.hpp"
#include "cybertron/core/UtilFile.hpp"
#include "cybertron/core/Thread.hpp"
#include "cybertron/core/Frame.hpp"
#include "cybertron/core/UtilString.hpp"
#include "cybertron/core/UtilStateHelper.hpp"
//#include "cybertron/node/TaskRunnerPoolClient.hpp"
#include "cybertron/core/Log.hpp"
#include"TaskRunnerPoolRestSrv.hpp"
#include <thread>
#include "cybertron/core/UtilStateHelper.hpp"

// Constructor: puts all members into their initial (stopped = false, empty
// queue/map) state via zeroMembers().
CybertronTaskRunnerPoolServer::CybertronTaskRunnerPoolServer()
{
	zeroMembers();
}

// Destructor: drops queued commands and clears the node map.
// NOTE(review): the detached checker thread started in run() is not joined or
// stopped here and may still reference this object — confirm shutdown order.
CybertronTaskRunnerPoolServer::~CybertronTaskRunnerPoolServer()
{
	clearThis();
}

// Main server loop: spawns the node-status watchdog, then pumps the control
// command queue until Destroy() raises the stop flag.
void CybertronTaskRunnerPoolServer::run()
{
	// Watchdog releases stale node locks and reclaims deferred deletions.
	// NOTE(review): the thread is detached and never signalled to stop, so it
	// outlives this loop — confirm the server object lives for the process.
	std::thread checker(&CybertronTaskRunnerPoolServer::runCheckNodeStatusThread, this);
	checker.detach();

	// Same trace as the original for(;;): run one batch, then test the flag.
	do
	{
		RunControlCommands();
	} while (!mStopped);
}
// Returns a millisecond tick used only for elapsed-time comparisons in this
// file (lock timeouts in runCheckNodeStatusThread, deferred node deletion).
// Uses steady_clock because it is guaranteed monotonic: the intervals cannot
// jump backwards when the wall clock is adjusted (NTP/DST), a guarantee
// high_resolution_clock does not make.  duration_cast replaces the
// error-prone manual /1000000 conversion of the original.
long long CybertronTaskRunnerPoolServer::getNow() {
	using namespace std::chrono;
	return duration_cast<milliseconds>(
		steady_clock::now().time_since_epoch()).count();
}
// Watchdog loop (runs on a detached thread started by run()).
// Every ~5 ms, under mNodeMapLock:
//   1. any node stuck in the Lock state for more than 2 minutes is forced
//      back to Free;
//   2. nodes queued for deletion (see DeleteClientNode) are freed once they
//      have been off the map for more than 60 seconds — the delay presumably
//      lets in-flight users of the raw pointer finish; TODO confirm.
// The loop never exits; it relies on process teardown.
void CybertronTaskRunnerPoolServer::runCheckNodeStatusThread() {
	while (true)
	{
		{
			std::unique_lock<std::mutex> statesLock(mNodeMapLock);
			
			// Pass 1: release locks held past the 2-minute timeout.
			for (auto it  = mNodeMap.begin(); it != mNodeMap.end(); it++)
			{
				if (it->second->state == TaskRunnerPool::ETaskRunnerPoolStateLock) {

					if (getNow() - it->second->lockTimestamp > 2*60 * 1000) {
						it->second->state = TaskRunnerPool::ETaskRunnerPoolStateFree;
						logInfo("Node Lock Time Out  Key : %s ETaskRunnerPoolStateLock ===> ETaskRunnerPoolStateFree", it->first.c_str());
					}
				}
			}

			// Pass 2: free nodes whose grace period (60 s) has elapsed.
			// erase() returns the next iterator, so the loop only advances
			// manually in the keep branch.
			for (auto it = mNodeDeleteList.begin(); it != mNodeDeleteList.end();)
			{
				TaskRunnerPoolClientNode*pNode = *it;
				if (getNow() - pNode ->deleteTimestamp> 60 * 1000){
					it = mNodeDeleteList.erase(it);
					string key = GetKey(pNode->groupId, pNode->id, pNode->type);
					logInfo("Delete Memory Node Key : %s", key.c_str());
					delete pNode;
					pNode = 0;
					
				}
				else {
					it++;
				}
			}
			
		}
		// Sleep outside the lock so the command thread is not starved.
		std::this_thread::sleep_for(std::chrono::milliseconds(5));
	}
}
// Resets the server to an empty state.  Always succeeds.
// @return true unconditionally.
bool CybertronTaskRunnerPoolServer::initialize()
{
	clearThis();
	return true;
}


// Accept callback: attaches a per-connection event handler to every new
// client socket.  A null socket is ignored.
void CybertronTaskRunnerPoolServer::onAccept(SocketBasePtr pRemoteSocket)
{
	if (pRemoteSocket)
	{
		// The handler keeps a back-pointer to this server so it can queue
		// control commands for the accepted connection.
		auto pHandler = std::make_shared<EventHandlerClientNodeConnection>(this);
		pRemoteSocket->addHandler(pHandler);
	}
}

// Initializes members to their default run state.  Called from the
// constructor, where the node map is still empty.
void CybertronTaskRunnerPoolServer::zeroMembers()
{
	mStopped = false;
	mControlCommandQueue.clear();

	// The node map is shared with the watchdog thread; clear it under its
	// lock.  lock_guard is sufficient — nothing follows the guarded region.
	std::lock_guard<std::mutex> mapGuard(mNodeMapLock);
	mNodeMap.clear();
}

void CybertronTaskRunnerPoolServer::clearThis()
{
	mControlCommandQueue.clear();

	{
		std::unique_lock<std::mutex> statesLock(mNodeMapLock);
		mNodeMap.clear();
	}
}

// Enqueues an incoming client message for the main loop to execute.
// @param pSender socket the message arrived on (ignored when null)
// @param msg     raw message; copied into the queued command
void CybertronTaskRunnerPoolServer::queueControlCommand(
	SocketBasePtr pSender,
	const Message& msg)
{
	// A command without a sender could never be answered — drop it.
	if (!pSender)
	{
		return;
	}

	Command queued;
	queued.msg = msg;
	queued.pSender = pSender;
	mControlCommandQueue.push_back(queued);
}

// Disconnect callback: removes the disconnected client's node record.
// @param pSender   socket of the dropped connection; kept for interface
//                  symmetry, not needed for the cleanup itself
// @param pNodeInfo registration record of the dropped client, may be null if
//                  the client never completed registration
void CybertronTaskRunnerPoolServer::OnDisConnectClient(SocketBasePtr pSender, TaskRunnerPoolClientNode* pNodeInfo)
{
	(void)pSender;  // intentionally unused (original had an empty if-block)

	// The original dereferenced pNodeInfo unconditionally, crashing on a
	// disconnect from an unregistered client.
	if (!pNodeInfo)
	{
		logInfo("OnDisConnectClient: no node info, nothing to delete");
		return;
	}

	DeleteClientNode(pNodeInfo->groupId, pNodeInfo->id, pNodeInfo->type);

	logInfo("OnDisConnectClient!");
}

// One iteration of the command pump: back off briefly when the queue is
// empty, then drain everything currently queued.
void CybertronTaskRunnerPoolServer::RunControlCommands()
{
	// Keep the run() loop from spinning hot while idle.
	if (mControlCommandQueue.size() == 0) {
		std::this_thread::sleep_for(std::chrono::milliseconds(1));
	}

	// pop_swap_front moves the front element into cmd and reports whether
	// one was available.
	Command cmd;
	while (mControlCommandQueue.pop_swap_front(cmd))
	{
		executeControlCommand(cmd);
	}
}

// Dispatches one queued command to its message handler.  Unknown message ids
// are silently ignored.
void CybertronTaskRunnerPoolServer::executeControlCommand(Command& cmd)
{
	const std::uint16_t msgId = cmd.msg.parseMsgId();

	if (msgId == TaskRunnerPool::ETaskRunnerPoolRegisterReq)
	{
		onHandleNodeRegisterRequestMessage(cmd);
	}
	else if (msgId == TaskRunnerPool::ETaskRunnerPoolReportStateReq)
	{
		onHandleNodeStateMessage(cmd);
	}
	// else: unrecognized id — drop, matching the original switch default.
}


// Handles ETaskRunnerPoolRegisterReq: records (or refreshes) a client node in
// mNodeMap, then answers with ETaskRunnerPoolRegisterRet.
// Reply code: 0 = fresh registration, -1 = a node with this key already
// existed (its record is overwritten with the new data).
void CybertronTaskRunnerPoolServer::onHandleNodeRegisterRequestMessage(Command& cmd)
{
	TaskRunnerPool::TaskRunnerPoolRegisterReq registerRequest;
	if (!cmd.msg.toProtobuf(registerRequest))
	{
		return;
	}
	int code = 0;
	std::string id = registerRequest.nodeid();
	Common::EWorkNodeType worktype = registerRequest.worktype();
	std::string groupId = registerRequest.groupid();
	string type = UtilStateHelper::getWorkNodeTypeName(worktype);
	string key = GetKey(groupId, id, type);

	TaskRunnerPoolClientNode* pNode = new TaskRunnerPoolClientNode;

	pNode->pSender = cmd.pSender;
	pNode->groupId = groupId;
	pNode->id = id;
	pNode->type = type;
	pNode->description = registerRequest.description();
	pNode->userData = registerRequest.userdata();
	pNode->tag = registerRequest.tag();

	// Tags arrive as one ';'-separated string; keep both the raw string and
	// the split list on the record.
	std::vector<std::string> tagList;
	UtilString::split(tagList, pNode->tag, ';');
	pNode->tagList = tagList;

	pNode->state = TaskRunnerPool::ETaskRunnerPoolStateReadying;

	{
		std::unique_lock<std::mutex> lock(mNodeMapLock);

		NodeMap::iterator it = mNodeMap.find(key);
		if (it != mNodeMap.end())
		{
			// Re-registration: copy the fresh data into the existing record,
			// then free the temporary.  The original code dropped pNode here
			// without deleting it, leaking one node per duplicate register.
			*it->second = *pNode;
			delete pNode;
			pNode = it->second;
			code = -1;
			logInfo("Register Node %s, But Node Exist State:%s", key.c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(pNode->state).c_str());
		}
		else {
			// First registration for this key: the map takes ownership.
			mNodeMap[key] = pNode;
			logInfo("Register Node %s State:%s", key.c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(pNode->state).c_str());
		}
	}

	TaskRunnerPool::TaskRunnerPoolRegisterRet ret;
	ret.set_code(code);
	cmd.pSender->send(TaskRunnerPool::ETaskRunnerPoolRegisterRet, ret);
}

// Handles ETaskRunnerPoolReportStateReq: records the state a client reports
// for itself and echoes it back in ETaskRunnerPoolReportStateRet.
// Reply code: 0 = node found and updated, -1 = unknown node key.
void CybertronTaskRunnerPoolServer::onHandleNodeStateMessage(Command& cmd)
{
	TaskRunnerPool::TaskRunnerPoolReportStateReq data;
	if (!cmd.msg.toProtobuf(data))
	{
		return;
	}

	if (!cmd.pSender)
	{
		return;
	}

	std::string id = data.nodeid();
	Common::EWorkNodeType worktype = data.worktype();
	std::string groupId = data.groupid();
	string type = UtilStateHelper::getWorkNodeTypeName(worktype);

	string key = GetKey(groupId, id, type);

	logInfo("onHandleNodeStateMessage %s, state:%s", key.c_str(), UtilStateHelper::getTaskRunnerPoolStateName(data.state()).c_str());

	TaskRunnerPoolClientNode* pNodeInfo = FindClientNode(groupId, id, type);
	int code = 0;
	if (pNodeInfo) {
		// NOTE(review): this write happens after FindClientNode released
		// mNodeMapLock; confirm the watchdog/delete path cannot free the
		// node in between.  (Removed the original's unused `succeed` local.)
		pNodeInfo->state = data.state();
	}
	else {
		code = -1;
	}

	TaskRunnerPool::TaskRunnerPoolReportStateRet ret;
	ret.set_nodeid(data.nodeid());
	ret.set_worktype(data.worktype());
	ret.set_groupid(data.groupid());
	ret.set_state(data.state());
	ret.set_code(code);

	cmd.pSender->send(TaskRunnerPool::ETaskRunnerPoolReportStateRet, ret);
}



// Pushes a TaskRunnerPoolReportStateReq to one client so it learns the state
// the server assigned to it (used by UpdateNodeState / SetAllNodeLock2Free).
void CybertronTaskRunnerPoolServer::sendStateMessage(SocketBasePtr pSender, std::string groupId, std::string id, std::string type, TaskRunnerPool::ETaskRunnerPoolState state) {
	// Callers pass pSender straight out of a stored node record, which may
	// hold a stale/empty socket after a disconnect; the original dereferenced
	// it unconditionally.
	if (!pSender) {
		logInfo("sendStateMessage: null sender, message dropped");
		return;
	}

	TaskRunnerPool::TaskRunnerPoolReportStateReq data;
	data.set_state(state);
	data.set_groupid(groupId);
	data.set_nodeid(id);
	// NOTE(review): worktype is filled from getWorkNodeTypeName(type) where
	// `type` is already a name string — confirm this overload converts the
	// name back to the value the worktype field expects.
	data.set_worktype(UtilStateHelper::getWorkNodeTypeName(type));
	Message msg;
	msg.fromProtobuf(TaskRunnerPool::ETaskRunnerPoolReportStateReq, data);
	pSender->send(msg);
}
bool CybertronTaskRunnerPoolServer::IsExistNode(std::string groupId, std::string id, std::string type) {
	std::unique_lock<std::mutex> lock(mNodeMapLock);
	string key = GetKey(groupId, id, type);
	NodeMap::iterator it = mNodeMap.find(key);
	if (it != mNodeMap.end())
	{
		return  true;
	}
	return false;

}

// Builds the map key "<groupId>_<id>_<type>" that uniquely identifies a node.
string CybertronTaskRunnerPoolServer::GetKey(string groupId, string id, string type) {
	return groupId + "_" + id + "_" + type;
}


// Looks up the node registered under (groupId, id, type).
// @return the stored node pointer, or null when no such node exists.  The
//         pointer is still owned by the map; the lock is released on return,
//         so callers must not hold it across deletion windows.
TaskRunnerPoolClientNode* CybertronTaskRunnerPoolServer::FindClientNode(string groupId, string id, string type) {
	const string key = GetKey(groupId, id, type);
	std::unique_lock<std::mutex> mapGuard(mNodeMapLock);
	auto entry = mNodeMap.find(key);
	return (entry == mNodeMap.end()) ? 0 : entry->second;
}

// Removes a node from the active map and queues it for deferred deletion
// (freed by the watchdog thread 60 s later, see runCheckNodeStatusThread).
void CybertronTaskRunnerPoolServer::DeleteClientNode(string groupId, string id, string type)
{
	string key = GetKey(groupId, id, type);
	logInfo("Delete Node %s",key.c_str());
	std::unique_lock<std::mutex> statesLock(mNodeMapLock);
	NodeMap::iterator it = mNodeMap.find(key);
	if (it != mNodeMap.end())
	{
		// Save the pointer BEFORE erasing: the original read it->second
		// after mNodeMap.erase(key), i.e. through an invalidated iterator
		// (undefined behavior).
		TaskRunnerPoolClientNode* pNode = it->second;
		pNode->deleteTimestamp = getNow();
		mNodeMap.erase(it);
		mNodeDeleteList.push_back(pNode);
	}
}
// Copies every node of the given group/type that is currently in the Free
// state into `nodes`.  Always returns true.
bool CybertronTaskRunnerPoolServer::GetFreeTaskNodes(string groupId, string type, std::vector<TaskRunnerPoolClientNode>& nodes) {
	std::unique_lock<std::mutex> mapGuard(mNodeMapLock);
	for (const auto& entry : mNodeMap)
	{
		const TaskRunnerPoolClientNode* pNode = entry.second;
		if (pNode->groupId == groupId && pNode->type == type &&
			pNode->state == TaskRunnerPool::ETaskRunnerPoolStateFree)
		{
			nodes.push_back(*pNode);
		}
	}
	return true;
}

// Copies every node of the given group/type — regardless of state — into
// `nodes`.  Always returns true.
bool CybertronTaskRunnerPoolServer::GetAlllTaskNodes(string groupId, string type, std::vector<TaskRunnerPoolClientNode>& nodes) {
	std::unique_lock<std::mutex> mapGuard(mNodeMapLock);
	for (const auto& entry : mNodeMap)
	{
		const TaskRunnerPoolClientNode* pNode = entry.second;
		if (pNode->groupId == groupId && pNode->type == type)
		{
			nodes.push_back(*pNode);
		}
	}
	return true;
}
// Stops the server: raises the flag that ends run()'s command loop, then
// closes the listening socket.  NOTE(review): the detached watchdog thread is
// not stopped here and may still touch member state — confirm teardown order.
void CybertronTaskRunnerPoolServer::Destroy()
{
	mStopped = true;
	close();
}
bool CybertronTaskRunnerPoolServer::SetAllNodeLock2Free() {
	std::unique_lock<std::mutex> statesLock(mNodeMapLock);
	for (auto it = mNodeMap.begin(); it != mNodeMap.end(); it++)
	{

		 if (it->second->state == TaskRunnerPool::ETaskRunnerPoolStateLock) {
			it->second->state = TaskRunnerPool::ETaskRunnerPoolStateReadying;

			logInfo("SetAllNodeLock2Free[%s]  State %s ===> %s", it->first.c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(TaskRunnerPool::ETaskRunnerPoolStateLock).c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(it->second->state).c_str());

			sendStateMessage(it->second->pSender, it->second->groupId, it->second->id, it->second->type, it->second->state);

		}
	}
	return true;
}
// Server-driven state transition for one node.  Only two transitions are
// legal: Free -> Lock (stamps lockTimestamp for the watchdog's 2-minute
// timeout) and Lock -> Readying; the client is notified of either.  Any other
// combination is logged as an error and ignored.  Always returns true, even
// when the node is unknown or the transition is rejected.
// Note the parameter order (groupId, type, id) differs from GetKey's
// (groupId, id, type) — the call below passes them correctly.
bool CybertronTaskRunnerPoolServer::UpdateNodeState( std::string groupId,  std::string type,  std::string id, TaskRunnerPool::ETaskRunnerPoolState state) {

	std::unique_lock<std::mutex> statesLock(mNodeMapLock);
	string key = GetKey(groupId, id, type);
	logInfo("UpdateNodeState Enter %s %d", key.c_str(), state);
	auto it = mNodeMap.find(key);
	if (it != mNodeMap.end()) {
		// Free -> Lock: grant the lock and start the timeout clock.
		if (it->second->state == TaskRunnerPool::ETaskRunnerPoolStateFree && state == TaskRunnerPool::ETaskRunnerPoolStateLock) {

			
			logInfo("UpdateNodeState[%s]  State Lock Successful %s ===> %s", key.c_str(), 
				UtilStateHelper::getTaskRunnerPoolStateName(it->second->state).c_str(), 
				UtilStateHelper::getTaskRunnerPoolStateName(state).c_str());
			it->second->state = state;
			it->second->lockTimestamp = getNow();
			// Sent while mNodeMapLock is held — keeps state and notification
			// consistent, at the cost of holding the lock across I/O.
			sendStateMessage(it->second->pSender, groupId, id, type, state);
			
		}
		// Lock -> Readying: release the lock.
		else if (it->second->state == TaskRunnerPool::ETaskRunnerPoolStateLock && state == TaskRunnerPool::ETaskRunnerPoolStateReadying) {
			logInfo("UpdateNodeState[%s]  State Free Successful %s ===> %s", key.c_str(), 
				UtilStateHelper::getTaskRunnerPoolStateName(it->second->state).c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(state).c_str());
			it->second->state = state;
			sendStateMessage(it->second->pSender, groupId, id, type, state);
			
		}
		// Anything else is an illegal transition: log and leave state as-is.
		else {
			logError("UpdateNodeState[%s]  State  Error  %s ===> %s", key.c_str(), 
				UtilStateHelper::getTaskRunnerPoolStateName(it->second->state).c_str(),
				UtilStateHelper::getTaskRunnerPoolStateName(state).c_str());
		}
	}


	return true;
}
