#include "hit_ctrl/qlearning.h"


//Number of discrete actions (also the Q-table column count)
#define ACTIONS 20 
//Number of training episodes
#define episode 400 
//gamma: discount factor, value in [0, 1]
// NOTE(review): the name 'gamma' can clash with the legacy gamma() in <math.h> — confirm
#define gamma 0.7
//alpha: learning rate, value in [0, 1]
float alpha_=0.9;//0.5


//------------------------ bounce counter
int number_ball_bounce = 1;
//------------------------ highest point reached by the ball, and its velocity at the target position
float volleyball_highest;
float volleyball_targetpos_vel;

float last_volleyball_highest;
float last_volleyball_targetpos_vel;
//------------------------ v, w and q values reported to the host computer
train_point_t train_point;
//------------------------
float start_vel;
float current_state;
int current_action;
int init_flag = 0;
float current_action_point;
float next_state;
int step;
float next_state_max_q;
//-------------------------
int h_volleyball;
int finish_flag;
float h_err;
float vel_fall;
// Q table: q[state][action].
// NOTE(review): stored as int, so the float Q-learning increments in
// Q_learning_train() are truncated on every update — consider float. Confirm
// against the declarations (if any) in hit_ctrl/qlearning.h before changing.
int q[20][20] = {0};
// Visit counter per (state, action) pair.
int p[20][20] = {0};
//-------------------------
int state_init_receive = 0;    // flag: training-initialization request received
// omega used to derive the bounce speed; the smaller oumiga, the faster the
// bounce. Intended range: 3-5.
// NOTE(review): declared int but assigned fractional values elsewhere
// (current_action * 0.2 + 3.0) — the truncation loses the 0.2 per-action
// granularity; this should probably be float. Verify the header declaration.
int oumiga = 4.0;
float time_start_remain = 250.0;   // initial value


/*
 * Seed the RNG and initialize the training state from the last observed
 * bounce.
 *
 * Returns 0 when the last observation is out of the trainable range
 * (height outside [700, 1500] mm or target-position velocity outside
 * [1.4, 4.2]); otherwise picks a random initial action, derives oumiga
 * from it, sets init_flag and returns 1.
 */
int state_init()
{
	srand((unsigned int)time(0));

	/* Guard: reject observations outside the trainable range. */
	if (last_volleyball_highest > 1500.0 || last_volleyball_highest < 700.0 ||
	    last_volleyball_targetpos_vel > 4.2 || last_volleyball_targetpos_vel < 1.4)
	{
		return 0;
	}

	current_state  = last_volleyball_targetpos_vel;   /* state = last bounce velocity */
	current_action = rand() % ACTIONS;                /* random initial action */
	/* Map the action index to omega.
	 * NOTE(review): oumiga is int, so the fractional part is truncated. */
	oumiga = current_action * 0.2 + 3.0;
	init_flag = 1;
	return 1;
}

/*
 * Reward for a bounce whose normalized peak height is h (expected 0.1 - 0.9,
 * i.e. 100 - 900 mm after scaling).
 *
 * Triangular reward: linear ramp up to the peak value 700 at exactly 550 mm,
 * linear ramp down on either side (negative far from the peak).
 */
float r_action(float h)
{
	float mm = h * 1000.0;   /* convert to millimetres: 100mm - 900mm */

	if (mm > 550.0)
		return -3.5 * mm + 2625.0;   /* descending side of the peak */

	return 3.5 * mm - 1225.0;        /* ascending side of the peak */
}

/*
 * Map a velocity state a (expected range 1.4 - 4.2) to a Q-table row index,
 * with 0.14-wide buckets.
 *
 * Fix: the result is clamped to [0, 19]. The old code returned
 * (int)((a - 1.4) / 0.14) unclamped, so a boundary value of exactly 4.2 —
 * which passes the "> 4.2" range checks in the callers — mapped to 20 and
 * indexed past the end of q[20][20] / p[20][20] (undefined behavior).
 */
int state_array_trans(float a)
{
	int idx = (int)((a - 1.4) / 0.14);

	if (idx < 0)  idx = 0;    /* below the modelled velocity range */
	if (idx > 19) idx = 19;   /* table dimension is 20 */

	return idx;
}

/* Return the largest of the first m ints starting at p. Assumes m >= 1. */
int max(int* p, int m)
{
	int best = p[0];

	for (int i = 1; i < m; i++)
	{
		if (p[i] > best)
		{
			best = p[i];
		}
	}

	return best;
}

//Q-learning训练过程
void Q_learning_train()
{          

			if(volleyball_highest>1500.0||volleyball_highest<700.0||volleyball_targetpos_vel>4.2||volleyball_targetpos_vel<1.4)
			{
							train_point.v = 0.0;
							
							train_point.w = 0.0;
							
							train_point.q = 0.0;
			}
			else
			{          
							h_err = (volleyball_highest - 600.0) / 1000.0;         //0.1 —— 0.9//计算差值    //700——1500  //上一次颠球颠球过后得到的最大高度       
							current_state = last_volleyball_targetpos_vel;         //获取上一次颠球的速度状态  //1.4 —— 4.2 = 2.8 / 20 = 0.14
							current_action_point = r_action(h_err);          //得到上一个状态的得分
							// if (current_action_point < 0.0)
							// {
							// 	if(current_action_point > q[state_array_trans(current_state)][current_action] && q[state_array_trans(current_state)][current_action])
							// 		q[state_array_trans(current_state)][current_action] = current_action_point;
									
							// 	if(! q[state_array_trans(current_state)][current_action])
							// 		q[state_array_trans(current_state)][current_action] = current_action_point;
								
							// }
							// else
							// {
									next_state = volleyball_targetpos_vel;
									next_state_max_q = max(&q[state_array_trans(next_state)][0], 20);
									q[state_array_trans(current_state)][current_action] += alpha_ * (current_action_point + gamma * next_state_max_q - q[state_array_trans(current_state)][current_action]);
									p[state_array_trans(current_state)][current_action] ++;
									//alpha_ *= alpha_;
							// }
							
							train_point.v = state_array_trans(current_state);
							
							train_point.w = current_action;
							
							train_point.q = q[state_array_trans(current_state)][current_action]*p[state_array_trans(current_state)][current_action];
							
							current_action = rand() % ACTIONS;

							oumiga = current_action * 0.2 + 3;
							
							last_volleyball_targetpos_vel = volleyball_targetpos_vel;
 			}
	
		
}


/*
 * Exploitation policy: pick the action with the largest q*p score for the
 * current state and set oumiga accordingly; fall back to the neutral
 * oumiga = 4 when no action has a positive score.
 *
 * Fixes vs. original:
 *  - the score now uses p[s][i] (visit count of the CANDIDATE action) instead
 *    of p[s][current_action], matching the q*p product that Q_learning_train
 *    reports; the old code multiplied every candidate by the same constant,
 *    making the weighting ineffective.
 *  - the sentinel was -0x7ffffff (a typo, only seven f's = -134217727);
 *    replaced with the true minimum int value.
 *  - the loop-invariant state_array_trans(current_state) call is hoisted.
 */
void Q_learning_use()
{
	int s = state_array_trans(current_state);
	int maxn = -0x7fffffff - 1;   /* INT_MIN without pulling in <limits.h> */

	for (int i = 0; i < 20; i++)
	{
		int score = q[s][i] * p[s][i];   /* visit-weighted Q value of action i */
		if (score > maxn)
		{
			maxn = score;
			/* NOTE(review): oumiga is int, so the fractional part is truncated. */
			oumiga = i * 0.2 + 3;
		}
	}

	/* Nothing learned yet (or only non-positive scores): use the neutral omega. */
	if (maxn <= 0)
	{
		oumiga = 4.0;
	}
}

/*
 * Intended to (re)initialize the Q-table matrix.
 *
 * Currently empty: q[][] and p[][] are file-scope arrays with static storage
 * duration, so they are already zero-initialized at program start and no
 * explicit reset is performed here.
 * NOTE(review): if training ever needs to restart at runtime with a clean
 * table, this function should clear q and p back to zero — confirm intent.
 */
void Q_table_init()
{
	
}