from curses import curs_set
import math
import re
import numpy as np

MAX_ACC_JERK = 40
MAX_DELTA_JERK = 0.25
PSI_RANGE = [75, 90] # 航向角范围

# Longitudinal acceleration reward
def r_acc(cavM_accs, cav1_accs, cavM_predict_accs, cav1_predict_accs):
  """Reward gentle longitudinal accelerations for cavM and cav1.

  Notes:
      1. The constant 9 comes from the acceleration bound of 3 (3**2 = 9).

  Args:
      cavM_accs, cav1_accs: executed accelerations of cavM / cav1.
      cavM_predict_accs, cav1_predict_accs: predicted future accelerations.

  Returns:
      (normalized_reward, reward): reward scaled to roughly [-1, 1] and the raw sum.

  BUG FIX: the original branch condition was `acc >= -2 or acc <= 2`,
  which is always True, so the quadratic penalty branch was unreachable;
  the bounds must be combined with `and` (i.e. |acc| <= 2).
  """
  reward = 0
  cavM_action_accs = np.concatenate((np.array(cavM_accs), np.array(cavM_predict_accs)), axis=0)
  cav1_action_accs = np.concatenate((np.array(cav1_accs), np.array(cav1_predict_accs)), axis=0)

  # Same scoring for both vehicles: linear positive reward inside
  # |acc| <= 2, quadratic penalty outside.
  for acc in np.concatenate((cavM_action_accs, cav1_action_accs)):
    if -2 <= acc <= 2:
      reward += (9 - 9 * abs(acc))
    else:
      reward += -(acc) ** 2

  # Normalize by the per-sample bound of 9.
  total = len(cavM_action_accs) + len(cav1_action_accs)
  normalized_reward = reward / 9 / total
  return normalized_reward, reward

# Steering angle reward
def r_delta(cavM_deltas, cavM_predict_deltas):
  """Reward small steering angles for cavM.

  Linear positive reward inside |delta| <= 0.2, quadratic penalty
  outside; normalized by 0.5 per sample.

  Args:
      cavM_deltas: executed steering angles.
      cavM_predict_deltas: predicted future steering angles.

  Returns:
      (normalized_reward, reward)

  BUG FIX: the original branch condition was `delta >= -0.2 or
  delta <= 0.2`, which is always True, so the penalty branch was
  unreachable; the bounds must be combined with `and` (|delta| <= 0.2).
  """
  reward = 0
  cavM_action_deltas = np.concatenate((np.array(cavM_deltas), np.array(cavM_predict_deltas)), axis=0)

  for delta in cavM_action_deltas:
    if -0.2 <= delta <= 0.2:
      # Positive reward for small steering angles.
      reward += (1 - 5 * abs(delta))
    else:
      reward += -(delta) ** 2

  # Normalize
  total = len(cavM_deltas) + len(cavM_predict_deltas)
  normalized_reward = reward / 0.5 / total
  return normalized_reward, reward

# Speed difference reward
def r_v_diff(original_state, original_kalman_state, predict_state):
  """Reward small speed differences between adjacent vehicles.

  For each time step (current + predicted), the absolute speed difference
  of the pairs hdvH-cavM, cavM-cav1 and cav1-hdvF earns 15 - diff when
  diff <= 1, otherwise the penalty -diff.

  Args:
      original_state: raw state; indices [4]/[7]/[8] are read as the
          hdvH/cav1/hdvF speeds.
      original_kalman_state: filtered state; cavM_filter_state[4] is
          cavM's longitudinal speed.
      predict_state: dict of per-vehicle predicted state sequences.

  Returns:
      (normalized_reward, reward)

  BUG FIX: the original positive branch required diff > 0, so perfectly
  matched speeds (diff == 0) fell through to the penalty branch and got
  0 instead of the maximum positive reward. diff is an absolute value,
  so the band check reduces to diff <= 1.
  """
  reward = 0

  # Current + predicted speeds per vehicle, aligned by index.
  hdvH_v_arr = np.concatenate((np.array([original_state[4]]), np.array(predict_state["hdvH_predict_states"])[:, 2]))
  cavM_v_y_arr = np.concatenate((np.array([original_kalman_state["cavM_filter_state"][4]]), np.array(predict_state["cavM_predict_states"])[:, 4]))
  cav1_v_arr = np.concatenate((np.array([original_state[7]]), np.array(predict_state["cav1_predict_states"])[:, 2]))
  hdvF_v_arr = np.concatenate((np.array([original_state[8]]), np.array(predict_state["hdvF_predict_states"])[:, 2]))

  for i in range(len(hdvH_v_arr)):
    # Absolute speed differences of the three adjacent pairs.
    hdvH_cavM_v_diff = abs(hdvH_v_arr[i] - cavM_v_y_arr[i])
    cavM_cav1_v_diff = abs(cavM_v_y_arr[i] - cav1_v_arr[i])
    cav1_hdvF_v_diff = abs(cav1_v_arr[i] - hdvF_v_arr[i])

    for diff in (hdvH_cavM_v_diff, cavM_cav1_v_diff, cav1_hdvF_v_diff):
      if diff <= 1:
        # Within the allowed speed difference of 1: positive reward.
        reward += (15 - diff)
      else:
        # diff is non-negative, so -diff == -abs(diff).
        reward += -diff

  # Piecewise normalization: clamp to -1 once the average penalty exceeds
  # the per-pair bound of 15, otherwise map linearly.
  # NOTE(review): the original comment mentioned a cut-off of 20 while the
  # code clamps at an average of -15 — confirm which was intended.
  total = 3 * len(hdvH_v_arr)
  if reward < -15 * total:
    normalized_reward = -1
  else:
    normalized_reward = reward / 15 / total  # [-15, 15] -> [-1, 1]

  return normalized_reward, reward

# Longitudinal spacing reward
def r_y_diff(original_state, original_kalman_state, thdv, tcav, predict_state):
  """Reward keeping each following gap inside [min_gap, min_gap + 5].

  The desired minimum gap is speed * time-headway (thdv for the
  hdvH-cavM pair, tcav for the CAV pairs). Inside the band the reward is
  60 - 12 * gap; outside it the penalty is the distance from the desired
  minimum gap.

  Args:
      original_state: raw state; [0]/[5]/[6] are read as the current gaps
          and [7]/[8] as the cav1/hdvF speeds.
      original_kalman_state: filtered state; cavM_filter_state[4] is
          cavM's longitudinal speed.
      thdv, tcav: time headways for the HDV-CAV and CAV-CAV pairs.
      predict_state: dict of per-vehicle predicted state sequences.

  Returns:
      (normalized_reward, reward)

  BUG FIX: the original assigned the clamped/normalized value back to
  `reward` and returned `normalized_reward`, which was never set and so
  was always 0 — the two return values were effectively swapped/zeroed.
  The step count used for normalization now also includes the current
  step (matching how the reward is accumulated), and the clamp threshold
  uses the same `total` as the linear mapping.
  """
  def _gap_reward(gap, min_gap):
    # One gap's contribution: positive inside the band
    # [min_gap, min_gap + 5], otherwise penalize the deviation from the
    # desired minimum gap.
    if min_gap <= gap <= min_gap + 5:
      return 60 - 12 * gap
    return -abs(gap - min_gap)

  reward = 0

  # Current step: gaps taken straight from the state vector.
  cavM_v_y = original_kalman_state["cavM_filter_state"][4]
  reward += _gap_reward(original_state[0], cavM_v_y * thdv)
  reward += _gap_reward(original_state[5], original_state[7] * tcav)
  reward += _gap_reward(original_state[6], original_state[8] * tcav)

  # Predicted steps: recompute gaps from predicted positions.
  hdvH_predict_y = np.array(predict_state["hdvH_predict_states"])[:, 0]
  cavM_predict_y = np.array(predict_state["cavM_predict_states"])[:, 0]
  cavM_predict_v_y = np.array(predict_state["cavM_predict_states"])[:, 4]
  cav1_predict_y = np.array(predict_state["cav1_predict_states"])[:, 0]
  cav1_predict_v = np.array(predict_state["cav1_predict_states"])[:, 2]
  hdvF_predict_y = np.array(predict_state["hdvF_predict_states"])[:, 0]
  hdvF_predict_v = np.array(predict_state["hdvF_predict_states"])[:, 2]
  for i in range(len(hdvH_predict_y)):
    reward += _gap_reward(abs(hdvH_predict_y[i] - cavM_predict_y[i]), cavM_predict_v_y[i] * thdv)
    reward += _gap_reward(abs(cavM_predict_y[i] - cav1_predict_y[i]), cav1_predict_v[i] * tcav)
    reward += _gap_reward(abs(cav1_predict_y[i] - hdvF_predict_y[i]), hdvF_predict_v[i] * tcav)

  # Piecewise normalization: clamp to -1 once the average penalty exceeds
  # the per-gap bound of 60, otherwise map linearly.
  total = 3 * (len(hdvH_predict_y) + 1)  # 3 gaps per step, current + predicted
  if reward < -60 * total:
    normalized_reward = -1
  else:
    normalized_reward = reward / 60 / total  # roughly [-60, 60] -> [-1, 1]

  return normalized_reward, reward

# Acceleration change rate (jerk) reward
def r_acc_jerk(dt, cavM_accs, last_cavM_accs, cav1_accs, last_cav1_accs, cavM_predict_accs, cav1_predict_accs):
  """Reward small acceleration changes (jerk) for cavM and cav1.

  Builds each vehicle's full acceleration sequence (previous + current +
  predicted), then awards MAX_ACC_JERK minus the per-step jerk
  |a[i] - a[i-1]| / dt for every consecutive pair of both vehicles.

  Returns:
      (normalized_reward, reward)
  """
  cavM_seq = np.concatenate((np.array(last_cavM_accs), np.array(cavM_accs), np.array(cavM_predict_accs)), axis=0)
  cav1_seq = np.concatenate((np.array(last_cav1_accs), np.array(cav1_accs), np.array(cav1_predict_accs)), axis=0)

  reward = 0
  for idx in range(1, len(cavM_seq)):
    # cavM's jerk for this transition.
    cavM_jerk = abs(cavM_seq[idx] - cavM_seq[idx - 1]) / dt
    reward += MAX_ACC_JERK - cavM_jerk
    # cav1's jerk for the same transition.
    cav1_jerk = abs(cav1_seq[idx] - cav1_seq[idx - 1]) / dt
    reward += MAX_ACC_JERK - cav1_jerk

  # Two vehicles contribute one term per transition.
  total = 2 * (len(cavM_seq) - 1)
  normalized_reward = reward / MAX_ACC_JERK / total
  return normalized_reward, reward

# Steering-angle change rate reward
def r_delta_jerk(dt, cavM_deltas, last_cavM_deltas, cavM_predict_deltas):
  """Reward small steering-angle changes for cavM.

  Builds the full steering sequence (previous + current + predicted) and
  awards MAX_DELTA_JERK minus the per-step change rate
  |d[i] - d[i-1]| / dt for every consecutive pair.

  Returns:
      (normalized_reward, reward)
  """
  delta_seq = np.concatenate((np.array(last_cavM_deltas), np.array(cavM_deltas), np.array(cavM_predict_deltas)), axis=0)

  reward = 0
  for idx in range(1, len(delta_seq)):
    # Steering change rate for this transition.
    delta_jerk = abs(delta_seq[idx] - delta_seq[idx - 1]) / dt
    reward += MAX_DELTA_JERK - delta_jerk

  # One term per transition.
  total = len(delta_seq) - 1
  normalized_reward = reward / MAX_DELTA_JERK / total
  return normalized_reward, reward

# Heading angle reward
def r_psi(curr_step, original_state, last_original_state, predict_state):
  """Reward keeping cavM's heading inside PSI_RANGE, phase-dependent.

  Headings (radians at index 3 of the raw states / column 2 of the
  predicted cavM states) are converted to degrees and scored: outside
  [PSI_RANGE[0], PSI_RANGE[1]] the deviation is penalized; inside it the
  target depends on the lane-change phase derived from the time step.

  Returns:
      (normalized_reward, reward) where reward is already averaged over
      the number of transitions.

  BUG FIX: the original advanced time with `curr_step += i - 1`, which
  accumulates offsets (base, base+1, base+3, base+6, ...) instead of one
  step per sample; each sample now uses curr_step + (i - 1). The original
  comments on the range checks were also swapped.
  """
  reward = 0
  # Previous + current + predicted headings, in order.
  cavM_total_psi = np.concatenate((np.array([last_original_state[3]]), np.array([original_state[3]]), np.array(predict_state["cavM_predict_states"][:, 2])), axis=0)
  for i in range(1, len(cavM_total_psi)):
    # Convert radians to degrees.
    cavM_last_psi = cavM_total_psi[i - 1] * 180 / math.pi
    cavM_curr_psi = cavM_total_psi[i] * 180 / math.pi
    step = curr_step + (i - 1)  # time step this sample corresponds to

    if cavM_curr_psi < PSI_RANGE[0]:
      # Below the allowed range: penalize the shortfall.
      reward += -abs(PSI_RANGE[0] - cavM_curr_psi)
    elif cavM_curr_psi > PSI_RANGE[1]:
      # Above the allowed range: penalize the excess.
      reward += -abs(PSI_RANGE[1] - cavM_curr_psi)
    else:
      # Inside the range: tie the target to the lane-change phase.
      if step <= 15:
        # Early phase: reward distance from the upper bound.
        reward += abs(cavM_curr_psi - PSI_RANGE[1])
      elif step <= 20:
        # Middle phase: keep the heading steady.
        reward += -abs(cavM_curr_psi - cavM_last_psi)
      else:
        # Late phase: reward distance from the lower bound.
        reward += abs(cavM_curr_psi - PSI_RANGE[0])

  # Normalization: average per transition, clamp at -1, and map linearly
  # by the width of the allowed range (equivalent to the original
  # piecewise branches, which computed reward / max_diff in both
  # non-clamped cases).
  max_diff = PSI_RANGE[1] - PSI_RANGE[0]
  total = len(cavM_total_psi) - 1
  reward = reward / total
  normalized_reward = max(reward / max_diff, -1)
  return normalized_reward, reward

# Lateral speed reward
def r_v_x(last_kalman_state, original_kalman_state, predict_state, curr_step):
  """Reward cavM's lateral-speed profile over the lane change.

  A negative lateral speed is always penalized. Otherwise the target
  depends on the phase: early (step <= 15) reward increasing lateral
  speed, middle (step <= 20) keep it steady, late reduce it.

  Args:
      last_kalman_state / original_kalman_state: filtered states;
          cavM_filter_state[5] is cavM's lateral speed.
      predict_state: cavM_predict_states column 5 holds predicted
          lateral speeds (must be an ndarray, as in the original).
      curr_step: time step of the current sample.

  Returns:
      (normalized_reward, reward)

  BUG FIX: the original advanced time with `curr_step += i - 1`, which
  accumulates offsets (base, base+1, base+3, ...) instead of one step
  per sample; each sample now uses curr_step + (i - 1).
  """
  reward = 0

  # TODO: like the heading reward, the late phase should ideally drive
  # the lateral speed towards 0.
  # Previous + current + predicted lateral speeds, in order.
  cavM_total_v_x = np.concatenate((np.array([last_kalman_state["cavM_filter_state"][5]]), np.array([original_kalman_state["cavM_filter_state"][5]]), np.array(predict_state["cavM_predict_states"][:, 5])), axis=0)

  for i in range(1, len(cavM_total_v_x)):
    cavM_last_v_x = cavM_total_v_x[i - 1]
    cavM_curr_v_x = cavM_total_v_x[i]
    step = curr_step + (i - 1)  # time step this sample corresponds to

    if cavM_curr_v_x < 0:
      # Never allow a negative lateral speed.
      reward += -abs(cavM_curr_v_x)
    else:
      if step <= 15:
        # Early phase: reward growth of the lateral speed.
        reward += cavM_curr_v_x - cavM_last_v_x
      elif step <= 20:
        # Middle phase: keep the lateral speed steady.
        reward += -abs(cavM_curr_v_x - cavM_last_v_x)
      else:
        # Late phase: reward shrinking the lateral speed.
        reward += cavM_last_v_x - cavM_curr_v_x

  # Normalize per transition.
  # NOTE(review): the divisor 3 looks like an assumed maximum lateral
  # speed — confirm against the vehicle model.
  total = len(cavM_total_v_x) - 1
  normalized_reward = reward / 3 / total
  return normalized_reward, reward

# Cumulative lateral lane-change distance of cavM
def r_x (last_kalman_state, original_kalman_state, predict_state, lane_width, total_step):
  """Reward steady lateral progress of cavM across the lane.

  Builds the sequence of lateral positions (previous + current +
  predicted, filter-state index 1 / predicted-state column 1). Moving
  backwards or past the lane width is penalized; otherwise each frame's
  progress earns |dx| / lane_width / total_step (one frame's share of
  the full lane-change distance).

  Returns:
      (normalized_reward, reward)
  """
  # Past + present + future lateral positions, in order.
  cavM_total_x = np.concatenate((
      np.array([last_kalman_state["cavM_filter_state"][1]]),
      np.array([original_kalman_state["cavM_filter_state"][1]]),
      np.array(predict_state["cavM_predict_states"][:, 1]),
  ), axis=0)

  reward = 0
  for prev_x, curr_x in zip(cavM_total_x[:-1], cavM_total_x[1:]):
    if curr_x < prev_x:
      # Moving backwards laterally: penalize the regression.
      reward += -abs(curr_x - prev_x)
    elif curr_x > lane_width:
      # Overshooting the lane width: penalize the overshoot.
      reward += -abs(curr_x - lane_width)
    else:
      # Expected per-frame progress is lane_width / total_step.
      reward += abs(curr_x - prev_x) / lane_width / total_step

  # One term per transition.
  total = len(cavM_total_x) - 1
  normalized_reward = reward / total
  return normalized_reward, reward