
# Path bootstrap: add the Code folder root to sys.path so absolute package
# imports (Env, transformer, rl_controller) resolve when run as a script.
import sys
import os
CODE_INTERNAL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # absolute path of the Code folder (one level above this file)
sys.path.append(CODE_INTERNAL_PATH)

# Disable Weights & Biases logging for this evaluation run.
os.environ['WANDB_MODE'] = 'disabled'

import tensorflow as tf
import numpy as np
import copy
import math
# NOTE(review): duplicate of the `import sys` above — harmless but redundant.
import sys
import pickle

from Env.Follow_Env import FollowEnv
from transformer._02_transformer_follow import getData
from rl_controller.RL.Follow_SAC import SAC

# NGSIM I80 follow-trajectory source files (paths relative to this script).
FILE_PATH_I80_1_to = "../../../Data/Ngsim数据集/I80数据集/3. 提取数据/1. 跟随数据/trajectories-0400-0415_follow.txt"
FILE_PATH_I80_2_to = "../../../Data/Ngsim数据集/I80数据集/3. 提取数据/1. 跟随数据/trajectories-0500-0515_follow.txt"

# Trained SAC actor weights evaluated by this script.
ACTOR_MODEL_EXCELLENT_REWARD = "../Model/final_model/sac_actor_final_ep2.h5"

class TestFollowEnv:
  """Evaluation harness for a trained SAC car-following policy.

  Runs the actor deterministically (policy mean, no sampling) through
  FollowEnv episodes and collects per-step true / measured / Kalman-filtered
  states plus control signals into plain-Python dicts suitable for pickling
  and plotting.
  """

  def __init__(self, actor_model_path, env, sac):
    """Store the env/agent pair and load pretrained actor weights.

    Args:
      actor_model_path: path to the saved actor weight file (.h5).
      env: FollowEnv instance used for rollouts.
      sac: SAC agent whose `actor` network is evaluated.
    """
    self.sac = sac
    self.env = env

    # Load the pretrained actor weights into the SAC actor network.
    self.sac.actor.load_weights(actor_model_path)
    print("加载actor模型权重：", actor_model_path)

  def act(self, state):
    """Return the deterministic action (policy mean) for a single state."""
    # Add a batch axis; the actor expects batched float64 input.
    state = np.expand_dims(state, axis=0).astype(np.float64)

    # The actor head returns (sample, log_prob, mean, std); at test time we
    # take the mean instead of sampling, giving a deterministic policy.
    _, _, mean, _ = self.sac.actor.predict(state)
    return mean[0]

  def test(self):
    """Roll the deterministic policy through each episode.

    Returns:
      A list with one `extract_draw_data` dict per episode.
    """
    # Load leader (hdvH) / follower (hdvF) trajectory data from the agent.
    hdvH_data, hdvF_data = self.sac.dataReader()
    print("hdvH_data: ", len(hdvH_data), len(hdvH_data[0]), len(hdvH_data[0][0])) # 2727 200 4
    print("hdvF_data: ", len(hdvF_data), len(hdvF_data[0]), len(hdvF_data[0][0])) # 2727 200 4

    # Cap the episode count so evaluation runs stay short.
    hdvH_data = hdvH_data[:200]
    hdvF_data = hdvF_data[:200]

    collect_all_data = []
    for epoch in range(len(hdvH_data)):
      # Reset the environment on this episode's leader/follower trajectories.
      hdvH_epoch_data = hdvH_data[epoch]
      hdvF_epoch_data = hdvF_data[epoch]
      cur_state, original_state, original_kalman_state, original_control_state = self.env.reset(hdvH_epoch_data, hdvF_epoch_data)
      print("======== epoch: ", epoch, " ========")
      print("original_state: ", np.array(original_state).tolist())

      # Per-step collections. State lists start with the reset frame, so they
      # end up one element longer than the reward lists (aligned later in
      # extract_draw_data by left-padding rewards).
      collect_state = [original_state]
      collect_kalman_state = [original_kalman_state]
      collect_control_state = [original_control_state]
      collect_reward = []
      collect_reward_info = []

      done = False
      while not done:
        # Deterministic action from the current state.
        action = self.act(cur_state)

        # Step the environment and record everything it reports.
        next_state, reward, done, reward_info, original_state, original_kalman_state, original_control_state = self.env.step(action)
        collect_state.append(original_state)
        collect_kalman_state.append(original_kalman_state)
        collect_control_state.append(original_control_state)
        collect_reward.append(reward)
        collect_reward_info.append(reward_info)

        cur_state = next_state

      # Flatten this episode's records into one plotting dict.
      draw_data_item = self.extract_draw_data(collect_state, collect_reward, collect_reward_info, collect_kalman_state, collect_control_state)
      collect_all_data.append(draw_data_item)

    return collect_all_data

  def extract_draw_data(self, state, reward, reward_info, kalman_state, control_state):
    """Flatten one episode's per-step records into a dict of plain lists.

    `state` carries one more frame than `reward` (the reset frame), so the
    reward sequence is left-padded with a single 0 to align all lists.

    NOTE(review): `state` and `reward_info` are accepted for interface
    compatibility but contribute nothing to the returned dict — they were
    dead inputs in the original implementation as well.

    Args:
      state: per-frame raw env states (unused in the output).
      reward: per-step scalar rewards (len == frames - 1).
      reward_info: per-step reward breakdowns (unused in the output).
      kalman_state: per-frame dicts of true/measured/filtered vehicle states.
      control_state: per-frame dicts of acceleration/steering signals.

    Returns:
      Dict mapping series name -> list of per-frame floats.
    """
    # Left-pad rewards with 0 for the initial (reset) frame; np.insert on a
    # float sequence already yields plain floats after .tolist().
    collect_reward = np.insert(reward, 0, 0).tolist()
    collect_kalman_state = kalman_state
    collect_control_state = control_state

    collect_data_item = {
      "hdvH_y": [float(d["hdvH_true_state"][0]) for d in collect_kalman_state],
      "hdvH_v": [float(d["hdvH_true_state"][1]) for d in collect_kalman_state],
      "cavM_y": [float(d["cavM_true_state"][0]) for d in collect_kalman_state],
      "cavM_x": [float(d["cavM_true_state"][1]) for d in collect_kalman_state],
      "cavM_v_y": [float(d["cavM_true_state"][4]) for d in collect_kalman_state],
      "cavM_v_x": [float(d["cavM_true_state"][5]) for d in collect_kalman_state],
      "cavM_psi": [float(d["cavM_true_state"][2]) for d in collect_kalman_state],
      "cavM_acc": [float(d["cavM_accs"][0]) for d in collect_control_state],
      "cavM_delta": [float(d["cavM_deltas"][0]) for d in collect_control_state],
      "collect_reward": collect_reward,
      "cav1_y": [float(d["cav1_true_state"][0]) for d in collect_kalman_state],
      "cav1_v": [float(d["cav1_true_state"][1]) for d in collect_kalman_state],
      "hdvF_y": [float(d["hdvF_true_state"][0]) for d in collect_kalman_state],
      "hdvF_v": [float(d["hdvF_true_state"][1]) for d in collect_kalman_state],
      "hdvH_acc": [float(d["hdvH_accs"][0]) for d in collect_control_state],
      "cav1_acc": [float(d["cav1_accs"][0]) for d in collect_control_state],
      "hdvF_acc": [float(d["hdvF_accs"][0]) for d in collect_control_state],
      "hdvH_mea_y": [float(d["hdvH_mea_state"][0]) for d in collect_kalman_state],
      "hdvH_mea_v": [float(d["hdvH_mea_state"][1]) for d in collect_kalman_state],
      "cavM_mea_y": [float(d["cavM_mea_state"][0]) for d in collect_kalman_state],
      "cavM_mea_x": [float(d["cavM_mea_state"][1]) for d in collect_kalman_state],
      "cavM_mea_psi": [float(d["cavM_mea_state"][2]) for d in collect_kalman_state],
      "cavM_mea_v": [float(d["cavM_mea_state"][3]) for d in collect_kalman_state],
      "cavM_mea_v_y": [float(d["cavM_mea_state"][4]) for d in collect_kalman_state],
      "cavM_mea_v_x": [float(d["cavM_mea_state"][5]) for d in collect_kalman_state],
      "cav1_mea_y": [float(d["cav1_mea_state"][0]) for d in collect_kalman_state],
      "cav1_mea_v": [float(d["cav1_mea_state"][1]) for d in collect_kalman_state],
      "hdvF_mea_y": [float(d["hdvF_mea_state"][0]) for d in collect_kalman_state],
      "hdvF_mea_v": [float(d["hdvF_mea_state"][1]) for d in collect_kalman_state],
      "hdvH_filter_y": [float(d["hdvH_filter_state"][0]) for d in collect_kalman_state],
      "hdvH_filter_v": [float(d["hdvH_filter_state"][1]) for d in collect_kalman_state],
      "hdvH_filter_covariances_y": [float(d["hdvH_filter_covariances"][0]) for d in collect_kalman_state],
      "hdvH_filter_covariances_v": [float(d["hdvH_filter_covariances"][1]) for d in collect_kalman_state],
      "cavM_filter_y": [float(d["cavM_filter_state"][0]) for d in collect_kalman_state],
      "cavM_filter_x": [float(d["cavM_filter_state"][1]) for d in collect_kalman_state],
      "cavM_filter_psi": [float(d["cavM_filter_state"][2]) for d in collect_kalman_state],
      "cavM_filter_v": [float(d["cavM_filter_state"][3]) for d in collect_kalman_state],
      "cavM_filter_v_y": [float(d["cavM_filter_state"][4]) for d in collect_kalman_state],
      "cavM_filter_v_x": [float(d["cavM_filter_state"][5]) for d in collect_kalman_state],
      # NOTE(review): `_y` reads index 1 and `_x` index 0 — the opposite of the
      # cavM_filter_state ordering above. Kept as-is; confirm the covariance
      # vector layout before "fixing". float() added for consistency with the
      # other entries so the pickled output holds plain Python floats.
      "cavM_filter_covariances_y": [float(d["cavM_filter_covariances"][1]) for d in collect_kalman_state],
      "cavM_filter_covariances_x": [float(d["cavM_filter_covariances"][0]) for d in collect_kalman_state],
      "cavM_filter_covariances_psi": [float(d["cavM_filter_covariances"][2]) for d in collect_kalman_state],
      "cavM_filter_covariances_v": [float(d["cavM_filter_covariances"][3]) for d in collect_kalman_state],
      "cav1_filter_y": [float(d["cav1_filter_state"][0]) for d in collect_kalman_state],
      "cav1_filter_v": [float(d["cav1_filter_state"][1]) for d in collect_kalman_state],
      "cav1_filter_covariances_y": [float(d["cav1_filter_covariances"][0]) for d in collect_kalman_state],
      "cav1_filter_covariances_v": [float(d["cav1_filter_covariances"][1]) for d in collect_kalman_state],
      "hdvF_filter_y": [float(d["hdvF_filter_state"][0]) for d in collect_kalman_state],
      "hdvF_filter_v": [float(d["hdvF_filter_state"][1]) for d in collect_kalman_state],
      "hdvF_filter_covariances_y": [float(d["hdvF_filter_covariances"][0]) for d in collect_kalman_state],
      "hdvF_filter_covariances_v": [float(d["hdvF_filter_covariances"][1]) for d in collect_kalman_state],
    }
    return collect_data_item

  def _data_reader(self, fileName):
    """Load the follow dataset and split it into leader/follower arrays.

    NOTE(review): the `fileName` parameter is ignored — this always reads the
    two module-level I80 files via getData(). No callers are visible in this
    file; confirm intent before wiring the parameter through or removing it.
    """
    follow_data = getData(FILE_PATH_I80_1_to, FILE_PATH_I80_2_to)

    print("数据集大小: ", len(follow_data), len(follow_data[0]), len(follow_data[0][0])) # 2727 200 14
    # Split columns into leader (hdvH) and follower (hdvF) vehicle data:
    # y position, x position, longitudinal velocity, longitudinal acceleration.
    hdvH_data = np.array(follow_data)[:, :, [5, 3, 7, 9]]
    hdvF_data = np.array(follow_data)[:, :, [6, 4, 8, 10]]

    return hdvH_data, hdvF_data

def write_data(objs, fileName):
  """Serialize *objs* to *fileName* as a binary pickle file."""
  out = open(fileName, "wb")
  try:
    pickle.dump(objs, out)
  finally:
    out.close()

if __name__ == "__main__":
  # Build the environment/agent pair and wrap them in the test harness
  # (loads the pretrained actor weights).
  env = FollowEnv()
  sac = SAC(env)
  test = TestFollowEnv(ACTOR_MODEL_EXCELLENT_REWARD, env, sac)

  # Run deterministic rollouts and collect per-step plotting data.
  collect_data = test.test()

  # Persist the results for plotting.
  # NOTE(review): assumes ./data already exists — create it first or this raises FileNotFoundError.
  write_data(collect_data, "./data/collect_data.pkl")