
# Fix package-relative imports by putting the parent directory on sys.path.
import sys
import os
CODE_INTERNAL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(CODE_INTERNAL_PATH)

# Force UTF-8 mode for consistent text encoding.
os.environ['PYTHONUTF8'] = '1'

# Third-party imports
import wandb
import time
import random
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from datetime import datetime
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Concatenate
from tensorflow.keras.optimizers import Adam
from collections import deque

# Project-local imports (replay memory, environment, trajectory data loader)
from Prioritized_Replay import Memory
from Env.Follow_Env import FollowEnv
from transformer._02_transformer_follow import getData

# Use float64 everywhere for numerical stability of the SAC losses.
tf.keras.backend.set_floatx('float64')
# Offline wandb run; metrics are logged locally and can be synced later.
wandb.init(name=f"SAC_{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", project="deep-rl-sac-follow", mode="offline")

# NGSIM I-80 car-following trajectory files (two 15-minute recording windows).
FILE_PATH_I80_1_to = "../../../Data/Ngsim数据集/I80数据集/3. 提取数据/1. 跟随数据/trajectories-0400-0415_follow.txt"
FILE_PATH_I80_2_to = "../../../Data/Ngsim数据集/I80数据集/3. 提取数据/1. 跟随数据/trajectories-0500-0515_follow.txt"

class SAC:
    """Soft Actor-Critic (SAC) agent for the car-following environment.

    Standard SAC (Haarnoja et al., 2018): a squashed-Gaussian policy,
    twin Q-networks with soft-updated target copies, optional automatic
    entropy-temperature tuning, and optional prioritized experience
    replay. Training metrics are streamed to the (offline) wandb run.
    """

    def __init__(
        self,
        env,
        use_priority=True,
        lr_actor=3e-4,           # actor learning rate
        lr_critic=3e-4,          # critic learning rate
        lr_alpha=3e-4,           # temperature-parameter learning rate
        actor_units=(256, 256),  # actor hidden-layer sizes
        critic_units=(256, 256), # critic hidden-layer sizes
        gamma=0.99,              # discount factor
        tau=0.005,               # soft-update coefficient for target nets
        alpha=0.2,               # initial entropy temperature
        auto_entropy_tuning=True,# tune the temperature automatically?
        target_entropy=None,     # target entropy (default: -action_dim)
        batch_size=256,          # mini-batch size
        memory_cap=1000000,      # replay-buffer capacity
        reward_scale=1.0         # reward scaling factor
    ):
        self.env = env
        self.state_shape = (env.n_state,)     # state dimensionality
        self.action_dim = env.n_action        # action dimensionality
        self.action_bound = env.action_bound  # tanh output is scaled by this bound
        self.action_shift = 0  # tanh in [-1, 1] maps to [-bound, bound]; no offset

        self.use_priority = use_priority
        self.memory = Memory(capacity=memory_cap) if use_priority else deque(maxlen=memory_cap)

        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.reward_scale = reward_scale

        # Automatic entropy tuning: the canonical target entropy is -|A|.
        self.auto_entropy_tuning = auto_entropy_tuning
        if target_entropy is None:
            self.target_entropy = -np.prod(self.action_dim).item()
        else:
            self.target_entropy = target_entropy

        # Entropy temperature, optimized in log-space so it stays positive.
        self.alpha = tf.Variable(alpha, dtype=tf.float64)
        if self.auto_entropy_tuning:
            self.log_alpha = tf.Variable(tf.math.log(self.alpha), dtype=tf.float64)
            self.alpha_optimizer = Adam(learning_rate=lr_alpha)

        # Build actor, twin critics, and target critics.
        self._create_networks(actor_units, critic_units, lr_actor, lr_critic)

        # Hard-copy online critic weights into the targets (tau=1.0).
        self._update_target_networks(tau=1.0)

    def _create_networks(self, actor_units, critic_units, lr_actor, lr_critic):
        """Build the actor, the twin critics, and their target copies."""
        # Actor network
        self.actor = self._build_actor(actor_units)
        self.actor_optimizer = Adam(learning_rate=lr_actor)

        # Twin Q-networks (clipped double-Q to reduce overestimation bias)
        self.critic1 = self._build_critic(critic_units)
        self.critic2 = self._build_critic(critic_units)
        self.critic1_optimizer = Adam(learning_rate=lr_critic)
        self.critic2_optimizer = Adam(learning_rate=lr_critic)

        # Target critics (updated only via soft updates, never trained)
        self.target_critic1 = self._build_critic(critic_units)
        self.target_critic2 = self._build_critic(critic_units)

    def _build_actor(self, units):
        """Build the squashed-Gaussian policy network.

        Outputs [action, log_prob, mean, std]; the action is
        tanh-squashed, then scaled by action_bound and shifted.
        """
        state = Input(shape=self.state_shape)
        x = Dense(units[0], activation='relu')(state)
        for i in range(1, len(units)):
            x = Dense(units[i], activation='relu')(x)

        # Gaussian parameters: tanh-bounded mean and free log-std head.
        mean = Dense(self.action_dim, activation='tanh')(x)
        log_std = Dense(self.action_dim)(x)

        # Clamp log-std to a sane range, then exponentiate.
        log_std = Lambda(lambda x: tf.clip_by_value(x, -20, 2))(log_std)
        std = Lambda(lambda x: tf.exp(x))(log_std)

        # Reparameterized sampling with the tanh change-of-variables correction.
        def sample_action(args):
            mean, std = args
            normal = tfp.distributions.Normal(mean, std)
            x_t = normal.sample()
            y_t = tf.tanh(x_t)
            action = y_t * self.action_bound + self.action_shift

            # Joint log-probability of the action vector.
            # FIX: sum over action dimensions so log_prob has shape
            # (batch, 1); the unsummed form is only correct for 1-D actions.
            log_prob = tf.reduce_sum(normal.log_prob(x_t), axis=1, keepdims=True)
            # Tanh-squashing correction (Haarnoja et al., 2018, App. C).
            log_prob -= tf.reduce_sum(tf.math.log(1 - y_t**2 + 1e-6), axis=1, keepdims=True)

            return action, log_prob, mean, std

        action, log_prob, mean_out, std_out = Lambda(sample_action)([mean, std])

        model = Model(inputs=state, outputs=[action, log_prob, mean_out, std_out])
        return model

    def _build_critic(self, units):
        """Build a Q-network: Q(s, a) -> scalar."""
        state_input = Input(shape=self.state_shape)
        action_input = Input(shape=(self.action_dim,))

        x = Concatenate()([state_input, action_input])
        x = Dense(units[0], activation='relu')(x)
        for i in range(1, len(units)):
            x = Dense(units[i], activation='relu')(x)

        q_value = Dense(1)(x)

        model = Model(inputs=[state_input, action_input], outputs=q_value)
        return model

    def _update_target_networks(self, tau):
        """Polyak-average online critic weights into the target critics."""
        def update_weights(model, target_model):
            weights = model.get_weights()
            target_weights = target_model.get_weights()
            for i in range(len(target_weights)):
                target_weights[i] = weights[i] * tau + target_weights[i] * (1 - tau)
            target_model.set_weights(target_weights)

        update_weights(self.critic1, self.target_critic1)
        update_weights(self.critic2, self.target_critic2)

    def act(self, state, deterministic=False):
        """Select an action for `state`.

        Args:
            state: 1-D state vector (batched internally).
            deterministic: if True use the policy mean (evaluation);
                otherwise sample from the squashed Gaussian (exploration).

        Returns:
            Action vector scaled to the environment's action range.
        """
        state = np.expand_dims(state, axis=0).astype(np.float64)

        if deterministic:
            # Deterministic policy: the mean head is already tanh-squashed,
            # so scale/shift it exactly like the stochastic branch does.
            # FIX: the raw mean was previously returned unscaled.
            _, _, mean, _ = self.actor.predict(state)
            action = mean[0] * self.action_bound + self.action_shift
        else:
            # Stochastic policy: reparameterized sample (already scaled).
            action, _, _, _ = self.actor.predict(state)
            action = action[0]

        # Log Q1(s, a) for monitoring.
        q1_val = self.critic1.predict([state, np.expand_dims(action, axis=0)])[0][0]
        wandb.log({'q_val': q1_val})

        return action

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        if self.use_priority:
            state = state.flatten() if len(state.shape) > 1 else state
            next_state = next_state.flatten() if len(next_state.shape) > 1 else next_state
            action = action.flatten() if len(action.shape) > 1 else action

            # Flat transition layout: [s | a | r | s' | done].
            transition = np.hstack([state, action, [reward], next_state, [done]])
            self.memory.store(transition)
        else:
            state = np.expand_dims(state, axis=0)
            next_state = np.expand_dims(next_state, axis=0)
            self.memory.append([state, action, reward, next_state, done])

    def train_step(self):
        """Run one SAC update (critics, actor, temperature, targets).

        Returns:
            (critic1_loss, critic2_loss, actor_loss), or (None, None, None)
            when the buffer holds fewer than `batch_size` transitions.
        """
        # NOTE(review): assumes the prioritized Memory also supports len().
        if len(self.memory) < self.batch_size:
            return None, None, None

        # Sample a mini-batch.
        if self.use_priority:
            tree_idx, samples, ISWeights = self.memory.sample(self.batch_size)
            # Unpack the flat [s | a | r | s' | done] transition rows.
            state_dim = self.state_shape[0]
            split_points = [state_dim, state_dim + self.action_dim, state_dim + self.action_dim + 1,
                       state_dim + self.action_dim + 1 + state_dim]
            states = samples[:, :split_points[0]]
            actions = samples[:, split_points[0]:split_points[1]]
            rewards = samples[:, split_points[1]:split_points[2]]
            next_states = samples[:, split_points[2]:split_points[3]]
            dones = samples[:, split_points[3]:]
            ISWeights = tf.convert_to_tensor(ISWeights, dtype=tf.float64)
        else:
            tree_idx = None  # no priorities to update for uniform replay
            ISWeights = 1.0  # uniform importance weights
            samples = random.sample(self.memory, self.batch_size)
            s = np.array(samples, dtype=object).T
            states, actions, rewards, next_states, dones = [np.vstack(s[i, :]).astype(np.float64) for i in range(5)]

        # Convert to tensors (rewards are scaled here, once).
        states = tf.convert_to_tensor(states, dtype=tf.float64)
        actions = tf.convert_to_tensor(actions, dtype=tf.float64)
        rewards = tf.convert_to_tensor(rewards * self.reward_scale, dtype=tf.float64)
        next_states = tf.convert_to_tensor(next_states, dtype=tf.float64)
        dones = tf.convert_to_tensor(dones, dtype=tf.float64)

        # Update critics.
        critic1_loss, critic2_loss = self._update_critics(states, actions, rewards, next_states, dones, ISWeights, tree_idx)

        # Update actor.
        actor_loss, log_pi = self._update_actor(states, ISWeights)

        # Update entropy temperature.
        if self.auto_entropy_tuning:
            alpha_loss = self._update_alpha(log_pi)
            wandb.log({'alpha_loss': alpha_loss})
            wandb.log({'alpha': self.alpha.numpy()})

        # Soft-update target critics.
        self._update_target_networks(self.tau)

        # Log losses.
        wandb.log({'critic1_loss': critic1_loss})
        wandb.log({'critic2_loss': critic2_loss})
        wandb.log({'actor_loss': actor_loss})
        wandb.log({'log_pi': tf.reduce_mean(log_pi)})

        # FIX: return losses on the success path too, matching the
        # early-return signature above (callers may ignore them).
        return critic1_loss, critic2_loss, actor_loss

    def _update_critics(self, states, actions, rewards, next_states, dones, ISWeights, tree_idx=None):
        """One gradient step on both critics against the soft Bellman target."""
        with tf.GradientTape(persistent=True) as tape:
            # Soft target: min of target Qs minus entropy bonus.
            next_actions, next_log_pi, _, _ = self.actor(next_states)
            target_q1 = self.target_critic1([next_states, next_actions])
            target_q2 = self.target_critic2([next_states, next_actions])
            target_q = tf.minimum(target_q1, target_q2) - self.alpha * next_log_pi
            target_q = rewards + self.gamma * (1 - dones) * target_q

            # Current Q estimates.
            current_q1 = self.critic1([states, actions])
            current_q2 = self.critic2([states, actions])

            # Importance-weighted MSE losses.
            critic1_loss = tf.reduce_mean(ISWeights * tf.square(current_q1 - target_q))
            critic2_loss = tf.reduce_mean(ISWeights * tf.square(current_q2 - target_q))

        # Update critic 1.
        critic1_grad = tape.gradient(critic1_loss, self.critic1.trainable_variables)
        self.critic1_optimizer.apply_gradients(zip(critic1_grad, self.critic1.trainable_variables))

        # Update critic 2.
        critic2_grad = tape.gradient(critic2_loss, self.critic2.trainable_variables)
        self.critic2_optimizer.apply_gradients(zip(critic2_grad, self.critic2.trainable_variables))

        # FIX: explicitly release the persistent tape's resources.
        del tape

        # Refresh replay priorities with the combined TD error.
        if self.use_priority:
            td_errors = tf.abs(current_q1 - target_q) + tf.abs(current_q2 - target_q)
            td_errors_flat = tf.reshape(td_errors, [-1]).numpy()
            self.memory.batch_update(tree_idx, td_errors_flat)

        return critic1_loss.numpy(), critic2_loss.numpy()

    def _update_actor(self, states, ISWeights):
        """One gradient step on the actor (maximize min-Q minus entropy cost)."""
        with tf.GradientTape() as tape:
            actions, log_pi, _, _ = self.actor(states)
            q1 = self.critic1([states, actions])
            q2 = self.critic2([states, actions])
            min_q = tf.minimum(q1, q2)

            actor_loss = tf.reduce_mean(ISWeights * (self.alpha * log_pi - min_q))

        actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)
        self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))

        return actor_loss.numpy(), log_pi

    def _update_alpha(self, log_pi):
        """One gradient step on the entropy temperature (in log-space)."""
        with tf.GradientTape() as tape:
            # stop_gradient makes explicit that only log_alpha is trained;
            # the gradient w.r.t. log_alpha is unchanged.
            alpha_loss = -tf.reduce_mean(self.log_alpha * tf.stop_gradient(log_pi + self.target_entropy))

        alpha_grad = tape.gradient(alpha_loss, [self.log_alpha])
        self.alpha_optimizer.apply_gradients(zip(alpha_grad, [self.log_alpha]))

        # Keep alpha in sync with its log-space parameter.
        self.alpha.assign(tf.exp(self.log_alpha))

        return alpha_loss.numpy()

    def save_model(self, actor_fn, critic1_fn, critic2_fn):
        """Save actor and both critics to the given paths."""
        self.actor.save(actor_fn)
        self.critic1.save(critic1_fn)
        self.critic2.save(critic2_fn)

    def load_model(self, actor_fn, critic1_fn, critic2_fn):
        """Load weights; target critics are initialized from the online critics."""
        self.actor.load_weights(actor_fn)
        self.critic1.load_weights(critic1_fn)
        self.critic2.load_weights(critic2_fn)
        self.target_critic1.load_weights(critic1_fn)
        self.target_critic2.load_weights(critic2_fn)

    # The following methods mirror the DDPG variant (data loading, training loop).
    def dataReader(self):
        """Load leader/follower trajectory arrays from the NGSIM files.

        Returns:
            (hdvH_data, hdvF_data): leader and follower arrays with columns
            [y, x, longitudinal speed, longitudinal acceleration].
        """
        follow_data = getData(FILE_PATH_I80_1_to, FILE_PATH_I80_2_to)
        print("数据集大小: ", len(follow_data), len(follow_data[0]), len(follow_data[0][0])) # 2727 200 14
        # Split each record into leader (H) and follower (F) channels.
        hdvH_data = np.array(follow_data)[:, :, [5, 3, 7, 9]]  # leader: y, x, v_long, a_long
        hdvF_data = np.array(follow_data)[:, :, [6, 4, 8, 10]] # follower: y, x, v_long, a_long

        return hdvH_data, hdvF_data

    def train(self, max_episodes=2):
        """Main training loop over recorded follow-pair trajectories."""
        # Load data.
        hdvH_data, hdvF_data = self.dataReader()
        print("hdvH_data: ", len(hdvH_data), len(hdvH_data[0]), len(hdvH_data[0][0])) # 2727 200 4
        print("hdvF_data: ", len(hdvF_data), len(hdvF_data[0]), len(hdvF_data[0][0])) # 2727 200 4

        hdvH_data = hdvH_data[:800] # testing: trim the dataset
        hdvF_data = hdvF_data[:800] # testing: trim the dataset

        # FIX: start max_reward at -inf so checkpoints are saved even when
        # all mean episode rewards are negative (0 would never be reached).
        episode, total_reward, train_step, max_reward = 0, 0, 0, -np.inf

        for episode in range(max_episodes):
            for epoch in range(len(hdvH_data)):
                # Pick one leader/follower trajectory pair and reset the env.
                hdvH_epoch_data = hdvH_data[epoch]
                hdvF_epoch_data = hdvF_data[epoch]
                cur_state, _, _, _ = self.env.reset(hdvH_epoch_data, hdvF_epoch_data)

                epoch_reward, done = 0, False
                reward_acc, reward_delta, reward_v_diff, reward_y_diff = 0, 0, 0, 0
                reward_acc_jerk, reward_delta_jerk, reward_psi, reward_v_x, reward_x = 0, 0, 0, 0, 0
                step = 1

                while not done:
                    # Select an (exploratory) action.
                    action = self.act(cur_state)

                    # Step the environment.
                    next_state, reward, done, reward_info, _, _, _ = self.env.step(action)

                    # Store the transition.
                    self.remember(cur_state, action, reward, next_state, done)

                    # One SAC update per environment step.
                    self.train_step()
                    train_step += 1

                    # Advance.
                    cur_state = next_state
                    epoch_reward += reward
                    step += 1

                    # Accumulate per-component rewards for logging.
                    reward_acc += reward_info["reward_acc"]
                    reward_delta += reward_info["reward_delta"]
                    reward_v_diff += reward_info["reward_v_diff"]
                    reward_y_diff += reward_info["reward_y_diff"]
                    reward_acc_jerk += reward_info["reward_acc_jerk"]
                    reward_delta_jerk += reward_info["reward_delta_jerk"]
                    reward_psi += reward_info["reward_psi"]
                    reward_v_x += reward_info["reward_v_x"]
                    reward_x += reward_info["reward_x"]

                # Log per-step averages and checkpoint on improvement.
                epoch_reward /= step
                reward_acc /= step
                reward_delta /= step
                reward_v_diff /= step
                reward_y_diff /= step
                reward_acc_jerk /= step
                reward_delta_jerk /= step
                reward_psi /= step
                reward_v_x /= step
                reward_x /= step
                wandb.log({'Reward': epoch_reward})
                wandb.log({'reward_acc': reward_acc})
                wandb.log({'reward_delta': reward_delta})
                wandb.log({'reward_v_diff': reward_v_diff})
                wandb.log({'reward_y_diff': reward_y_diff})
                wandb.log({'reward_acc_jerk': reward_acc_jerk})
                wandb.log({'reward_delta_jerk': reward_delta_jerk})
                wandb.log({'reward_psi': reward_psi})
                wandb.log({'reward_v_x': reward_v_x})
                wandb.log({'reward_x': reward_x})

                if epoch_reward >= max_reward:
                    max_reward = epoch_reward
                    self.save_model(
                        f"../Model/train_model/sac_actor_ep{episode}_reward{epoch_reward:.2f}.h5",
                        f"../Model/train_model/sac_critic1_ep{episode}_reward{epoch_reward:.2f}.h5",
                        f"../Model/train_model/sac_critic2_ep{episode}_reward{epoch_reward:.2f}.h5"
                    )

                print(f"Episode {episode}, Epoch {epoch}, Reward: {epoch_reward:.2f}")

        # Save the final models.
        self.save_model(
            f"../Model/final_model/sac_actor_final_ep{episode}.h5",
            f"../Model/final_model/sac_critic1_final_ep{episode}.h5",
            f"../Model/final_model/sac_critic2_final_ep{episode}.h5"
        )

if __name__ == "__main__":
    # Entry point: build the agent on the car-following env and train it.
    agent = SAC(env=FollowEnv())
    agent.train()