# 计算单元类CU, 包含子类RSU和OBU
import random

from simulation_constants import *;

class CU:
    """Computing Unit (CU) base class, specialized by the RSU and OBU subclasses.

    Attributes:
        id: computing-unit identifier
        x, y: initial coordinates of the unit
        computing_power: processing capability, in G Cycles/s
        task_queue: tasks assigned to this unit
        next_idle_time_frame: frame index at which the unit next becomes idle;
            replaces a task-queue busy model — a CU given a task is assumed to
            run at 100% capacity so no computing power is wasted
    """
    # Class-level defaults are kept for backward compatibility: subclasses in
    # this file (e.g. VehicleOrOBU) set attributes directly without calling
    # super().__init__(), and still fall back to these.
    id: str = ""
    x: float = -1
    y: float = -1
    computing_power: float = 1
    task_queue: list = []
    next_idle_time_frame: int = 0

    def __init__(self):
        # BUG FIX: the class attribute `task_queue = []` is a single list
        # shared by every instance that never assigns its own queue. Give
        # each directly-constructed CU a private list instead.
        self.task_queue = []

def clip(value, min_value, max_value):
    """Clamp *value* to the inclusive range [min_value, max_value].

    :param value: the value to clamp
    :param min_value: lower bound of the allowed range
    :param max_value: upper bound of the allowed range
    :return: *value* limited to [min_value, max_value]
    """
    # First cap from above, then floor from below.
    capped_above = min(value, max_value)
    return max(min_value, capped_above)


# OBU (On Board Unit): the vehicle-mounted computing unit
class VehicleOrOBU(CU):
    velocity: float = -1  # current vehicle speed, m/s
    update_flag: bool = False  # cleanup flag (set when the vehicle should be purged from the scenario — TODO confirm against caller)
    enter_scenario_time: int = -1  # frame index at which the vehicle entered the scenario
    angle: float = 400  # vehicle heading; 400 is out of [0, 360), presumably an "unset" sentinel — TODO confirm
    lane: str = "default_lane"  # identifier of the lane the vehicle is currently in

    
    def __init__(self,
                 id="",
                 x=0.0,
                 y=0.0,
                 velocity=0,
                 computing_power=None,
                 enter_scenario_time=0,
                 angle=400,
                 lane="default_lane",
                 os_policy=None,
                 manager_RSU="RSU"):
        """Vehicle (OBU) initializer.

        :param id: vehicle ID
        :param x: initial x coordinate
        :param y: initial y coordinate
        :param velocity: initial speed
        :param computing_power: computing capability in G Cycles/s; when None,
            a per-vehicle value is drawn from a Gaussian and clipped to
            mu +/- 3*sigma
        :param enter_scenario_time: frame index at which the vehicle enters the scenario
        :param angle: vehicle heading
        :param lane: lane the vehicle is in
        :param os_policy: offloading-strategy (policy) object for this vehicle
        :param manager_RSU: the RSU this vehicle is managed by
        """
        if computing_power is None:
            # BUG FIX: the old signature computed
            # clip(random.gauss(...)) directly in the default argument.
            # Python evaluates defaults ONCE at function-definition time, so
            # every default-constructed vehicle shared the exact same
            # "random" computing power (and the draw happened at import).
            # Drawing here gives each vehicle an independent sample.
            computing_power = clip(
                random.gauss(OBU_COMPUTING_POWER_MU, OBU_COMPUTING_POWER_SIGMA),
                OBU_COMPUTING_POWER_MU - 3 * OBU_COMPUTING_POWER_SIGMA,
                OBU_COMPUTING_POWER_MU + 3 * OBU_COMPUTING_POWER_SIGMA)
        self.id = id
        self.x = x
        self.y = y
        self.velocity = velocity
        self.computing_power = computing_power
        self.enter_scenario_time = enter_scenario_time
        self.angle = angle
        self.lane = lane
        self.os_policy = os_policy
        self.manager_RSU = manager_RSU


    def make_task_offloading_decision(self,
                                   task,
                                   scenario,
                                   offloading_strategy_decision=0,
                                   non_learning=False):
        """ 用于任务卸载决策
        :param task: 待卸载的任务
        :param scenario: 场景对象, 包含所有车辆和RSU信息
        :param offloading_strategy_decision: 卸载策略决策
        :param non_learning: 是否为非学习训练, 默认False
        :return: 卸载决策列表, 每个元素表示 [任务卸载目标, 任务卸载比例]
        """
        def time_to_next_idle(self, scenario):
            """ 计算车辆下一个空闲时间帧序号与当前时间帧序号的差值
            :param scenario: 场景对象, 包含所有车辆和RSU信息
            :return: 车辆下一个空闲时间帧序号与当前时间帧序号的差值
            """
            return self.next_idle_time_frame - scenario.frame

        # 筛选出 [所属RSU, 优势值车辆1, 优势值车辆2, 优势值车辆3, 优势值车辆4, 任务车辆自身]
        acus = scenario.select_advantageous_vehicles(self)
        
        # 决策开始
        # 生成完整卸载决策, 列表中元素表示: [[任务卸载目标, 任务卸载比例], [,,,],,,]  后续可尝试加入 车辆距离, 信号发送功率
        if offloading_strategy_decision == 0:
            # 完全本地计算
            offloading_decision = [[acus[0], 0],
                                  [acus[1], 0],
                                  [acus[2], 0],
                                  [acus[3], 0],
                                  [acus[4], 0],
                                  [acus[5], 1]]
            # 移除逐次策略打印，保留逻辑不变
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
        elif offloading_strategy_decision == 1:
            # 完全所属 RSU 进行计算
            offloading_decision = [[acus[0], 1],
                                  [acus[1], 0],
                                  [acus[2], 0],
                                  [acus[3], 0],
                                  [acus[4], 0],
                                  [acus[5], 0]]
            # 移除逐次策略打印，保留逻辑不变
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
        elif offloading_strategy_decision == 2:
            # 随机卸载进行计算
            offloading_decision = [[acus[0], 0],
                                  [acus[1], 0],
                                  [acus[2], 0],
                                  [acus[3], 0],
                                  [acus[4], 0],
                                  [acus[5], 0]]
            offloading_decision[random.choice([0, 1, 2, 3, 4, 5])][1] = 1
            # 移除逐次策略打印，保留逻辑不变
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
        elif offloading_strategy_decision in {4, 6}:
            if offloading_strategy_decision in is_centralized_strategy:
                if acus[0] != self.manager_RSU:
                    self.manager_RSU = acus[0]
                    self.os_policy.actor.load_state_dict(self.manager_RSU.os_policy.actor.state_dict())
            # 使用优化后的分布式 PPO 进行决策
            # 构建 PPO 当前状态, 类型为 3*5+2 的列表, 类型为预计通信连接时间, 计算能力, 下一空闲时间,  本地卸载没有预计通信连接时间
            ppo_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
                          acus[0].computing_power,
                          time_to_next_idle(acus[0], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), 
                          acus[1].computing_power,
                          time_to_next_idle(acus[1], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), 
                          acus[2].computing_power,
                          time_to_next_idle(acus[2], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]),
                          acus[3].computing_power,
                          time_to_next_idle(acus[3], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]),
                          acus[4].computing_power,
                          time_to_next_idle(acus[4], scenario),
                          self.computing_power, 
                          time_to_next_idle(self, scenario),
                          task.type]
            
            # 使用优化后的PPO Agent获取动作和相关信息
            if not non_learning:
                # 训练模式：获取动作、对数概率、价值估计和熵
                ppo_action, action_logprob, state_val, entropy = self.os_policy.get_action_with_log_prob(ppo_state)
            else:
                # 推理模式：只获取动作和价值估计
                ppo_action, state_val = self.os_policy.get_action(ppo_state)
                action_logprob = 0.0
                entropy = 0.0
            
            # 构建one-hot编码的动作
            po = [0 for _ in range(6)]
            po[ppo_action] = 1.0
            offloading_decision = [[acus[0], po[0]],
                                  [acus[1], po[1]],
                                  [acus[2], po[2]],
                                  [acus[3], po[3]],
                                  [acus[4], po[4]],
                                  [acus[5], po[5]]]
            # 移除逐次策略打印，保留逻辑不变
            
            # 计算卸载结果
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
            
            # 如果不是非学习模式，添加经验到回放缓冲区
            if not non_learning:
                # 构建下一状态（执行动作后的状态）
                ppo_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
                                  acus[0].computing_power,
                                  time_to_next_idle(acus[0], scenario),
                                  scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
                                  time_to_next_idle(acus[1], scenario),
                                  scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,  
                                  time_to_next_idle(acus[2], scenario),
                                  scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
                                  time_to_next_idle(acus[3], scenario),
                                  scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
                                  time_to_next_idle(acus[4], scenario),
                                  self.computing_power, time_to_next_idle(self, scenario),
                                  task.type]
                # 判断是否完成（考虑超时和任务质量）
                done = is_timeout or (total_time > task.maximum_allowable_delay * 0.9)
                
                # 使用优化后的add_memo接口添加经验
                if offloading_strategy_decision in is_centralized_strategy:
                    self.manager_RSU.os_policy.replay_buffer.add_memo(state=ppo_state,
                                                        action=ppo_action,
                                                        reward=reward,
                                                        value=state_val,
                                                        log_prob=action_logprob,
                                                        next_state=ppo_next_state,
                                                        done=done)
                    
                    # 更新PPO网络
                    self.manager_RSU.os_policy.update()
                else:
                    self.os_policy.replay_buffer.add_memo(state=ppo_state,
                                                        action=ppo_action,
                                                        reward=reward,
                                                        value=state_val,
                                                        log_prob=action_logprob,
                                                        next_state=ppo_next_state,
                                                        done=done)
                    
                    # 更新PPO网络
                    self.os_policy.update()
        elif offloading_strategy_decision in is_distributed_strategy:
            state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
                          time_to_next_idle(acus[0], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
                          time_to_next_idle(acus[1], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
                          time_to_next_idle(acus[2], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
                          time_to_next_idle(acus[3], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
                          time_to_next_idle(acus[4], scenario),
                          self.computing_power, 
                          time_to_next_idle(self, scenario),
                          task.type]
            actions = self.os_policy.get_action(state)
            offloading_decision = [[acus[0], actions[0]],
                                  [acus[1], actions[1]],
                                  [acus[2], actions[2]],
                                  [acus[3], actions[3]],
                                  [acus[4], actions[4]],
                                  [acus[5], actions[5]]]
            # 移除逐次策略打印，保留逻辑不变
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
            next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
                                acus[0].computing_power,
                                time_to_next_idle(acus[0], scenario),
                               scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), 
                               acus[1].computing_power,
                               time_to_next_idle(acus[1], scenario),
                               scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), 
                               acus[2].computing_power,
                               time_to_next_idle(acus[2], scenario),
                               scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), 
                               acus[3].computing_power, 
                               time_to_next_idle(acus[3], scenario),
                               scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), 
                               acus[4].computing_power, 
                               time_to_next_idle(acus[4], scenario),
                               self.computing_power,
                               time_to_next_idle(self, scenario),
                               task.type]

            self.os_policy.replay_buffer.add_memo(state=state,
                                                  action=actions,
                                                  next_state=next_state,
                                                  reward=reward)
            self.os_policy.update()
        elif offloading_strategy_decision in is_centralized_strategy:
            # 集中式联邦学习
            if acus[0] != self.manager_RSU:
                self.manager_RSU = acus[0]
                self.os_policy.actor.load_state_dict(self.manager_RSU.os_policy.actor.state_dict())

            state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
                          time_to_next_idle(acus[0], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
                          time_to_next_idle(acus[1], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
                          time_to_next_idle(acus[2], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
                          time_to_next_idle(acus[3], scenario),
                          scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
                          time_to_next_idle(acus[4], scenario),
                          self.computing_power, 
                          time_to_next_idle(self, scenario),
                          task.type]
            actions = self.os_policy.get_action(state)
            offloading_decision = [[acus[0], actions[0]],
                                  [acus[1], actions[1]],
                                  [acus[2], actions[2]],
                                  [acus[3], actions[3]],
                                  [acus[4], actions[4]],
                                  [acus[5], actions[5]]]
            # 移除逐次策略打印，保留逻辑不变
            reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
                                                                                   task_vehicle=self,
                                                                                   offloading_decision=offloading_decision)
            next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
                                time_to_next_idle(acus[0], scenario),
                                scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
                                time_to_next_idle(acus[1], scenario),
                                scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
                                time_to_next_idle(acus[2], scenario),
                                scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
                                time_to_next_idle(acus[3], scenario),
                                scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
                                time_to_next_idle(acus[4], scenario),
                                self.computing_power, 
                                time_to_next_idle(self, scenario),
                                task.type]
            self.manager_RSU.os_policy.replay_buffer.add_memo(state=state,
                                                                action=actions,
                                                                next_state=next_state,
                                                                reward=reward)
            self.manager_RSU.os_policy.update()
        # elif offloading_strategy_decision == 3:
        #     # 使用分布式 DDPG 进行决策
        #     # 构建 DDPG 当前状态, 类型为 3*5+2 的列表, 类型为预计通信连接时间, 计算能力, 下一空闲时间,  本地卸载没有预计通信连接时间
        #     ddpg_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
        #                     acus[0].computing_power,
        #                   time_to_next_idle(acus[0], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                   time_to_next_idle(acus[1], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                   time_to_next_idle(acus[2], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                   time_to_next_idle(acus[3], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                   time_to_next_idle(acus[4], scenario),
        #                   self.computing_power, 
        #                   time_to_next_idle(self, scenario),
        #                   task.type]
        #     ddpg_actions = self.os_policy.get_action(ddpg_state)
        #     offloading_decision = [[acus[0], ddpg_actions[0]],
        #                           [acus[1], ddpg_actions[1]],
        #                           [acus[2], ddpg_actions[2]],
        #                           [acus[3], ddpg_actions[3]],
        #                           [acus[4], ddpg_actions[4]],
        #                           [acus[5], ddpg_actions[5]]]
        #     # 移除逐次策略打印，保留逻辑不变
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
        #     ddpg_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
        #                         acus[0].computing_power,
        #                         time_to_next_idle(acus[0], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), 
        #                        acus[1].computing_power,
        #                        time_to_next_idle(acus[1], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), 
        #                        acus[2].computing_power,
        #                        time_to_next_idle(acus[2], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), 
        #                        acus[3].computing_power, 
        #                        time_to_next_idle(acus[3], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), 
        #                        acus[4].computing_power, 
        #                        time_to_next_idle(acus[4], scenario),
        #                        self.computing_power,
        #                        time_to_next_idle(self, scenario),
        #                        task.type]

        #     self.os_policy.replay_buffer.add_memo(state=ddpg_state,
        #                                           action=ddpg_actions,
        #                                           next_state=ddpg_next_state,
        #                                           reward=reward)
        #     self.os_policy.update()
        # elif offloading_strategy_decision == 4:
        #     # 使用优化后的分布式 PPO 进行决策
        #     # 构建 PPO 当前状态, 类型为 3*5+2 的列表, 类型为预计通信连接时间, 计算能力, 下一空闲时间,  本地卸载没有预计通信连接时间
        #     ppo_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
        #                   acus[0].computing_power,
        #                   time_to_next_idle(acus[0], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), 
        #                   acus[1].computing_power,
        #                   time_to_next_idle(acus[1], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), 
        #                   acus[2].computing_power,
        #                   time_to_next_idle(acus[2], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]),
        #                   acus[3].computing_power,
        #                   time_to_next_idle(acus[3], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]),
        #                   acus[4].computing_power,
        #                   time_to_next_idle(acus[4], scenario),
        #                   self.computing_power, 
        #                   time_to_next_idle(self, scenario)]
            
        #     # 使用优化后的PPO Agent获取动作和相关信息
        #     if not non_learning:
        #         # 训练模式：获取动作、对数概率、价值估计和熵
        #         ppo_action, action_logprob, state_val, entropy = self.os_policy.get_action_with_log_prob(ppo_state)
        #     else:
        #         # 推理模式：只获取动作和价值估计
        #         ppo_action, state_val = self.os_policy.get_action(ppo_state)
        #         action_logprob = 0.0
        #         entropy = 0.0
            
        #     # 构建one-hot编码的动作
        #     po = [0 for _ in range(6)]
        #     po[ppo_action] = 1.0
        #     offloading_decision = [[acus[0], po[0]],
        #                           [acus[1], po[1]],
        #                           [acus[2], po[2]],
        #                           [acus[3], po[3]],
        #                           [acus[4], po[4]],
        #                           [acus[5], po[5]]]
        #     # 移除逐次策略打印，保留逻辑不变
            
        #     # 计算卸载结果
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
            
        #     # 如果不是非学习模式，添加经验到回放缓冲区
        #     if not non_learning:
        #         # 构建下一状态（执行动作后的状态）
        #         ppo_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
        #                           acus[0].computing_power,
        #                           time_to_next_idle(acus[0], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                           time_to_next_idle(acus[1], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,  
        #                           time_to_next_idle(acus[2], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                           time_to_next_idle(acus[3], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                           time_to_next_idle(acus[4], scenario),
        #                           self.computing_power, time_to_next_idle(self, scenario)]
        #         # 判断是否完成（考虑超时和任务质量）
        #         done = is_timeout or (total_time > task.maximum_allowable_delay * 0.9)
                
        #         # 使用优化后的add_memo接口添加经验
        #         self.os_policy.replay_buffer.add_memo(state=ppo_state,
        #                                               action=ppo_action,
        #                                               reward=reward,
        #                                               value=state_val,
        #                                               log_prob=action_logprob,
        #                                               next_state=ppo_next_state,
        #                                               done=done)
                
        #         # 更新PPO网络
        #         self.os_policy.update()
        # elif offloading_strategy_decision == 5:
        #     # 集中式DDPG卸载, 联邦学习
        #     if acus[0] != self.manager_RSU:
        #         self.manager_RSU = acus[0]
        #         self.os_policy.actor.load_state_dict(self.manager_RSU.os_policy.actor.state_dict())

        #     ddpg_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                   time_to_next_idle(acus[0], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                   time_to_next_idle(acus[1], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                   time_to_next_idle(acus[2], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                   time_to_next_idle(acus[3], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                   time_to_next_idle(acus[4], scenario),
        #                   self.computing_power, 
        #                   time_to_next_idle(self, scenario),
        #                   task.type]
        #     ddpg_actions = self.os_policy.get_action(ddpg_state)
        #     offloading_decision = [[acus[0], ddpg_actions[0]],
        #                           [acus[1], ddpg_actions[1]],
        #                           [acus[2], ddpg_actions[2]],
        #                           [acus[3], ddpg_actions[3]],
        #                           [acus[4], ddpg_actions[4]],
        #                           [acus[5], ddpg_actions[5]]]
        #     # 移除逐次策略打印，保留逻辑不变
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
        #     if not non_learning:
        #         ddpg_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                            time_to_next_idle(acus[0], scenario),
        #                            scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                            time_to_next_idle(acus[1], scenario),
        #                            scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                            time_to_next_idle(acus[2], scenario),
        #                            scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                            time_to_next_idle(acus[3], scenario),
        #                            scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                            time_to_next_idle(acus[4], scenario),
        #                            self.computing_power, 
        #                            time_to_next_idle(self, scenario),
        #                            task.type]
        #         self.manager_RSU.os_policy.replay_buffer.add_memo(state=ddpg_state,
        #                                                          action=ddpg_actions,
        #                                                          next_state=ddpg_next_state,
        #                                                          reward=reward)
        #         self.manager_RSU.os_policy.update()
        # elif offloading_strategy_decision == 6:
        #     # 集中式PPO卸载，使用优化后的PPO Agent
        #     if acus[0] != self.manager_RSU:
        #         self.manager_RSU = acus[0]
        #         self.os_policy.actor.load_state_dict(self.manager_RSU.os_policy.actor.state_dict())
                
        #         # 注意：状态维度为17维，与原始设计保持一致
            
        #     # 构建17维状态表示
        #     # 5个计算单元的信息 (3*5=15维) + 本地车辆信息 (2维) = 17维
        #     ppo_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                  time_to_next_idle(acus[0], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                  time_to_next_idle(acus[1], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                  time_to_next_idle(acus[2], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                  time_to_next_idle(acus[3], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                  time_to_next_idle(acus[4], scenario),
        #                  self.computing_power, time_to_next_idle(self, scenario)]
            
        #     # 根据训练模式选择动作获取方式
        #     if not non_learning:
        #         # 训练模式：获取动作、对数概率、价值估计和熵
        #         ppo_action, action_logprob, state_val, entropy = self.os_policy.get_action_with_log_prob(ppo_state)
        #     else:
        #         # 推理模式：只获取动作和价值估计
        #         ppo_action, state_val = self.os_policy.get_action(ppo_state)
        #         action_logprob = None
        #         entropy = None
            
        #     po = [0 for _ in range(6)]
        #     po[ppo_action] = 1.0
        #     offloading_decision = [[acus[0], po[0]],
        #                           [acus[1], po[1]],
        #                           [acus[2], po[2]],
        #                           [acus[3], po[3]],
        #                           [acus[4], po[4]],
        #                           [acus[5], po[5]]]
        #     # 移除逐次策略打印，保留逻辑不变
            
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
            
        #     if not non_learning:
        #         # 构建下一状态（训练结束后的状态）
        #         ppo_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                           time_to_next_idle(acus[0], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                           time_to_next_idle(acus[1], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                           time_to_next_idle(acus[2], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                           time_to_next_idle(acus[3], scenario),
        #                           scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                           time_to_next_idle(acus[4], scenario),
        #                           self.computing_power, time_to_next_idle(self, scenario)]
        #         # 判断任务是否完成（done信号）
        #         done = is_timeout or (total_time > task.maximum_allowable_delay * 0.9)
                
        #         # 使用优化后的PPO接口添加经验
        #         self.manager_RSU.os_policy.replay_buffer.add_memo(
        #             state=ppo_state,
        #             action=ppo_action,
        #             reward=reward,
        #             value=state_val,
        #             log_prob=action_logprob,
        #             next_state=ppo_next_state,
        #             done=done
        #         )
                
        #         # 调用PPO更新
        #         self.manager_RSU.os_policy.update()
        # elif offloading_strategy_decision == 7:
        #     # 使用分布式 DDPG 进行决策
        #     # 构建 DDPG 当前状态, 类型为 3*5+2 的列表, 类型为预计通信连接时间, 计算能力, 下一空闲时间,  本地卸载没有预计通信连接时间
        #     state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                   time_to_next_idle(acus[0], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                   time_to_next_idle(acus[1], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                   time_to_next_idle(acus[2], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                   time_to_next_idle(acus[3], scenario),
        #                   scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                   time_to_next_idle(acus[4], scenario),
        #                   self.computing_power, 
        #                   time_to_next_idle(self, scenario),
        #                   task.type]
        #     actions = self.os_policy.get_action(state)
        #     offloading_decision = [[acus[0], actions[0]],
        #                           [acus[1], actions[1]],
        #                           [acus[2], actions[2]],
        #                           [acus[3], actions[3]],
        #                           [acus[4], actions[4]],
        #                           [acus[5], actions[5]]]
        #     # 移除逐次策略打印，保留逻辑不变
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
        #     next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), 
        #                         acus[0].computing_power,
        #                         time_to_next_idle(acus[0], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), 
        #                        acus[1].computing_power,
        #                        time_to_next_idle(acus[1], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), 
        #                        acus[2].computing_power,
        #                        time_to_next_idle(acus[2], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), 
        #                        acus[3].computing_power, 
        #                        time_to_next_idle(acus[3], scenario),
        #                        scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), 
        #                        acus[4].computing_power, 
        #                        time_to_next_idle(acus[4], scenario),
        #                        self.computing_power,
        #                        time_to_next_idle(self, scenario),
        #                        task.type]

        #     self.os_policy.replay_buffer.add_memo(state=state,
        #                                           action=actions,
        #                                           next_state=next_state,
        #                                           reward=reward)
        #     self.os_policy.update()
        # elif offloading_strategy_decision == 8:
        #     # 集中式SAC卸载，使用SAC Agent
        #     if acus[0] != self.manager_RSU:
        #         self.manager_RSU = acus[0]
        #         self.os_policy.actor.load_state_dict(self.manager_RSU.os_policy.actor.state_dict())
                
        #         # 注意：状态维度为17维，与原始设计保持一致
            
        #     # 构建17维状态表示
        #     # 5个计算单元的信息 (3*5=15维) + 本地车辆信息 (2维) = 17维
        #     sac_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                  time_to_next_idle(acus[0], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                  time_to_next_idle(acus[1], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                  time_to_next_idle(acus[2], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                  time_to_next_idle(acus[3], scenario),
        #                  scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                  time_to_next_idle(acus[4], scenario),
        #                  self.computing_power, time_to_next_idle(self, scenario),
        #                  task.type]
            
        #     sac_actions = self.os_policy.get_action(sac_state, add_noise=False)
            
        #     # 构建卸载决策
        #     offloading_decision = [[acus[0], sac_actions[0]],
        #                           [acus[1], sac_actions[1]],
        #                           [acus[2], sac_actions[2]],
        #                           [acus[3], sac_actions[3]],
        #                           [acus[4], sac_actions[4]],
        #                           [acus[5], sac_actions[5]]]

        #     # 计算卸载结果
        #     reward, total_time, is_timeout, total_energy = scenario.calculate_task_offloading_result(task=task,
        #                                                                            task_vehicle=self,
        #                                                                            offloading_decision=offloading_decision)
            
        #     # 构建下一状态（执行动作后的状态）
        #     sac_next_state = [scenario.predicted_wireless_channel_connection_time(acus[0], acus[5]), acus[0].computing_power,
        #                         time_to_next_idle(acus[0], scenario),
        #                         scenario.predicted_wireless_channel_connection_time(acus[1], acus[5]), acus[1].computing_power,
        #                         time_to_next_idle(acus[1], scenario),
        #                         scenario.predicted_wireless_channel_connection_time(acus[2], acus[5]), acus[2].computing_power,
        #                         time_to_next_idle(acus[2], scenario),
        #                         scenario.predicted_wireless_channel_connection_time(acus[3], acus[5]), acus[3].computing_power,
        #                         time_to_next_idle(acus[3], scenario),
        #                         scenario.predicted_wireless_channel_connection_time(acus[4], acus[5]), acus[4].computing_power,
        #                         time_to_next_idle(acus[4], scenario),
        #                         self.computing_power, time_to_next_idle(self, scenario),
        #                         task.type]
            
        #     self.manager_RSU.os_policy.replay_buffer.add_memo(
        #         state=sac_state,
        #         action=sac_actions,
        #         reward=reward,
        #         next_state=sac_next_state
        #     )
            
        #     # 调用SAC更新
        #     self.manager_RSU.os_policy.update()           
        # 决策结束
        is_offloading = offloading_decision[5][1] == 0    # 调用场景函数返回任务卸载结果
        return reward, total_time, is_timeout, is_offloading, total_energy


class RSU(CU):
    """Roadside Unit (RSU), a stationary computing unit derived from CU.

    :param id: identifier of this RSU
    :param x: initial x coordinate of the RSU
    :param y: initial y coordinate of the RSU
    :param computing_power: processing capability, in G Cycles/s
    """
    def __init__(self, id, x, y, computing_power=RSU_COMPUTING_POWER):
        """Record the static attributes of this roadside unit.

        An RSU never moves, so only identity, position and compute
        capability (G Cycles/s) need to be stored.
        """
        self.id, self.x, self.y = id, x, y
        self.computing_power = computing_power