"""
第5次，仅有随着步数的三个图，除了资源利用率下降，其余两个都能正确收敛，区分性不高
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from gym import spaces
from torch.distributions import Categorical, Normal
import matplotlib.pyplot as plt
from collections import defaultdict


# ==============================
# 1. System Configuration
# ==============================
class SimConfig:
    """Simulation hyper-parameters and global random seeding."""

    def __init__(self):
        # Fixed random seed for reproducibility.
        self.seed = 42
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(self.seed)

        # Basic system dimensions.
        self.N_UE = 10    # number of IoT devices
        self.N_RB = 8     # number of frequency-domain resource blocks
        self.N_SLOT = 50  # number of time slots (kept small to observe training)

        # Number of training epochs.
        self.epochs = 200

        # PPO hyper-parameters.
        self.gamma = 0.99
        self.lambda_gae = 0.95
        self.clip_ratio = 0.2
        self.ppo_epochs = 16
        self.lr = 1e-4
        self.max_grad_norm = 0.5
        self.entropy_coef = 0.01
        self.value_loss_coef = 0.5
        self.mini_batch_size = 64
        self.episodes_per_epoch = 4

        # Exploration noise (policy action std) decay schedule.
        self.action_std_decay_rate = 0.98
        self.min_action_std = 0.1

        # Channel parameters.
        self.rho = 0.95     # AR(1) channel correlation coefficient
        self.sigma_e = 0.1  # CSI estimation error variance

        # Per-device traffic demand ranges (kept modest so convergence is visible).
        self.data_size_range = (80, 120)     # bits
        self.reliability_range = (0.8, 0.9)
        self.delay_range = (40, 60)          # ms

        # NB-IoT radio parameters.
        self.tx_power_dbm = 23
        self.noise_figure_db = 5
        self.thermal_noise_dbm = -174
        self.rb_bandwidth = 180e3  # bandwidth per resource block (Hz)

        # Robust optimization: Chebyshev-inequality confidence level.
        self.robust_confidence = 0.95



# ==============================
# 2. Channel Model with CSI Error
# ==============================
class ChannelModel:
    """Time-varying AR(1) fading channel with imperfect CSI."""

    def __init__(self, config):
        self.config = config
        self.rho = config.rho          # AR(1) correlation coefficient
        self.sigma_e = config.sigma_e  # CSI estimation-error std
        self.prev_fading = None        # last small-scale fading vector

    def reset(self):
        # Forget fading history so the next draw re-initializes the process.
        self.prev_fading = None

    def generate_channel_gain(self, N_UE):
        """Draw (estimated_gain, true_gain) arrays for all N_UE devices."""
        # Large-scale fading: random path loss in dB, converted to linear scale.
        pl_db = np.random.uniform(80, 110, size=N_UE)
        pl_linear = 10 ** (-pl_db / 10.0)

        # Small-scale fading: complex Gaussian evolved by an AR(1) process.
        if self.prev_fading is None:
            # Fresh CN(0,1) start when there is no history.
            re_part = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            im_part = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            fading = re_part + 1j * im_part
        else:
            re_inno = np.random.normal(0, self.sigma_e, N_UE)
            im_inno = np.random.normal(0, self.sigma_e, N_UE)
            fading = (self.rho * self.prev_fading
                      + np.sqrt(1 - self.rho ** 2) * (re_inno + 1j * im_inno))

        self.prev_fading = fading
        true_gain = pl_linear * np.abs(fading) ** 2

        # Multiplicative estimation error (simplified mismatch model).
        noise = np.random.normal(0, self.sigma_e, N_UE)
        est_gain = true_gain * (1 + noise)
        return est_gain, true_gain


# ==============================
# 3. IoT Device Class
# ==============================
class IoTDevice:
    """
    NB-IoT device with randomized traffic demand and transmission state.

    A device counts as a final success only if it finishes its data within
    its delay budget AND its empirical link reliability meets the per-device
    reliability requirement (see check_final_outcome).
    """

    def __init__(self, config, idx):
        self.config = config
        self.id = idx
        # Randomized QoS demand.
        self.data_size = np.random.uniform(*config.data_size_range)          # bits
        self.reliability_req = np.random.uniform(*config.reliability_range)
        self.delay_req = np.random.uniform(*config.delay_range)              # ms

        # RF parameters (copied from config).
        self.tx_power_dbm = config.tx_power_dbm
        self.noise_figure_db = config.noise_figure_db
        self.thermal_noise_dbm = config.thermal_noise_dbm

        # Progress tracking.
        self.transmitted_data = 0.0   # bits delivered so far
        self.transmission_time = 0.0  # elapsed latency (ms)
        self.failed_count = 0
        self.trans_done = False

        # Reliability bookkeeping.
        self.num_trans_attempt = 0
        self.num_trans_success = 0

        # Final verdict: True=success, False=failure, None=still running.
        self.final_outcome = None

    def _snr_linear(self, channel_gain, bandwidth_hz):
        """Linear SNR of this device's TX power over the given bandwidth.

        Shared by calculate_achievable_rate and try_transmit, which
        previously duplicated this computation.
        """
        tx_power_mw = 10 ** (self.tx_power_dbm / 10.0)
        noise_power_dbm = (self.thermal_noise_dbm
                           + 10 * np.log10(bandwidth_hz)
                           + self.noise_figure_db)
        noise_power_mw = 10 ** (noise_power_dbm / 10.0)
        rx_power_mw = tx_power_mw * channel_gain
        return rx_power_mw / noise_power_mw

    def calculate_achievable_rate(self, channel_gain, bandwidth_hz):
        """Achievable rate (bit/s) on a resource of the given bandwidth."""
        snr_linear = self._snr_linear(channel_gain, bandwidth_hz)
        spectral_eff = np.log2(1 + snr_linear)
        # Cap at the maximum supported modulation order (2 bit/s/Hz).
        spectral_eff = np.minimum(spectral_eff, 2.0)
        return spectral_eff * bandwidth_hz

    def try_transmit(self, channel_gain, ru_bandwidth_hz, duration_ms):
        """Attempt one transmission on an RU; returns (success, bits_carried).

        Updates attempt/success/failure counters and, on success, the
        transmitted-data total.
        """
        self.num_trans_attempt += 1

        # Bits this RU could carry at the achievable rate.
        rate_bps = self.calculate_achievable_rate(channel_gain, ru_bandwidth_hz)
        data_this_ru = rate_bps * (duration_ms / 1000.0)

        # SNR test against a reliability-dependent threshold.
        snr_linear = self._snr_linear(channel_gain, ru_bandwidth_hz)
        snr_db = 10 * np.log10(snr_linear + 1e-12)

        # Dynamic SNR threshold scaled by the reliability requirement.
        required_snr = -5 + 10 * self.reliability_req
        success = (snr_db >= required_snr)

        if success:
            self.num_trans_success += 1
            self.transmitted_data += data_this_ru
        else:
            self.failed_count += 1

        return success, data_this_ru

    def step_time(self, dt):
        """Advance elapsed latency by dt milliseconds."""
        self.transmission_time += dt
        # Mark data completion; QoS compliance is judged at episode end.
        if self.transmitted_data >= self.data_size:
            self.trans_done = True

    def check_final_outcome(self):
        """Final success = data done AND delay met AND reliability met.

        The verdict is cached in self.final_outcome after the first call.
        """
        if self.final_outcome is not None:
            return self.final_outcome

        # 1) All required data delivered?
        data_completed = (self.transmitted_data >= self.data_size)

        # 2) Delay budget respected?
        delay_satisfied = (self.transmission_time <= self.delay_req)

        # 3) Empirical reliability meets the requirement?
        current_reliability = (self.num_trans_success / max(1, self.num_trans_attempt))
        reliability_satisfied = (current_reliability >= self.reliability_req)

        self.final_outcome = (data_completed and delay_satisfied and reliability_satisfied)

        return self.final_outcome


# ==============================
# 4. Resource Unit Types
# ==============================
class RUType:
    """A resource-unit shape: frequency span, time span and subcarrier spacing."""

    def __init__(self, freq_size, time_size, subcarrier_spacing):
        self.freq_size = freq_size                    # number of subcarriers
        self.time_size = time_size                    # duration in slots
        self.subcarrier_spacing = subcarrier_spacing  # Hz per subcarrier

    @property
    def total_bandwidth_hz(self):
        # Aggregate bandwidth occupied by this RU.
        return self.subcarrier_spacing * self.freq_size


# ==============================
# 5. Resource Manager
# ==============================
class ResourceManager:
    """
    Tracks occupancy of the RB x SLOT grid and which device owns each cell.
    Whether a used cell was "effective" or "wasted" is resolved only after
    the episode ends, by the environment.
    """

    def __init__(self, N_RB, N_SLOT):
        self.N_RB = N_RB
        self.N_SLOT = N_SLOT
        self.reset()

    def reset(self):
        # allocation_grid: 1 where a cell is occupied, 0 where free.
        # device_grid: owner index + 1 (0 means free).
        self.allocation_grid = np.zeros((self.N_RB, self.N_SLOT), dtype=int)
        self.device_grid = np.zeros((self.N_RB, self.N_SLOT), dtype=int)

    def can_allocate(self, rb_start, rb_end, slot_start, slot_end):
        """True iff the rectangle fits inside the grid and every cell is free."""
        inside_rb = (rb_start >= 0) and (rb_end <= self.N_RB)
        inside_slot = (slot_start >= 0) and (slot_end <= self.N_SLOT)
        if not (inside_rb and inside_slot):
            return False
        window = self.allocation_grid[rb_start:rb_end, slot_start:slot_end]
        return np.all(window == 0)

    def allocate(self, device_idx, rb_start, rb_end, slot_start, slot_end):
        """Mark the rectangle as used and record its owner as device_idx + 1."""
        self.allocation_grid[rb_start:rb_end, slot_start:slot_end] = 1
        self.device_grid[rb_start:rb_end, slot_start:slot_end] = device_idx + 1

    def get_used_rb_count(self):
        # Number of occupied cells.
        return np.count_nonzero(self.allocation_grid)

    def get_total_rb_count(self):
        # Total number of cells in the grid.
        return self.N_RB * self.N_SLOT

    def get_slot_utilization(self, slot):
        # Fraction of RBs occupied within a single slot.
        return np.count_nonzero(self.allocation_grid[:, slot]) / self.N_RB

    def get_current_utilization(self):
        # Fraction of the whole grid currently occupied.
        return self.get_used_rb_count() / self.get_total_rb_count()

    def get_fragmentation(self):
        """Placeholder fragmentation metric; currently always 0."""
        if np.count_nonzero(self.allocation_grid) == 0:
            return 0
        # A real dispersion measure could be plugged in here.
        return 0


# ==============================
# 6. NB-IoT Environment (Heavily Modified)
# ==============================
class NBIoTEnv:
    """
    NB-IoT scheduling environment.

    - Each step corresponds to one time slot.
    - Action: pick a device, an RU type and an RB start index — three
      discrete choices decoded from a continuous [0,1]^3 vector
      (see _interpret_action).
    - After allocating, the chosen device immediately attempts one
      transmission and its elapsed latency advances by the RU duration.
    - The episode ends when the slot budget is exhausted or no device can
      still make progress; the final reward combines success rate,
      effective resource utilization and a wasted-resource penalty.
    """

    def __init__(self, config, robust=False):
        self.config = config
        self.robust = robust  # enable the Chebyshev-based robust margin
        self.N_UE = config.N_UE
        self.N_RB = config.N_RB
        self.N_SLOT = config.N_SLOT

        self.channel_model = ChannelModel(config)
        self.resource_manager = ResourceManager(self.N_RB, self.N_SLOT)

        # Candidate RU shapes. time_size is kept at 1 so one decision maps
        # cleanly onto one slot; larger values would need multi-slot logic.
        self.ru_types = [
            RUType(freq_size=1, time_size=1, subcarrier_spacing=15000),
            RUType(freq_size=2, time_size=1, subcarrier_spacing=15000),
            RUType(freq_size=3, time_size=1, subcarrier_spacing=7500),
        ]

        # State: per UE [normalized gain, remaining data ratio, remaining
        # time ratio] plus global [utilization, slot progress].
        self.state_dim_per_ue = 3
        self.state_dim = self.state_dim_per_ue * self.N_UE + 2
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.state_dim,), dtype=np.float32)

        # Continuous action vector in [0,1]^3, manually discretized into
        # (device_idx, ru_idx, freq_start).
        self.action_dim = 3
        self.action_space = spaces.Box(low=0.0, high=1.0, shape=(self.action_dim,), dtype=np.float32)

        self.metrics = defaultdict(list)

    def reset(self):
        """Start a new episode: fresh devices, grid, channel and metrics."""
        self.current_slot = 0
        self.resource_manager.reset()
        self.channel_model.reset()

        self.devices = [IoTDevice(self.config, i) for i in range(self.N_UE)]
        # Draw the channel for the first slot.
        self.est_gain, self.true_gain = self.channel_model.generate_channel_gain(self.N_UE)
        self.done_flag = False
        self.metrics.clear()

        return self._get_state()

    def calculate_robust_factor(self, channel_gain):
        """Chebyshev-based down-scaling factor (<= 1) for the estimated gain.

        Transmissions are planned against this pessimistic gain so that QoS
        holds despite channel/CSI uncertainty. Returns 1.0 when robust mode
        is off. Uses config.robust_confidence (previously hard-coded 0.95).
        """
        if not self.robust:
            return 1.0

        # 1) Uncertainty from the AR(1) channel evolution.
        temporal_var = (1 - self.config.rho ** 2) * channel_gain

        # 2) Uncertainty from CSI estimation error.
        est_var = self.config.sigma_e ** 2 * channel_gain

        # 3) Combined variance.
        total_var = temporal_var + est_var

        # 4) Chebyshev inequality: P(|X - mu| >= k*sigma) <= 1/k^2.
        confidence = self.config.robust_confidence
        k = np.sqrt(1 / (1 - confidence))

        # 5) Pessimistic gain after subtracting the safety margin.
        safety_margin = k * np.sqrt(total_var)
        robust_gain = channel_gain - safety_margin

        # Never scale below 10% of the estimate.
        return max(robust_gain / channel_gain, 0.1)

    def _get_state(self):
        """
        Assemble the observation: per UE [normalized gain, remaining data
        ratio, remaining time ratio], then global [utilization, progress].
        """
        # 1) Channel gains normalized by the current maximum.
        gains = self.est_gain / (np.max(self.est_gain) + 1e-9)

        # 2) Remaining data / remaining time ratios per UE.
        remain_data_ratio = []
        remain_time_ratio = []
        for d in self.devices:
            r_data = max(0.0, 1.0 - d.transmitted_data / d.data_size)
            if d.transmission_time < d.delay_req:
                r_time = max(0.0, (d.delay_req - d.transmission_time) / d.delay_req)
            else:
                r_time = 0.0
            remain_data_ratio.append(r_data)
            remain_time_ratio.append(r_time)

        # 3) Global features: grid utilization and slot progress.
        util = self.resource_manager.get_current_utilization()
        progress = self.current_slot / self.N_SLOT

        # Concatenate per-UE triples followed by the global features.
        per_ue = []
        for i in range(self.N_UE):
            per_ue.extend([gains[i], remain_data_ratio[i], remain_time_ratio[i]])
        per_ue = np.array(per_ue, dtype=np.float32)

        global_feats = np.array([util, progress], dtype=np.float32)
        state = np.concatenate([per_ue, global_feats])
        return state

    def step(self, action):
        """
        Allocate resources per the action, run one transmission attempt,
        advance one slot and compute the reward. RU time_size is treated as
        1 ms = 1 slot; larger time_size values would need multi-slot logic.
        """
        if self.done_flag:
            # Stepping a finished episode is a no-op.
            return self._get_state(), 0.0, True, {}

        # Decode the continuous action into discrete choices.
        device_idx, ru_idx, freq_idx = self._interpret_action(action)

        reward = 0.0
        info = {}
        # Resolve the requested rectangle on the grid.
        ru = self.ru_types[ru_idx]
        rb_start = freq_idx
        rb_end = rb_start + ru.freq_size
        slot_start = self.current_slot
        slot_end = slot_start + 1  # each step advances exactly one slot

        feasible = self.resource_manager.can_allocate(rb_start, rb_end, slot_start, slot_end)
        if not feasible:
            # Infeasible placement: penalize, but time still advances.
            reward = -5.0
        else:
            self.resource_manager.allocate(device_idx, rb_start, rb_end, slot_start, slot_end)
            device = self.devices[device_idx]

            # Pessimistic gain under robust mode.
            factor = self.calculate_robust_factor(self.est_gain[device_idx])
            effective_gain = self.est_gain[device_idx] * factor

            # RU bandwidth and duration (simplified).
            ru_bandwidth = ru.total_bandwidth_hz * 1.0
            duration_ms = 1.0 * ru.time_size  # 1 RU = time_size ms

            success, data_tx = device.try_transmit(effective_gain, ru_bandwidth, duration_ms)
            device.step_time(duration_ms)

            if success:
                # Reward proportional to delivered data.
                reward += 0.01 * data_tx

                # Bonus for staying within the delay budget.
                if device.transmission_time < device.delay_req:
                    reward += 2.0

                current_reliability = (device.num_trans_success / device.num_trans_attempt
                                       if device.num_trans_attempt > 0 else 0)
                if current_reliability >= device.reliability_req:
                    reward += 2.0  # reliability requirement currently met
            else:
                # Failure penalty, softened while retransmission is possible.
                remaining_time = max(0, device.delay_req - device.transmission_time)
                if remaining_time > 0:
                    reward -= 0.5  # mild: there is still time to retry
                else:
                    reward -= 1.0  # severe: delay requirement can no longer be met

            # Terminal bonus/penalty once this device finished its data.
            if device.trans_done:
                if device.check_final_outcome():
                    reward += 5.0  # met all QoS constraints
                else:
                    reward -= 1.0  # finished data but violated QoS

        # Advance to the next slot and refresh the channel.
        self.current_slot += 1
        if self.current_slot < self.N_SLOT:
            self.est_gain, self.true_gain = self.channel_model.generate_channel_gain(self.N_UE)

        # Episode termination and episode-level reward.
        done = self._check_done()
        if done:
            self.done_flag = True
            final_reward = self._calc_final_reward()
            reward += final_reward
            info["final_reward"] = final_reward

        next_state = self._get_state()
        info["status"] = "feasible" if feasible else "infeasible"
        return next_state, reward, done, info

    def _interpret_action(self, action):
        """Split a continuous [0,1]^3 action into (device_idx, ru_idx, freq_idx)."""
        d_idx = int(np.clip(action[0] * self.N_UE, 0, self.N_UE - 1))
        r_idx = int(np.clip(action[1] * len(self.ru_types), 0, len(self.ru_types) - 1))
        f_idx = int(np.clip(action[2] * self.N_RB, 0, self.N_RB - 1))
        return d_idx, r_idx, f_idx

    def _check_done(self):
        """Episode ends when slots run out or no device can still progress."""
        # Condition 1: slot budget exhausted.
        if self.current_slot >= self.N_SLOT:
            return True
        # Condition 2: every unfinished device has already exceeded its
        # delay budget, so continuing cannot change any outcome.
        all_over = True
        for d in self.devices:
            if not d.trans_done:
                if d.transmission_time < d.delay_req:
                    all_over = False
                    break
        return all_over

    def _calc_final_reward(self):
        """Episode-end reward from success rate and resource accounting."""
        # 1) Scheduling success rate (also populates device.final_outcome).
        success_devices = sum([1 for d in self.devices if d.check_final_outcome()])
        success_rate = success_devices / self.N_UE

        # 2) Effective vs. wasted resource usage, cell by cell.
        total_rbs = self.N_RB * self.N_SLOT
        effective_rbs = 0
        used_rbs = 0

        for r in range(self.N_RB):
            for s in range(self.N_SLOT):
                if self.resource_manager.allocation_grid[r, s] > 0:
                    used_rbs += 1
                    dev_idx = self.resource_manager.device_grid[r, s] - 1
                    # final_outcome was cached by check_final_outcome above.
                    if self.devices[dev_idx].final_outcome:
                        effective_rbs += 1

        effective_utilization = effective_rbs / total_rbs
        wasted_ratio = (used_rbs - effective_rbs) / total_rbs

        # 3) All components are already in [0, 1].
        success_reward = success_rate
        util_reward = effective_utilization
        waste_penalty = wasted_ratio

        # 4) Weights: success dominates, then utilization, then waste.
        A = 1.0  # success-rate weight
        B = 0.5  # effective-utilization weight
        C = 0.3  # wasted-resource penalty weight

        final_reward = (A * success_reward +
                        B * util_reward -
                        C * waste_penalty)

        # 5) Log episode metrics.
        self.metrics["success_rate"].append(success_rate)
        self.metrics["effective_utilization"].append(effective_utilization)
        self.metrics["wasted_ratio"].append(wasted_ratio)

        return final_reward * 100  # scaled up to ease training


# ==============================
# 7. PPO Implementation
# ==============================
class PPOBuffer:
    """Trajectory storage plus GAE advantage/return computation for PPO."""

    def __init__(self, config):
        self.gamma = config.gamma       # discount factor
        self.lmbda = config.lambda_gae  # GAE smoothing factor
        self.reset()

    def reset(self):
        # One parallel list per stored field.
        self.states, self.actions = [], []
        self.rewards, self.dones = [], []
        self.log_probs, self.values = [], []

    def store(self, state, action, reward, done, log_prob, value):
        """Append one transition to the parallel buffers."""
        for buf, item in ((self.states, state), (self.actions, action),
                          (self.rewards, reward), (self.dones, done),
                          (self.log_probs, log_prob), (self.values, value)):
            buf.append(item)

    def finish_trajectory(self):
        """Compute GAE advantages and returns over all stored transitions."""
        n = len(self.rewards)
        advantages = [0.0] * n
        running_adv = 0
        # Backward pass: done flags zero both bootstrap and carried advantage.
        for t in range(n - 1, -1, -1):
            bootstrap = self.values[t + 1] if t + 1 < n else 0
            live = 1.0 - self.dones[t]
            delta = self.rewards[t] + self.gamma * bootstrap * live - self.values[t]
            running_adv = delta + self.gamma * self.lmbda * live * running_adv
            advantages[t] = running_adv
        self.advantages = np.array(advantages, dtype=np.float32)

        # Returns are the GAE targets: value + advantage.
        self.returns = np.array(
            [v + a for v, a in zip(self.values, self.advantages)],
            dtype=np.float32)

    def get(self):
        """Return all buffers as float32 numpy arrays."""
        return dict(
            states=np.array(self.states, dtype=np.float32),
            actions=np.array(self.actions, dtype=np.float32),
            rewards=np.array(self.rewards, dtype=np.float32),
            dones=np.array(self.dones, dtype=np.float32),
            log_probs=np.array(self.log_probs, dtype=np.float32),
            values=np.array(self.values, dtype=np.float32),
            advantages=self.advantages,
            returns=self.returns
        )

    def clear(self):
        self.reset()


class ActorCritic(nn.Module):
    """Shared-trunk actor-critic; the actor's mean is squashed into (0,1)."""

    def __init__(self, state_dim, action_dim, config):
        super().__init__()
        self.config = config
        hidden_size = 256

        # Shared feature extractor.
        self.shared = nn.Sequential(
            nn.Linear(state_dim, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU()
        )
        # Actor head: continuous action_dim-vector of logits.
        self.actor = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, action_dim)
        )
        # Critic head: scalar state value.
        self.critic = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, 1)
        )

        # Isotropic exploration std; decayed externally by the agent.
        self.action_std = 0.5

    def forward(self, x):
        """Return (actor logits, state value) for input states x."""
        features = self.shared(x)
        return self.actor(features), self.critic(features)

    def _distribution(self, logits):
        # Sigmoid maps logits to [0,1] means (handy for later discretization);
        # the std is shared across all action dimensions.
        mean = torch.sigmoid(logits)
        return Normal(mean, torch.full_like(mean, self.action_std))

    def get_action_and_value(self, x):
        """Sample an action; returns (action, summed log-prob, value)."""
        logits, value = self.forward(x)
        dist = self._distribution(logits)
        action = dist.sample()
        log_prob = dist.log_prob(action).sum(dim=-1)
        return action, log_prob, value

    def evaluate_actions(self, x, action):
        """Log-prob, value and entropy of given actions under the policy."""
        logits, value = self.forward(x)
        dist = self._distribution(logits)
        log_probs = dist.log_prob(action).sum(dim=-1)
        entropy = dist.entropy().sum(dim=-1)
        return log_probs, value, entropy


class PPOAgent:
    """PPO agent: action selection, clipped policy updates and std decay."""

    def __init__(self, config, env):
        self.config = config
        self.env = env
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

        self.ac = ActorCritic(self.state_dim, self.action_dim, config)
        self.optimizer = optim.Adam(self.ac.parameters(), lr=config.lr)
        self.buffer = PPOBuffer(config)
        self.training_history = defaultdict(list)

    def select_action(self, state):
        """Sample an action for one state; returns (action, log_prob, value)."""
        obs = torch.FloatTensor(state).unsqueeze(0)
        action, log_prob, value = self.ac.get_action_and_value(obs)
        return action.detach().numpy()[0], log_prob.item(), value.item()

    def update_policy(self):
        """Run `ppo_epochs` clipped-PPO gradient steps on the stored batch."""
        data = self.buffer.get()
        states = torch.FloatTensor(data["states"])
        actions = torch.FloatTensor(data["actions"])
        advantages = torch.FloatTensor(data["advantages"])
        returns = torch.FloatTensor(data["returns"])
        old_log_probs = torch.FloatTensor(data["log_probs"])

        # Standardize advantages for stable gradients.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        for _ in range(self.config.ppo_epochs):
            new_log_probs, values, entropy = self.ac.evaluate_actions(states, actions)
            ratio = torch.exp(new_log_probs - old_log_probs)

            # Clipped surrogate objective.
            clipped = torch.clamp(ratio, 1 - self.config.clip_ratio, 1 + self.config.clip_ratio)
            policy_loss = -torch.min(ratio * advantages, clipped * advantages).mean()
            value_loss = F.mse_loss(values.squeeze(), returns)
            loss = (policy_loss
                    + self.config.value_loss_coef * value_loss
                    - self.config.entropy_coef * entropy.mean())

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.ac.parameters(), self.config.max_grad_norm)
            self.optimizer.step()

            self.training_history["policy_loss"].append(policy_loss.item())
            self.training_history["value_loss"].append(value_loss.item())
            self.training_history["entropy"].append(entropy.mean().item())

    def decay_action_std(self):
        """Multiplicatively decay exploration std down to the configured floor."""
        self.ac.action_std = max(self.ac.action_std * self.config.action_std_decay_rate,
                                 self.config.min_action_std)


# ==============================
# 8. Training and Evaluation
# ==============================
def train_epoch(agent, env, config):
    """Collect `episodes_per_epoch` episodes, then run one PPO update.

    Returns a dict of per-epoch mean metrics: reward, plus the environment's
    success_rate / effective_utilization when the env logged them.
    """
    episode_metrics = defaultdict(list)

    for _ in range(config.episodes_per_epoch):
        state = env.reset()
        done = False
        ep_reward = 0

        while not done:
            # Sample an action and step the environment.
            action, log_prob, val = agent.select_action(state)
            next_state, reward, done, info = env.step(action)

            # Record the transition for the PPO update.
            agent.buffer.store(state, action, reward, done, log_prob, val)

            state = next_state
            ep_reward += reward

        # Close the trajectory (computes GAE) and log episode stats.
        agent.buffer.finish_trajectory()
        episode_metrics["reward"].append(ep_reward)

        # Pull the environment's episode-end metrics when available.
        if len(env.metrics.get("success_rate", [])) > 0:
            episode_metrics["success_rate"].append(env.metrics["success_rate"][-1])
        if len(env.metrics.get("effective_utilization", [])) > 0:
            episode_metrics["effective_utilization"].append(
                env.metrics["effective_utilization"][-1])

    # One policy update over everything collected this epoch.
    agent.update_policy()
    agent.buffer.clear()

    return {k: np.mean(v) for k, v in episode_metrics.items()}


def train_and_evaluate(config):
    """Train robust and non-robust PPO agents side by side.

    Returns {"robust": {...}, "nonrobust": {...}} mapping metric names to
    per-epoch lists.
    """
    env_robust = NBIoTEnv(config, robust=True)
    env_nonrobust = NBIoTEnv(config, robust=False)

    agent_robust = PPOAgent(config, env_robust)
    agent_nonrobust = PPOAgent(config, env_nonrobust)

    history = {"robust": defaultdict(list), "nonrobust": defaultdict(list)}

    for epoch in range(1, config.epochs + 1):
        metrics_robust = train_epoch(agent_robust, env_robust, config)
        metrics_nonrobust = train_epoch(agent_nonrobust, env_nonrobust, config)

        for key, value in metrics_robust.items():
            history["robust"][key].append(value)
        for key, value in metrics_nonrobust.items():
            history["nonrobust"][key].append(value)

        # Shrink exploration noise once per epoch.
        agent_robust.decay_action_std()
        agent_nonrobust.decay_action_std()

        # Periodic progress report.
        if epoch % 10 == 0:
            sr_r = metrics_robust.get("success_rate", 0.0)
            sr_n = metrics_nonrobust.get("success_rate", 0.0)
            util_r = metrics_robust.get("effective_utilization", 0.0)
            util_n = metrics_nonrobust.get("effective_utilization", 0.0)
            print(
                f"Epoch {epoch} | Robust -> Reward: {metrics_robust.get('reward', 0):.2f}, SR: {sr_r:.2f}, Util: {util_r:.2f}")
            print(
                f"           | NonRob -> Reward: {metrics_nonrobust.get('reward', 0):.2f}, SR: {sr_n:.2f}, Util: {util_n:.2f}")

    return history


def experiment_csi_error_impact(config, error_values, train_epochs=50):
    """Sweep CSI error levels and record final-epoch metrics per scheme.

    For each sigma_e in `error_values`, retrains both agents for
    `train_epochs` epochs and keeps the last-epoch success rate, effective
    utilization and reward. config.epochs and config.sigma_e are temporarily
    overridden and restored before returning, even if training raises
    (the original left config.sigma_e mutated to the last swept value).
    """
    original_epochs = config.epochs
    original_sigma_e = config.sigma_e
    config.epochs = train_epochs

    def last_or_zero(seq):
        # Final-epoch value of a metric series; 0 when it was never logged.
        return seq[-1] if len(seq) > 0 else 0

    results = {
        "robust": {"success_rate": [], "resource_util": [], "reward": []},
        "nonrobust": {"success_rate": [], "resource_util": [], "reward": []},
    }

    try:
        for e in error_values:
            config.sigma_e = e
            history = train_and_evaluate(config)
            # Keep the last epoch's value of each metric (could also use
            # the max or mean over epochs).
            for tag in ("robust", "nonrobust"):
                results[tag]["success_rate"].append(
                    last_or_zero(history[tag]["success_rate"]))
                results[tag]["resource_util"].append(
                    last_or_zero(history[tag]["effective_utilization"]))
                results[tag]["reward"].append(
                    last_or_zero(history[tag]["reward"]))
    finally:
        # Restore the caller's configuration.
        config.epochs = original_epochs
        config.sigma_e = original_sigma_e

    return results


# ========== 作图函数 (可选) ==========
def plot_metric(title, data_robust, data_nonrobust, x_label, y_label):
    """Plot raw and EMA-smoothed curves for robust vs. non-robust runs."""
    episodes = np.arange(len(data_robust))

    def smooth_curve(data, weight=0.1):
        # Exponential moving average; the first point is passed through.
        smoothed = []
        for value in data:
            previous = smoothed[-1] if smoothed else value
            smoothed.append(previous * (1 - weight) + value * weight)
        return np.array(smoothed)

    s_r = smooth_curve(data_robust)
    s_n = smooth_curve(data_nonrobust)

    plt.figure(figsize=(8, 5))
    # Raw curves drawn thick and faint behind the smoothed ones.
    plt.plot(episodes, data_robust, color='blue', linewidth=6, alpha=0.3, label='Robust (Raw)')
    plt.plot(episodes, s_r, 'b-', linewidth=2, label='Robust (Smoothed)')
    plt.plot(episodes, data_nonrobust, color='red', linewidth=6, alpha=0.3, label='Non-robust (Raw)')
    plt.plot(episodes, s_n, 'r--', linewidth=2, label='Non-robust (Smoothed)')
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.grid(True)
    plt.legend()
    plt.show()


def plot_csi_error_impact(results, error_values):
    """Plot each swept metric against the CSI error level sigma_e.

    Uses the module-level matplotlib import; the original re-imported
    pyplot locally, shadowing it redundantly.
    """
    # (display title, key in the results dict) — extend as needed.
    metrics = [("Success Rate", "success_rate"),
               ("Resource Utilization", "resource_util"),
               ("Final Reward", "reward")]

    for title, key in metrics:
        plt.figure()
        plt.plot(error_values, results["robust"][key], 'b-o', label='Robust')
        plt.plot(error_values, results["nonrobust"][key], 'r-s', label='Non-robust')
        plt.title(f"{title} vs. CSI Error (sigma_e)")
        plt.xlabel("sigma_e")
        plt.ylabel(title)
        plt.legend()
        plt.grid(True)
        plt.show()


def main():
    """Entry point: train both agents, plot curves, then sweep CSI error."""
    # Seed every RNG source for reproducibility.
    seed = 42
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    config = SimConfig()
    history = train_and_evaluate(config)

    # Comparison curves, drawn only when both runs produced the metric.
    curve_specs = [
        ("Episode Reward", "reward", "Reward"),
        ("Success Rate", "success_rate", "SR"),
        ("Effective Resource Utilization", "effective_utilization", "Util"),
    ]
    for title, key, y_label in curve_specs:
        robust_series = history["robust"][key]
        nonrobust_series = history["nonrobust"][key]
        if len(robust_series) > 0 and len(nonrobust_series) > 0:
            plot_metric(title, robust_series, nonrobust_series, "Epoch", y_label)

    print("Training complete.")

    # 2) Re-train at several CSI error levels and plot the impact.
    error_values = [0.01, 0.05, 0.1, 0.2, 0.3]
    results = experiment_csi_error_impact(config, error_values, train_epochs=50)
    plot_csi_error_impact(results, error_values)


# Run the full training/evaluation pipeline only when executed as a script.
if __name__ == "__main__":
    main()
