"""
无中生有-NB-RL-完善/ImprovingSimulationProgram

"""

import argparse
import copy
import logging
import os
import pickle
import traceback
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import torch
# All required imports are collected at the top of the file
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal


# 1. System parameter configuration class
class SimConfig:
    """Central container for every tunable parameter of the LEO NB-IoT simulation.

    Groups system sizing, satellite orbit, channel model, per-device QoS ranges,
    link-budget constants, PPO hyper-parameters, environment limits and
    evaluation settings. Supports pickling to/from disk and dict round-trips.
    """

    def __init__(self):
        # --- System sizing ---
        self.N_UE = 100   # number of IoT terminals
        self.N_RB = 48    # number of available resource blocks
        self.N_SLOT = 16  # number of time slots

        # --- LEO satellite ---
        self.satellite_height = 550e3    # orbit height (m)
        self.satellite_velocity = 7.6e3  # orbital speed (m/s)
        self.carrier_frequency = 2e9     # carrier frequency (Hz)

        # --- Channel model ---
        self.rho = 0.95     # AR(1) correlation coefficient
        self.sigma_e = 0.1  # std of the CSI estimation error

        # --- Per-device QoS ranges ---
        self.data_size_range = (1000, 10000)  # payload size (bits)
        self.reliability_range = (0.9, 0.99)  # reliability requirement
        self.delay_range = (10, 100)          # delay constraint (ms)

        # --- Link budget ---
        self.tx_power_dbm = 23         # NB-IoT terminal Tx power (dBm)
        self.noise_figure_db = 5       # receiver noise figure (dB)
        self.thermal_noise_dbm = -174  # thermal noise PSD (dBm/Hz)
        self.bandwidth = 180e3         # NB-IoT bandwidth (Hz)

        # --- PPO hyper-parameters ---
        self.gamma = 0.99          # discount factor
        self.lambda_gae = 0.95     # GAE lambda
        self.clip_ratio = 0.2      # PPO clipping parameter
        self.lr = 3e-4             # learning rate
        self.n_epochs = 10         # optimisation epochs per batch
        self.batch_size = 64       # minibatch size
        self.n_updates = 4         # updates per epoch
        self.max_grad_norm = 0.5   # gradient clipping threshold
        self.value_coef = 0.5      # value-loss coefficient
        self.entropy_coef = 0.01   # entropy-bonus coefficient
        self.target_kl = 0.015     # target KL divergence
        self.rollout_length = 2048  # steps collected per rollout

        # --- Environment ---
        self.max_steps = 1000                     # max steps per episode
        self.min_successful_rate = 0.1            # minimum required success rate
        self.resource_efficiency_threshold = 0.6  # resource-utilisation threshold

        # --- Evaluation ---
        self.n_eval_episodes = 50              # number of evaluation episodes
        self.eval_sigma_e_range = (0, 1.0, 11)  # CSI-error sweep (start, end, num)
        self.eval_deterministic = True          # deterministic policy at eval time

    def save(self, path):
        """Pickle the full attribute dict to `path`."""
        with open(path, 'wb') as fh:
            pickle.dump(self.__dict__, fh)

    def load(self, path):
        """Restore attributes previously written by save()."""
        with open(path, 'rb') as fh:
            self.__dict__.update(pickle.load(fh))

    def to_dict(self):
        """Return a deep, independent copy of the configuration as a dict."""
        return copy.deepcopy(vars(self))

    def update(self, config_dict):
        """Overwrite attributes from `config_dict` (unknown keys are added)."""
        vars(self).update(config_dict)

    def validate(self):
        """Sanity-check the configuration; raises AssertionError on bad values."""
        assert self.N_UE > 0, "终端数量必须大于0"
        assert self.N_RB > 0, "资源块数量必须大于0"
        assert self.N_SLOT > 0, "时隙数量必须大于0"
        assert 0 <= self.rho <= 1, "AR模型相关系数必须在[0,1]范围内"
        assert self.sigma_e >= 0, "信道估计误差标准差必须非负"
        assert self.data_size_range[0] < self.data_size_range[1], "数据大小范围无效"
        assert 0 <= self.reliability_range[0] < self.reliability_range[1] <= 1, "可靠性范围无效"
        assert 0 < self.delay_range[0] < self.delay_range[1], "时延范围无效"
        assert 0 < self.gamma < 1, "折扣因子必须在(0,1)范围内"
        assert self.batch_size > 0, "批次大小必须大于0"
        assert self.max_steps > 0, "最大步数必须大于0"

        return True

    def __str__(self):
        """Human-readable multi-line summary of the configuration."""
        return f"""SimConfig:
    System Parameters:
        N_UE: {self.N_UE}
        N_RB: {self.N_RB}
        N_SLOT: {self.N_SLOT}

    Satellite Parameters:
        Height: {self.satellite_height / 1000:.1f} km
        Velocity: {self.satellite_velocity / 1000:.1f} km/s
        Carrier Frequency: {self.carrier_frequency / 1e9:.1f} GHz

    Channel Parameters:
        AR coefficient (rho): {self.rho}
        CSI error std (sigma_e): {self.sigma_e}

    IoT Device Parameters:
        Data Size Range: {self.data_size_range} bits
        Reliability Range: {self.reliability_range}
        Delay Range: {self.delay_range} ms

    Link Budget:
        Tx Power: {self.tx_power_dbm} dBm
        Noise Figure: {self.noise_figure_db} dB
        Bandwidth: {self.bandwidth / 1e3:.1f} kHz

    Algorithm Parameters:
        Learning Rate: {self.lr}
        Batch Size: {self.batch_size}
        Number of Epochs: {self.n_epochs}
        Rollout Length: {self.rollout_length}

    Environment Parameters:
        Max Steps: {self.max_steps}
        Min Success Rate: {self.min_successful_rate}
        Resource Efficiency Threshold: {self.resource_efficiency_threshold}
        """


from scipy.constants import c  # 光速


# 2. Channel model class
class ChannelModel:
    """LEO satellite-to-ground channel model.

    Combines deterministic geometry (free-space path loss, Doppler shift)
    with an AR(1) Rayleigh small-scale fading process and a log-normal CSI
    estimation error.
    """

    def __init__(self, config):
        """
        Args:
            config: configuration object; must provide `N_UE`, `rho`, `sigma_e`.
                Orbit/carrier parameters (satellite_height, satellite_velocity,
                carrier_frequency) are read from the config when present.
                Fix: they used to be hard-coded here, silently ignoring the
                values configured in SimConfig; the old numbers remain only as
                fallbacks for configs that lack those attributes.
        """
        self.config = config

        # Orbit / carrier parameters: config first, legacy defaults second.
        self.satellite_height = getattr(config, 'satellite_height', 550e3)      # m
        self.satellite_velocity = getattr(config, 'satellite_velocity', 7.6e3)  # m/s
        self.carrier_frequency = getattr(config, 'carrier_frequency', 2e9)      # Hz
        self.wavelength = c / self.carrier_frequency

        # Fading / estimation-error parameters
        self.rho = config.rho          # AR(1) correlation coefficient
        self.sigma_e = config.sigma_e  # sigma of the log-normal CSI error

        # Satellite state; explicit float dtype so the in-place position
        # update in update_channel_state() is safe even if a config supplies
        # integer values (an int array would reject the float increment).
        self.satellite_position = np.array([0.0, 0.0, self.satellite_height], dtype=float)
        self.satellite_velocity_vector = np.array([self.satellite_velocity, 0.0, 0.0], dtype=float)

        # Place users inside the coverage footprint
        self.initialize_user_positions()

        # Last small-scale fading sample. NOTE(review): this attribute is only
        # read, never written, by this class — callers are expected to carry
        # the fading state themselves via the method return values.
        self.previous_fading = None

    def initialize_user_positions(self):
        """Scatter N_UE users uniformly inside the satellite coverage footprint."""
        earth_radius = 6371e3  # Earth radius (m)

        # Maximum central angle of the coverage footprint
        max_coverage_angle = np.arccos(earth_radius / (earth_radius + self.satellite_height))

        n_users = self.config.N_UE

        # Random azimuth and central angle per user (draw order preserved)
        self.user_azimuth = np.random.uniform(0, 2 * np.pi, n_users)
        self.user_elevation = np.random.uniform(0, max_coverage_angle, n_users)

        # Great-circle ground distance from the sub-satellite point
        ground_distance = earth_radius * self.user_elevation

        # Users lie on the ground plane (z = 0)
        self.user_positions = np.column_stack([
            ground_distance * np.cos(self.user_azimuth),
            ground_distance * np.sin(self.user_azimuth),
            np.zeros(n_users),
        ])

        # Derive distances, angles and Doppler shifts
        self.update_user_satellite_geometry()

    def update_user_satellite_geometry(self):
        """Recompute per-user distance, elevation, azimuth and Doppler shift."""
        # Vector from each user to the satellite
        relative_positions = self.satellite_position - self.user_positions

        # Slant range per user
        self.user_distances = np.linalg.norm(relative_positions, axis=1)

        # Elevation angle (from the vertical component of the slant vector)
        self.user_elevation = np.arcsin(
            relative_positions[:, 2] / self.user_distances
        )

        # Azimuth angle
        self.user_azimuth = np.arctan2(
            relative_positions[:, 1],
            relative_positions[:, 0]
        )

        # Doppler shift depends on the geometry just computed
        self.update_doppler_shift()

    def update_doppler_shift(self):
        """Compute per-user Doppler shift and the resulting coherence time."""
        # Unit line-of-sight vectors from each user to the satellite
        relative_positions = self.satellite_position - self.user_positions
        unit_vectors = relative_positions / self.user_distances[:, np.newaxis]

        # Satellite velocity projected onto each line of sight
        relative_velocities = np.dot(unit_vectors, self.satellite_velocity_vector)

        # Doppler shift (Hz)
        self.doppler_shifts = relative_velocities / self.wavelength

        # Coherence time from the maximum Doppler (Clarke's 0.423/f_d rule)
        self.coherence_time = 0.423 / np.max(np.abs(self.doppler_shifts))

    def calculate_path_loss(self):
        """Return the per-user linear path-loss gain (i.e. 10^(-loss_dB/10))."""
        # Free-space path loss
        free_space_loss_db = 20 * np.log10(4 * np.pi * self.user_distances / self.wavelength)

        # Atmospheric loss (simplified: 0.1 dB per km of slant range)
        atmospheric_loss_db = 0.1 * self.user_distances / 1000

        # Rain attenuation (clear-sky assumption: zero)
        rain_loss_db = np.zeros_like(self.user_distances)

        # Total loss in dB, converted to a linear gain
        total_loss_db = free_space_loss_db + atmospheric_loss_db + rain_loss_db

        return 10 ** (-total_loss_db / 10)

    def generate_small_scale_fading(self, previous_fading=None):
        """Draw the next complex Rayleigh fading sample via an AR(1) process.

        Args:
            previous_fading: previous complex fading vector, or None to draw a
                fresh unit-power Rayleigh sample.
        Returns:
            ndarray: complex fading coefficients, one per user.
        """
        n_users = self.config.N_UE

        if previous_fading is None:
            # Fresh circularly-symmetric complex Gaussian (unit power)
            real_part = np.random.normal(0, 1 / np.sqrt(2), n_users)
            imag_part = np.random.normal(0, 1 / np.sqrt(2), n_users)
            fading = real_part + 1j * imag_part
        else:
            # Innovation std chosen so total power stays at 1 under AR(1)
            innovation_std = np.sqrt(1 - self.rho ** 2) / np.sqrt(2)

            innovation_real = np.random.normal(0, innovation_std, n_users)
            innovation_imag = np.random.normal(0, innovation_std, n_users)
            innovation = innovation_real + 1j * innovation_imag

            # AR(1) recursion
            fading = self.rho * previous_fading + innovation

        return fading

    def update_channel_state(self, time_step, previous_fading):
        """Advance the satellite and produce the next channel realisation.

        Args:
            time_step: elapsed time (s)
            previous_fading: previous small-scale fading vector (or None)
        Returns:
            tuple: (estimated_gain, true_gain, current_fading)
        """
        # 1. Advance the satellite along its orbit
        self.satellite_position += self.satellite_velocity_vector * time_step

        # 2. Refresh geometry (distances, angles, Doppler)
        self.update_user_satellite_geometry()

        # 3. Large-scale path loss
        path_loss = self.calculate_path_loss()

        # 4. Small-scale fading (AR(1) continuation)
        current_fading = self.generate_small_scale_fading(previous_fading)

        # 5. True channel power gain
        true_gain = path_loss * np.abs(current_fading) ** 2

        # 6. Multiplicative log-normal CSI estimation error
        error = np.random.lognormal(0, self.sigma_e, self.config.N_UE)
        estimated_gain = true_gain * error

        return estimated_gain, true_gain, current_fading

    def get_channel_statistics(self):
        """Return a snapshot of the current channel statistics."""
        return {
            'doppler_shifts': self.doppler_shifts,
            'coherence_time': self.coherence_time,
            'user_distances': self.user_distances,
            'user_elevation': self.user_elevation,
            'path_loss': self.calculate_path_loss()
        }

    def generate_channel_gain(self):
        """Produce an initial channel realisation without moving the satellite.

        Returns:
            tuple: (estimated_gain, true_gain, current_fading)
        """
        # 1. Large-scale path loss
        path_loss = self.calculate_path_loss()

        # 2. Initial small-scale fading (self.previous_fading is None unless
        #    a caller set it, so this normally draws a fresh sample)
        current_fading = self.generate_small_scale_fading(self.previous_fading)

        # 3. True channel power gain
        true_gain = path_loss * np.abs(current_fading) ** 2

        # 4. Multiplicative log-normal CSI estimation error
        error = np.random.lognormal(0, self.sigma_e, self.config.N_UE)
        estimated_gain = true_gain * error

        return estimated_gain, true_gain, current_fading


# 3. IoT terminal class
class IoTDevice:
    """A single NB-IoT terminal: QoS profile plus transmission bookkeeping.

    Tracks how much of the payload has been delivered, per-attempt history,
    QoS-violation counters, and a sliding time window used to estimate the
    short-term transmission reliability.
    """

    def __init__(self, config):
        # QoS profile, drawn uniformly from the configured ranges
        self.data_size = np.random.uniform(*config.data_size_range)  # bits
        self.reliability = np.random.uniform(*config.reliability_range)
        self.delay_constraint = np.random.uniform(*config.delay_range)  # ms

        # Transmission bookkeeping
        self.allocated_resources = []   # list of (rb_start, n_rb, n_slot)
        self.transmitted_data = 0       # bits delivered so far
        self.transmission_complete = False
        self.transmission_time = 0      # elapsed transmission time (ms)
        self.failed_transmissions = 0
        self.transmission_history = []  # per-attempt records

        # QoS violation counters
        self.qos_violations = {
            'delay': 0,
            'reliability': 0,
            'data_rate': 0
        }

        # Sliding window used for the reliability estimate
        self.reliability_window = 100  # ms
        self.window_transmissions = []  # attempts inside the window

    def update_transmission_status(self, achievable_rate, duration_ms, snr_db):
        """Record one transmission attempt and update the device state.

        Args:
            achievable_rate: achievable rate (bits/s)
            duration_ms: attempt duration (ms)
            snr_db: realised SNR (dB)
        Returns:
            bool: True while the transmission is finished or still viable.
        """
        # Nothing left to do once the payload is fully delivered
        if self.transmission_complete:
            return True

        # Bits delivered if this attempt succeeds
        payload_bits = achievable_rate * (duration_ms / 1000.0)

        # QPSK decoding threshold for NB-IoT: -4 dB minimum SNR
        succeeded = snr_db >= -4

        record = {
            'time': self.transmission_time,
            'duration': duration_ms,
            'snr_db': snr_db,
            'success': succeeded,
            'data_transmitted': payload_bits if succeeded else 0
        }
        self.transmission_history.append(record)
        self._update_reliability_window(record)

        if succeeded:
            self.transmitted_data += payload_bits
        else:
            self.failed_transmissions += 1

        self.transmission_time += duration_ms

        # Payload fully delivered?
        if self.transmitted_data >= self.data_size:
            self.transmission_complete = True
            return True

        self._check_qos_violations()

        # Viable only while neither the deadline nor the reliability
        # requirement has been broken.
        timed_out = self.transmission_time > self.delay_constraint
        too_unreliable = self._get_current_reliability() < self.reliability
        return not (timed_out or too_unreliable)

    def _update_reliability_window(self, record):
        """Drop records that fell out of the window, then append the newest."""
        now = record['time']
        fresh = [
            entry for entry in self.window_transmissions
            if now - entry['time'] <= self.reliability_window
        ]
        fresh.append(record)
        self.window_transmissions = fresh

    def _get_current_reliability(self):
        """Fraction of successful attempts inside the sliding window."""
        window = self.window_transmissions
        if not window:
            return 1.0  # no evidence yet: assume perfectly reliable
        return sum(entry['success'] for entry in window) / len(window)

    def _check_qos_violations(self):
        """Bump the violation counters for delay, reliability and data rate."""
        # Deadline violation
        if self.transmission_time > self.delay_constraint:
            self.qos_violations['delay'] += 1

        # Reliability violation
        if self._get_current_reliability() < self.reliability:
            self.qos_violations['reliability'] += 1

        # Data-rate violation (achieved vs. rate needed to meet the deadline)
        if self.transmission_time > 0:
            achieved = self.transmitted_data / (self.transmission_time / 1000.0)
            needed = self.data_size / (self.delay_constraint / 1000.0)
            if achieved < needed:
                self.qos_violations['data_rate'] += 1

    def get_transmission_efficiency(self):
        """Ratio of achieved rate to the rate required to meet the deadline."""
        if self.transmission_time == 0:
            return 0

        achieved = self.transmitted_data / (self.transmission_time / 1000.0)
        needed = self.data_size / (self.delay_constraint / 1000.0)

        return achieved / needed if needed > 0 else 0

    def get_resource_efficiency(self):
        """Delivered data normalised by payload size and resources consumed."""
        consumed = sum(rb * slots for _, rb, slots in self.allocated_resources)
        if consumed == 0:
            return 0

        return self.transmitted_data / (self.data_size * consumed)

    def get_qos_satisfaction(self):
        """Per-dimension QoS satisfaction scores, each capped at 1.0."""
        # Delay: better than required -> 1.0
        delay_score = min(1.0, self.delay_constraint / max(self.transmission_time, 1))

        # Reliability: window estimate vs. requirement
        reliability_score = min(1.0, self._get_current_reliability() / self.reliability)

        # Data completion fraction
        data_score = min(1.0, self.transmitted_data / self.data_size)

        # Overall score is the weakest dimension
        return {
            'delay': delay_score,
            'reliability': reliability_score,
            'data': data_score,
            'overall': min(delay_score, reliability_score, data_score)
        }


# 4. Environment class
class LEONBIoTEnv:
    """RL environment for robust NB-IoT resource allocation over a LEO link.

    State: estimated channel gains + flattened resource grid + per-device QoS
    status. Action: four continuous values in [0, 1], decoded into
    (device, rb_start, n_rb, n_slot). Reward favours successful, QoS-
    and resource-efficient transmissions.
    """

    def __init__(self, config, robust=True):
        """Build the environment and run an initial reset().

        Args:
            config: SimConfig-like object.
            robust: apply a Chebyshev-style CSI margin when True.

        Fixes vs. the earlier revision: every attribute is initialised exactly
        once, reset() is called once (it used to run both at the top and the
        bottom of __init__), and max_steps honours config.max_steps instead of
        a hard-coded 1000.
        """
        self.config = config
        self.robust = robust
        self.channel_model = ChannelModel(config)

        # PHY / scheduling constants
        self.snr_requirement = 10   # dB (nominal target, informational)
        self.subframe_duration = 1  # ms per scheduling step
        self.reliability_target = 0.95

        # Episode step budget: configured value with legacy fallback
        self.max_steps = getattr(config, 'max_steps', 1000)
        self.current_step = 0

        # Link-budget parameters mirrored from the config so older callers
        # reading them off the environment keep working.
        self.tx_power_dbm = getattr(config, 'tx_power_dbm', 23)              # dBm
        self.noise_figure_db = getattr(config, 'noise_figure_db', 5)         # dB
        self.thermal_noise_dbm = getattr(config, 'thermal_noise_dbm', -174)  # dBm/Hz
        self.bandwidth = getattr(config, 'bandwidth', 180e3)                 # Hz

        # Mutable episode state; fully (re)built by reset()
        self.devices = []
        self.successful_transmissions = set()
        self.previous_fading = None
        self.channel_gains = None
        self.resource_grid = np.zeros((config.N_RB, config.N_SLOT))
        self.resource_usage_map = np.zeros((config.N_RB, config.N_SLOT))

        self.reset()

    def reset(self):
        """Start a new episode.

        Returns:
            ndarray: the initial observation.
        """
        self.current_step = 0

        # Fresh device population
        self.devices = [IoTDevice(self.config) for _ in range(self.config.N_UE)]

        # Clear the resource grid and the usage map
        self.resource_grid = np.zeros((self.config.N_RB, self.config.N_SLOT))
        self.resource_usage_map = np.zeros((self.config.N_RB, self.config.N_SLOT))

        # No device has succeeded yet
        self.successful_transmissions = set()

        # Fresh channel realisation
        self.previous_fading = None
        self.channel_gains = self.channel_model.generate_channel_gain()

        return self._get_state()

    def _get_state(self):
        """Build the observation vector.

        Returns:
            ndarray: concatenation of estimated channel gains, the flattened
            resource-grid occupancy, and per-device
            (remaining data, reliability requirement, remaining delay) triples.
        """
        # Estimated (not true) channel gains — the agent only sees the estimate
        estimated_gains = self.channel_gains[0]

        # Resource grid occupancy
        resource_grid_state = self.resource_grid.flatten()

        # Per-device QoS status
        device_states = np.array([
            [d.data_size - d.transmitted_data,        # remaining payload
             d.reliability,                           # reliability requirement
             d.delay_constraint - d.transmission_time]  # remaining delay budget
            for d in self.devices
        ]).flatten()

        return np.concatenate([
            estimated_gains,
            resource_grid_state,
            device_states
        ])

    def _parse_action(self, action):
        """Decode a 4-dim action in [0,1]^4 into allocation parameters.

        Returns:
            tuple: (device_idx, rb_start, n_rb, n_slot)
        """
        # Guard against samples slightly outside the unit box
        action = np.clip(action, 0, 1)

        # Device selection (discretised)
        device_idx = int(action[0] * (self.config.N_UE - 1))

        # Resource-block start index
        max_rb_start = self.config.N_RB - 1
        rb_start = int(action[1] * max_rb_start)

        # Number of RBs (NB-IoT spec: 1-12)
        n_rb = max(1, min(12, int(action[2] * 12) + 1))

        # Keep the allocation inside the grid
        rb_start = min(rb_start, self.config.N_RB - n_rb)

        # Number of slots (NB-IoT spec: 1-8)
        n_slot = max(1, min(8, int(action[3] * 8) + 1))

        return device_idx, rb_start, n_rb, n_slot

    def step(self, action):
        """Execute one scheduling step.

        Args:
            action: 4-dim PPO action [device_idx, rb_start, n_rb, n_slot]
        Returns:
            tuple: (next_state, reward, done, info)
        """
        self.current_step += 1

        # 1. Decode the action
        device_idx, rb_start, n_rb, n_slot = self._parse_action(action)

        # 2. Advance the channel (happens regardless of allocation feasibility)
        estimated_gain, true_gain, current_fading = self.channel_model.update_channel_state(
            self.subframe_duration / 1000.0,
            self.previous_fading
        )
        self.previous_fading = current_fading
        self.channel_gains = (estimated_gain, true_gain, current_fading)

        # 3. Reject infeasible allocations with a fixed penalty. Fix: the
        #    earlier revision always returned done=False here, ignoring the
        #    step budget and other termination conditions on this path.
        if not self._is_allocation_feasible(rb_start, n_rb, n_slot):
            return self._get_state(), -1.0, self._check_termination(), {"allocation_success": False}

        # 4. Perform the allocation and attempt the transmission
        allocation_success = self._execute_allocation(device_idx, rb_start, n_rb, n_slot)

        # 5. Reward
        reward = self._calculate_reward(allocation_success, device_idx)

        # 6. Termination check
        done = self._check_termination()

        # 7. Next observation
        next_state = self._get_state()

        # 8. Diagnostics
        info = {
            "allocation_success": allocation_success,
            "service_rate": len(self.successful_transmissions) / self.config.N_UE,
            "resource_utilization": np.sum(self.resource_usage_map) / (self.config.N_RB * self.config.N_SLOT)
        }

        return next_state, reward, done, info

    def _check_termination(self):
        """Return True when the episode should end."""
        # 1. Step budget exhausted
        if self.current_step >= self.max_steps:
            return True

        # 2. Every device finished, out of time, or hopelessly inefficient.
        #    Fix: the efficiency test is now gated on transmission_time > 0 —
        #    a device that has not transmitted yet has efficiency 0, and the
        #    ungated test ended every episode after a single step.
        all_devices_done = all(
            d.transmission_complete or
            d.transmission_time >= d.delay_constraint or
            (d.transmission_time > 0 and d.get_transmission_efficiency() < 0.1)
            for d in self.devices
        )
        if all_devices_done:
            return True

        # 3. No free resources left
        if np.sum(self.resource_grid == 0) == 0:
            return True

        return False

    def _execute_allocation(self, device_idx, rb_start, n_rb, n_slot):
        """Allocate resources to a device and attempt the transmission.

        Args:
            device_idx: device index
            rb_start: first resource block
            n_rb: number of resource blocks
            n_slot: number of time slots
        Returns:
            bool: True when the transmission attempt succeeded.
        """
        device = self.devices[device_idx]

        # Skip devices that are already finished or out of time
        if device.transmission_complete or device.transmission_time >= device.delay_constraint:
            return False

        # Estimated channel gain for this device
        estimated_gain = self.channel_gains[0][device_idx]

        # Apply the robustness margin when enabled
        if self.robust:
            channel_gain = self._calculate_robust_channel_gain(estimated_gain, device)
        else:
            channel_gain = estimated_gain

        # Mark the grid and record the allocation on the device
        self.resource_grid[rb_start:rb_start + n_rb, :n_slot] = 1
        device.allocated_resources.append((rb_start, n_rb, n_slot))

        # Link-level outcome
        snr_db = self._calculate_snr(channel_gain)
        achievable_rate = self._calculate_achievable_rate(snr_db)

        # Update the device's transmission state
        transmission_success = device.update_transmission_status(
            achievable_rate,
            self.subframe_duration * n_slot,
            snr_db
        )

        # Track successful usage
        if transmission_success:
            self.successful_transmissions.add(device_idx)
            self.resource_usage_map[rb_start:rb_start + n_rb, :n_slot] = 1

        return transmission_success

    def _calculate_robust_channel_gain(self, estimated_gain, device):
        """Discount the estimated gain by a Chebyshev-style CSI-error margin."""
        # Outage budget implied by the device's reliability requirement
        epsilon = 1 - device.reliability
        variance = self.config.sigma_e ** 2
        margin = np.sqrt(variance / epsilon)

        # Conservative gain after applying the margin
        robust_gain = estimated_gain / (1 + margin)
        return max(robust_gain, 1e-10)  # keep the gain strictly positive

    def _calculate_snr(self, channel_gain):
        """Return the link SNR in dB for a given linear channel gain."""
        # Received power
        rx_power_dbm = self.config.tx_power_dbm + 10 * np.log10(channel_gain)

        # Noise power over the NB-IoT bandwidth, including the noise figure
        noise_power_dbm = (self.config.thermal_noise_dbm +
                           10 * np.log10(self.config.bandwidth) +
                           self.config.noise_figure_db)

        return rx_power_dbm - noise_power_dbm

    def _calculate_achievable_rate(self, snr_db):
        """Shannon rate (bits/s), capped at QPSK's 2 bits/s/Hz for NB-IoT."""
        snr_linear = 10 ** (snr_db / 10)

        spectral_efficiency = np.log2(1 + snr_linear)

        # NB-IoT modulation limit (QPSK)
        spectral_efficiency = min(spectral_efficiency, 2.0)

        return spectral_efficiency * self.config.bandwidth

    def _calculate_reward(self, allocation_success, device_idx):
        """Compute the step reward.

        step() rejects infeasible allocations before this is ever called, so a
        failure here always means a failed transmission. Fix: the earlier
        revision additionally called _is_allocation_feasible(device_idx, 1, 1),
        passing a device index where a resource-block index was expected; that
        broken branch has been removed.
        """
        device = self.devices[device_idx]
        reward = 0.0

        if allocation_success:
            # Base reward for a successful transmission
            reward += 1.0

            # QoS satisfaction bonus
            qos_satisfaction = device.get_qos_satisfaction()
            reward += qos_satisfaction['overall']

            # Resource-efficiency bonus
            resource_efficiency = device.get_resource_efficiency()
            reward += 0.5 * resource_efficiency
        else:
            # Transmission-failure penalty
            reward -= 0.5

        return reward

    def _is_allocation_feasible(self, rb_start, n_rb, n_slot):
        """Check whether an allocation request fits into free grid space.

        Args:
            rb_start: first resource block
            n_rb: number of resource blocks
            n_slot: number of time slots
        Returns:
            bool: True when the request is in range and all cells are free.
        """
        # 1. Basic range validity
        if rb_start < 0 or n_rb <= 0 or n_slot <= 0:
            return False

        # 2. Must not spill over the grid
        if rb_start + n_rb > self.config.N_RB or n_slot > self.config.N_SLOT:
            return False

        # 3. Every requested cell must be free
        resource_slice = self.resource_grid[rb_start:rb_start + n_rb, :n_slot]
        return np.all(resource_slice == 0)

    def _calculate_fragmentation(self):
        """Return the resource-grid fragmentation in [0, 1].

        0 means no fragmentation (grid entirely free or entirely full);
        higher values mean more free/used boundaries. Vectorised equivalent
        of the original per-cell double loop.
        """
        if np.all(self.resource_grid == 0):  # completely free
            return 0.0

        if np.all(self.resource_grid == 1):  # completely full
            return 0.0

        grid = self.resource_grid

        # Boundaries where horizontally / vertically adjacent cells differ
        horizontal_diffs = np.count_nonzero(grid[:, :-1] != grid[:, 1:])
        vertical_diffs = np.count_nonzero(grid[:-1, :] != grid[1:, :])

        fragmentation = horizontal_diffs + vertical_diffs
        total_boundaries = (self.config.N_RB * (self.config.N_SLOT - 1) +
                            (self.config.N_RB - 1) * self.config.N_SLOT)

        return fragmentation / total_boundaries if total_boundaries > 0 else 0.0




# 5. Running state-statistics class
class RunningStats:
    """Welford-style running mean/std estimator (used for state normalisation)."""

    def __init__(self, state_dim):
        self.dim = state_dim
        self.n = 0  # number of samples seen so far
        self.mean = np.zeros(state_dim)
        self.mean_diff = np.zeros(state_dim)  # running sum of squared deviations
        self.variance = np.zeros(state_dim)
        self.std = np.zeros(state_dim)

    def update(self, x):
        """Fold one sample into the running statistics.

        Args:
            x: array-like sample of length `dim`.

        Bug fix: the first sample used to be stored by reference
        (`self.mean = x`), so the later in-place `self.mean += ...` silently
        mutated the caller's array. The sample is now converted to an
        independent float array before being stored.
        """
        x = np.asarray(x, dtype=float)
        self.n += 1

        if self.n == 1:
            self.mean = x.copy()  # copy: never alias caller-owned memory
            self.std = np.ones_like(x)  # unit std avoids divide-by-zero downstream
        else:
            old_mean = self.mean.copy()
            self.mean += (x - self.mean) / self.n
            self.mean_diff += (x - old_mean) * (x - self.mean)
            self.variance = self.mean_diff / self.n
            self.std = np.sqrt(self.variance) + 1e-8


class ActorCritic(nn.Module):
    """Shared-backbone actor-critic with a state-independent Gaussian policy.

    The actor mean is squashed by a sigmoid so raw actions live in [0, 1];
    the log-std is a free parameter shared across states. The critic shares
    the feature extractor with the actor.
    """

    def __init__(self, state_dim, action_dim):
        super().__init__()

        # Non-trainable normalisation statistics (updated externally)
        self.state_mean = nn.Parameter(torch.zeros(state_dim), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones(state_dim), requires_grad=False)

        # Shared trunk
        self.feature_extractor = nn.Sequential(
            nn.Linear(state_dim, 512),
            nn.LayerNorm(512),
            nn.ReLU(),
            nn.Dropout(0.1),
        )

        # Policy tower
        self.actor_hidden = nn.Sequential(
            nn.Linear(512, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, 128),
            nn.LayerNorm(128),
            nn.ReLU(),
        )

        # Mean head (sigmoid keeps actions inside [0, 1]); log-std is a
        # separate learnable parameter, not a network output.
        self.actor_mean = nn.Sequential(
            nn.Linear(128, action_dim),
            nn.Sigmoid(),
        )
        self.actor_log_std = nn.Parameter(torch.zeros(action_dim))

        # Value tower
        self.critic = nn.Sequential(
            nn.Linear(512, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, 128),
            nn.LayerNorm(128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

        # Orthogonal initialisation for every linear layer
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Orthogonal init (gain sqrt(2)) on linear weights; zero biases."""
        if not isinstance(module, nn.Linear):
            return
        nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
        if module.bias is not None:
            module.bias.data.zero_()

    def normalize_state(self, state):
        """Whiten the observation using the stored running statistics."""
        return (state - self.state_mean) / (self.state_std + 1e-8)

    def forward(self, state):
        """Return (action_mean, action_std, state_value) for a batch of states."""
        features = self.feature_extractor(self.normalize_state(state))

        mean = self.actor_mean(self.actor_hidden(features))
        std = self.actor_log_std.exp()
        value = self.critic(features)

        return mean, std, value

    def get_action(self, state, deterministic=False):
        """Sample an action, or return the policy mean when deterministic.

        Returns:
            tuple: (action, log_prob); log_prob is None in deterministic mode.
        """
        mean, std, _ = self(state)

        if deterministic:
            return mean, None

        dist = Normal(mean, std)

        # Reparameterised sample keeps the gradient path intact
        sample = dist.rsample()

        # Log-probability is computed on the unclipped sample
        log_prob = dist.log_prob(sample).sum(dim=-1)

        # Clip into the valid action box afterwards
        return torch.clamp(sample, 0.0, 1.0), log_prob

    def evaluate_action(self, state, action):
        """Score a (state, action) pair.

        Returns:
            tuple: (log_prob, entropy, value)
        """
        mean, std, value = self(state)
        dist = Normal(mean, std)

        log_prob = dist.log_prob(action).sum(dim=-1)
        entropy = dist.entropy().sum(dim=-1)

        return log_prob, entropy, value


# 7. PPO algorithm class
class PPO:
    def __init__(self, config, env):
        self.config = config
        self.env = env
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # 获取状态和动作维度
        self.state_dim = self._get_state_dim()
        self.action_dim = 4  # [device_idx, rb_start, n_rb, n_slot]

        # 创建策略网络
        self.actor_critic = ActorCritic(self.state_dim, self.action_dim).to(self.device)

        # 创建优化器
        self.optimizer = optim.Adam([
            {'params': self.actor_critic.feature_extractor.parameters()},
            {'params': self.actor_critic.actor_hidden.parameters()},
            {'params': self.actor_critic.actor_mean.parameters()},
            {'params': self.actor_critic.actor_log_std, 'lr': config.lr * 0.1},
            {'params': self.actor_critic.critic.parameters()}
        ], lr=config.lr)

        # 创建学习率调度器
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='max', factor=0.5, patience=5, verbose=True
        )

        # 经验回放缓冲区
        self.buffer = RolloutBuffer()

        # 训练统计
        self.train_stats = {
            'episode_rewards': [],
            'success_rates': [],
            'resource_utils': [],
            'policy_losses': [],
            'value_losses': [],
            'entropy_losses': []
        }

    def train(self, total_timesteps):
        """训练PPO算法"""
        # 初始化进度跟踪
        timesteps_elapsed = 0
        best_reward = float('-inf')

        while timesteps_elapsed < total_timesteps:
            # 收集轨迹数据
            with torch.no_grad():
                timesteps_elapsed += self._collect_rollouts()

            # 计算优势和回报
            advantages, returns = self._compute_advantages_and_returns()

            # 多次更新策略
            for _ in range(self.config.n_epochs):
                policy_loss, value_loss, entropy_loss = self._update_policy(advantages, returns)

                # 记录训练统计
                self.train_stats['policy_losses'].append(policy_loss)
                self.train_stats['value_losses'].append(value_loss)
                self.train_stats['entropy_losses'].append(entropy_loss)

            # 评估当前策略
            eval_stats = self._evaluate_policy()

            # 更新学习率
            self.scheduler.step(eval_stats['mean_reward'])

            # 保存最佳模型
            if eval_stats['mean_reward'] > best_reward:
                best_reward = eval_stats['mean_reward']
                self.save_model('best_model.pt')

            # 清空缓冲区
            self.buffer.clear()

            # 打印训练进度
            self._log_training_progress(timesteps_elapsed, eval_stats)

    def _collect_rollouts(self):
        """收集轨迹数据"""
        timesteps_collected = 0

        while timesteps_collected < self.config.rollout_length:
            state = self.env.reset()
            episode_reward = 0
            done = False

            while not done:
                # 转换状态为tensor
                state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)

                # 获取动作
                with torch.no_grad():
                    action, log_prob = self.actor_critic.get_action(state_tensor)
                action = action.cpu().numpy()[0]

                # 执行动作
                next_state, reward, done, info = self.env.step(action)

                # 存储转换
                self.buffer.add(
                    state, action, reward, next_state,
                    log_prob.item(), done, info
                )

                state = next_state
                episode_reward += reward
                timesteps_collected += 1

                if done:
                    break

            # 记录episode统计
            self.train_stats['episode_rewards'].append(episode_reward)
            self.train_stats['success_rates'].append(info['service_rate'])
            self.train_stats['resource_utils'].append(info['resource_utilization'])

        return timesteps_collected

    def _compute_advantages_and_returns(self):
        """计算优势函数和回报"""
        # 获取所有状态的值估计
        states = torch.FloatTensor(self.buffer.states).to(self.device)
        with torch.no_grad():
            _, _, values = self.actor_critic(states)
        values = values.squeeze()

        # 计算GAE
        advantages = []
        returns = []
        gae = 0

        for t in reversed(range(len(self.buffer))):
            if t == len(self.buffer) - 1:
                next_value = 0 if self.buffer.dones[t] else values[t]
            else:
                next_value = values[t + 1]

            # 计算TD误差
            delta = (self.buffer.rewards[t] +
                     self.config.gamma * next_value * (1 - self.buffer.dones[t]) -
                     values[t])

            # 计算GAE
            gae = delta + self.config.gamma * self.config.lambda_gae * (1 - self.buffer.dones[t]) * gae

            # 存储优势和回报
            advantages.insert(0, gae)
            returns.insert(0, gae + values[t])

        # 转换为tensor并归一化优势
        advantages = torch.FloatTensor(advantages).to(self.device)
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        returns = torch.FloatTensor(returns).to(self.device)

        return advantages, returns

    def _update_policy(self, advantages, returns):
        """更新策略网络"""
        # 准备数据
        states = torch.FloatTensor(self.buffer.states).to(self.device)
        actions = torch.FloatTensor(self.buffer.actions).to(self.device)
        old_log_probs = torch.FloatTensor(self.buffer.log_probs).to(self.device)

        # 多次更新
        policy_loss_epoch = 0
        value_loss_epoch = 0
        entropy_loss_epoch = 0
        n_updates = 0

        for _ in range(self.config.n_updates):
            # 评估动作
            log_probs, entropy, values = self.actor_critic.evaluate_action(states, actions)

            # 计算重要性权重
            ratios = torch.exp(log_probs - old_log_probs)

            # 计算surrogate目标
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - self.config.clip_ratio,
                                1 + self.config.clip_ratio) * advantages

            # 计算策略损失
            policy_loss = -torch.min(surr1, surr2).mean()

            # 计算值函数损失
            value_loss = 0.5 * (returns - values.squeeze()).pow(2).mean()

            # 计算熵损失
            entropy_loss = -self.config.entropy_coef * entropy.mean()

            # 总损失
            total_loss = (policy_loss +
                          self.config.value_coef * value_loss +
                          entropy_loss)

            # 更新网络
            self.optimizer.zero_grad()
            total_loss.backward()

            # 梯度裁剪
            nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
                                     self.config.max_grad_norm)

            self.optimizer.step()

            # 累计损失
            policy_loss_epoch += policy_loss.item()
            value_loss_epoch += value_loss.item()
            entropy_loss_epoch += entropy_loss.item()
            n_updates += 1

        # 返回平均损失
        return (policy_loss_epoch / n_updates,
                value_loss_epoch / n_updates,
                entropy_loss_epoch / n_updates)

    def _evaluate_policy(self, n_episodes=5):
        """评估当前策略"""
        eval_stats = {
            'episode_rewards': [],
            'success_rates': [],
            'resource_utils': []
        }

        for _ in range(n_episodes):
            state = self.env.reset()
            episode_reward = 0
            done = False

            while not done:
                state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
                with torch.no_grad():
                    action, _ = self.actor_critic.get_action(state_tensor, deterministic=True)
                action = action.cpu().numpy()[0]

                state, reward, done, info = self.env.step(action)
                episode_reward += reward

            eval_stats['episode_rewards'].append(episode_reward)
            eval_stats['success_rates'].append(info['service_rate'])
            eval_stats['resource_utils'].append(info['resource_utilization'])

        # 计算平均统计
        eval_stats['mean_reward'] = np.mean(eval_stats['episode_rewards'])
        eval_stats['mean_success_rate'] = np.mean(eval_stats['success_rates'])
        eval_stats['mean_resource_util'] = np.mean(eval_stats['resource_utils'])

        return eval_stats

    def save_model(self, path):
        """保存模型"""
        torch.save({
            'actor_critic_state_dict': self.actor_critic.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'state_mean': self.actor_critic.state_mean,
            'state_std': self.actor_critic.state_std,
            'train_stats': self.train_stats,
            'config': self.config
        }, path)

    def load_model(self, path):
        """加载模型"""
        checkpoint = torch.load(path)
        self.actor_critic.load_state_dict(checkpoint['actor_critic_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        self.actor_critic.state_mean.data = checkpoint['state_mean']
        self.actor_critic.state_std.data = checkpoint['state_std']
        self.train_stats = checkpoint['train_stats']
        self.config = checkpoint['config']

    def _get_state_dim(self):
        """计算状态空间维度"""
        state = self.env.reset()
        return len(state)

    def _log_training_progress(self, timesteps_elapsed, eval_stats):
        """记录训练进度"""
        print(f"\nTimesteps: {timesteps_elapsed}")
        print(f"Mean reward: {eval_stats['mean_reward']:.2f}")
        print(f"Success rate: {eval_stats['mean_success_rate']:.2f}")
        print(f"Resource utilization: {eval_stats['mean_resource_util']:.2f}")
        print(f"Learning rate: {self.optimizer.param_groups[0]['lr']:.2e}")


class RolloutBuffer:
    """In-memory storage for one batch of on-policy transitions."""

    # Parallel lists, one entry per stored transition
    _FIELDS = ("states", "actions", "rewards", "next_states",
               "log_probs", "dones", "infos")

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, [])

    def add(self, state, action, reward, next_state, log_prob, done, info):
        """Append one transition across all parallel lists."""
        record = (state, action, reward, next_state, log_prob, done, info)
        for field, value in zip(self._FIELDS, record):
            getattr(self, field).append(value)

    def clear(self):
        """Drop all stored transitions (list objects are kept, just emptied)."""
        for field in self._FIELDS:
            getattr(self, field).clear()

    def __len__(self):
        """Number of stored transitions."""
        return len(self.states)


class Evaluator:
    """Benchmarks robust / non-robust / ideal-CSI schedulers over a CSI-error sweep."""

    def __init__(self, config):
        self.config = config

        # Evaluation configuration
        self.n_episodes = 50  # episodes per (algorithm, sigma_e) configuration
        self.sigma_e_values = np.linspace(0, 1.0, 11)  # CSI error sweep

        # One metric store per algorithm variant
        self.results = {
            'robust': self._create_result_dict(),
            'non_robust': self._create_result_dict(),
            'ideal': self._create_result_dict()
        }

        # Fixed seed for reproducibility
        self.seed = 42
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)

        # Output directory for logs, plots and pickled results
        self.save_dir = f"evaluation_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        os.makedirs(self.save_dir, exist_ok=True)

        self._setup_logging()

    def _create_result_dict(self):
        """Create the per-algorithm metric storage (one list per metric)."""
        return {
            'success_rates': [],
            'resource_utils': [],
            'qos_satisfaction': [],
            'transmission_delays': [],
            'reliability_scores': [],
            'snr_distributions': [],
            'throughput': [],
            'failed_transmissions': [],
            'resource_fragmentation': []
        }

    def _setup_logging(self):
        """Route log output to both a file in save_dir and the console."""
        log_file = os.path.join(self.save_dir, 'evaluation.log')
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)

    def evaluate_algorithms(self):
        """Train both models, sweep CSI errors, and return collected results."""
        self.logger.info("Starting algorithm evaluation...")

        # Train the robust model first
        robust_model = self._train_model(True)
        self.logger.info("Robust model training completed")

        # Then the non-robust baseline
        non_robust_model = self._train_model(False)
        self.logger.info("Non-robust model training completed")

        for sigma_e in self.sigma_e_values:
            self.logger.info(f"\nEvaluating for CSI error σ_e = {sigma_e:.2f}")

            # Inject the current estimation-error level into the shared config
            self.config.sigma_e = sigma_e

            self._evaluate_model(robust_model, 'robust', sigma_e)
            self._evaluate_model(non_robust_model, 'non_robust', sigma_e)

            # The ideal-CSI baseline is only meaningful at zero error
            # (first linspace element is exactly 0.0)
            if sigma_e == 0:
                self._evaluate_model(non_robust_model, 'ideal', sigma_e)

            # Persist intermediate results after each error level
            self._save_results(f'results_sigma_e_{sigma_e:.2f}.pkl')

        self._generate_analysis_report()
        self.plot_results()

        self.logger.info("Evaluation completed successfully")
        # Fix: return the collected results so callers (e.g. save_results in
        # main) receive something other than None.
        return self.results

    def _train_model(self, robust):
        """Train a PPO model on a fresh environment (robust or not)."""
        env = LEONBIoTEnv(self.config, robust=robust)
        ppo = PPO(self.config, env)
        ppo.train(total_timesteps=int(1e6))  # adjust training budget as needed
        return ppo

    def _evaluate_model(self, model, algorithm_type, sigma_e):
        """Evaluate one model over `n_episodes` and store aggregated metrics."""
        self.logger.info(f"Evaluating {algorithm_type} algorithm...")

        env = LEONBIoTEnv(self.config, robust=(algorithm_type == 'robust'))

        episode_metrics = []
        for episode in range(self.n_episodes):
            metrics = self._run_evaluation_episode(model, env)
            episode_metrics.append(metrics)

            if (episode + 1) % 10 == 0:
                self.logger.info(f"Completed {episode + 1}/{self.n_episodes} episodes")

        avg_metrics = self._aggregate_metrics(episode_metrics)

        # Fix: setdefault accommodates derived keys such as '<metric>_std'
        # that are not pre-created in _create_result_dict() (the original
        # raised KeyError here).
        for metric_name, value in avg_metrics.items():
            self.results[algorithm_type].setdefault(metric_name, []).append(value)

        self._log_evaluation_results(algorithm_type, sigma_e, avg_metrics)

    def _run_evaluation_episode(self, model, env):
        """Roll out one deterministic episode and return its metrics."""
        state = env.reset()
        done = False
        episode_data = []

        while not done:
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(model.device)
            with torch.no_grad():
                action, _ = model.actor_critic.get_action(state_tensor, deterministic=True)
            action = action.cpu().numpy()[0]

            next_state, reward, done, info = env.step(action)

            episode_data.append({
                'state': state,
                'action': action,
                'reward': reward,
                'next_state': next_state,
                'info': info
            })

            state = next_state

        return self._calculate_episode_metrics(episode_data, env)

    def _calculate_episode_metrics(self, episode_data, env):
        """Compute the per-episode performance metrics from the final env state.

        Fix: keys deliberately match _create_result_dict() — the original used
        singular names ('success_rate', 'snr_distribution', ...) which raised
        KeyError when the results were stored.
        """
        metrics = {
            'success_rates': len(env.successful_transmissions) / env.config.N_UE,
            'resource_utils': np.sum(env.resource_usage_map) / (env.config.N_RB * env.config.N_SLOT),
            'qos_satisfaction': np.mean([device.get_qos_satisfaction()['overall'] for device in env.devices]),
            'transmission_delays': np.mean(
                [device.transmission_time for device in env.devices if device.transmission_complete]),
            'reliability_scores': np.mean([device._get_current_reliability() for device in env.devices]),
            'throughput': sum(device.transmitted_data for device in env.devices) / max(
                device.transmission_time for device in env.devices),
            'failed_transmissions': sum(device.failed_transmissions for device in env.devices),
            'resource_fragmentation': env._calculate_fragmentation(),
            'snr_distributions': [device.transmission_history[-1]['snr_db'] for device in env.devices if
                                  device.transmission_history]
        }

        return metrics

    def _aggregate_metrics(self, episode_metrics):
        """Aggregate per-episode metrics into means, stds and pooled SNR samples."""
        aggregated = {}
        for key in episode_metrics[0].keys():
            if key == 'snr_distributions':
                # Pool all SNR samples across episodes
                aggregated[key] = np.concatenate([metrics[key] for metrics in episode_metrics])
            else:
                values = [metrics[key] for metrics in episode_metrics]
                aggregated[key] = np.mean(values)
                aggregated[key + '_std'] = np.std(values)

        return aggregated

    def _log_evaluation_results(self, algorithm_type, sigma_e, metrics):
        """Log mean ± std of each scalar metric."""
        self.logger.info(f"\n{algorithm_type.upper()} Algorithm Results (σ_e = {sigma_e:.2f}):")
        for metric_name, value in metrics.items():
            if not metric_name.endswith('_std') and not metric_name == 'snr_distributions':
                self.logger.info(f"{metric_name}: {value:.4f} ± {metrics[metric_name + '_std']:.4f}")

    def plot_results(self):
        """Generate the 2x3 grid of performance-comparison plots."""
        self.logger.info("Generating performance comparison plots...")

        # Fix: the plain 'seaborn' style name was removed in matplotlib >= 3.6
        for style in ('seaborn-v0_8', 'seaborn', 'default'):
            try:
                plt.style.use(style)
                break
            except OSError:
                continue
        fig = plt.figure(figsize=(20, 15))

        plt.subplot(231)
        self._plot_metric('success_rates', 'Success Service Rate',
                          'Channel Error Variance (σ_e²)', 'Success Rate')

        plt.subplot(232)
        self._plot_metric('resource_utils', 'Resource Utilization',
                          'Channel Error Variance (σ_e²)', 'Utilization Rate')

        plt.subplot(233)
        self._plot_metric('qos_satisfaction', 'QoS Satisfaction',
                          'Channel Error Variance (σ_e²)', 'Satisfaction Rate')

        plt.subplot(234)
        self._plot_metric('transmission_delays', 'Average Transmission Delay',
                          'Channel Error Variance (σ_e²)', 'Delay (ms)')

        plt.subplot(235)
        self._plot_metric('reliability_scores', 'Reliability Score',
                          'Channel Error Variance (σ_e²)', 'Reliability')

        plt.subplot(236)
        self._plot_metric('resource_fragmentation', 'Resource Fragmentation',
                          'Channel Error Variance (σ_e²)', 'Fragmentation Rate')

        plt.tight_layout()
        plt.savefig(os.path.join(self.save_dir, 'performance_comparison.png'),
                    dpi=300, bbox_inches='tight')
        plt.close()

        self._plot_snr_distributions()

    def _plot_metric(self, metric_name, title, xlabel, ylabel):
        """Plot one metric for each algorithm variant over the σ_e sweep.

        Fix: the 'ideal' variant is only evaluated at σ_e = 0, so it is drawn
        as a horizontal reference line instead of a full curve (the original
        crashed on the length mismatch).
        """
        for algo, fmt, label in (('robust', 'b-', 'Robust'),
                                 ('non_robust', 'r--', 'Non-robust')):
            data = self.results[algo].get(metric_name, [])
            if len(data) == len(self.sigma_e_values):
                plt.plot(self.sigma_e_values, data, fmt, label=label, linewidth=2)

        ideal = self.results['ideal'].get(metric_name, [])
        if len(ideal) == len(self.sigma_e_values):
            plt.plot(self.sigma_e_values, ideal, 'g:', label='Ideal CSI', linewidth=2)
        elif ideal:
            plt.axhline(ideal[0], color='g', linestyle=':', label='Ideal CSI', linewidth=2)

        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.grid(True)
        plt.legend()

    def _plot_snr_distributions(self):
        """Overlay SNR histograms for low / mid / high CSI error levels."""
        plt.figure(figsize=(10, 6))

        # Representative indices: smallest, middle and largest sigma_e
        sigma_e_indices = [0, 5, 10]

        for idx in sigma_e_indices:
            sigma_e = self.sigma_e_values[idx]

            for algo, label in (('robust', 'Robust'), ('non_robust', 'Non-robust')):
                dists = self.results[algo]['snr_distributions']
                if idx < len(dists):
                    plt.hist(dists[idx], bins=30, alpha=0.5,
                             label=f'{label} (σ_e={sigma_e:.2f})')

        plt.title('SNR Distribution Comparison')
        plt.xlabel('SNR (dB)')
        plt.ylabel('Frequency')
        plt.legend()
        plt.grid(True)

        plt.savefig(os.path.join(self.save_dir, 'snr_distributions.png'),
                    dpi=300, bbox_inches='tight')
        plt.close()

    def _save_results(self, filename):
        """Pickle the sweep values, results and config to save_dir/filename."""
        results_path = os.path.join(self.save_dir, filename)
        with open(results_path, 'wb') as f:
            pickle.dump({
                'sigma_e_values': self.sigma_e_values,
                'results': self.results,
                'config': self.config
            }, f)
        self.logger.info(f"Results saved to {results_path}")

    def _generate_analysis_report(self):
        """Write a plain-text analysis report summarising all stored results.

        Fix: in the original, sections 3-7 were mis-indented inside the
        section-2 loop (duplicated once per algorithm) and
        get_summary_statistics was accidentally defined as a nested function.
        """
        report_path = os.path.join(self.save_dir, 'analysis_report.txt')

        with open(report_path, 'w') as f:
            f.write("Performance Analysis Report\n")
            f.write("=========================\n\n")

            # 1. Overall performance comparison
            f.write("1. Overall Performance Comparison\n")
            f.write("---------------------------------\n")
            for metric in ['success_rates', 'resource_utils', 'qos_satisfaction']:
                f.write(f"\n{metric.replace('_', ' ').title()}:\n")
                for algo in ['robust', 'non_robust', 'ideal']:
                    mean_value = np.mean(self.results[algo][metric])
                    std_value = np.std(self.results[algo][metric])
                    f.write(f"{algo.title()}: {mean_value:.4f} ± {std_value:.4f}\n")

            # 2. CSI error sensitivity (slope of metric vs sigma_e)
            f.write("\n2. CSI Error Sensitivity Analysis\n")
            f.write("---------------------------------\n")
            for algo in ['robust', 'non_robust']:
                f.write(f"\n{algo.title()} Algorithm:\n")
                for metric in ['success_rates', 'resource_utils']:
                    slope = np.polyfit(self.sigma_e_values, self.results[algo][metric], 1)[0]
                    f.write(f"{metric.replace('_', ' ').title()} Sensitivity: {slope:.4f}\n")

            # 3. QoS performance
            f.write("\n3. QoS Performance Analysis\n")
            f.write("---------------------------\n")
            for algo in ['robust', 'non_robust']:
                f.write(f"\n{algo.title()} Algorithm:\n")
                delays = self.results[algo]['transmission_delays']
                reliability = self.results[algo]['reliability_scores']

                f.write(f"Average Delay: {np.mean(delays):.2f} ms\n")
                f.write(f"Delay Std: {np.std(delays):.2f} ms\n")
                f.write(f"Average Reliability: {np.mean(reliability):.4f}\n")
                f.write(f"Reliability Std: {np.std(reliability):.4f}\n")

            # 4. Resource efficiency
            f.write("\n4. Resource Efficiency Analysis\n")
            f.write("------------------------------\n")
            for algo in ['robust', 'non_robust']:
                f.write(f"\n{algo.title()} Algorithm:\n")
                utils = self.results[algo]['resource_utils']
                frag = self.results[algo]['resource_fragmentation']

                f.write(f"Average Resource Utilization: {np.mean(utils):.4f}\n")
                f.write(f"Resource Utilization Std: {np.std(utils):.4f}\n")
                f.write(f"Average Fragmentation: {np.mean(frag):.4f}\n")
                f.write(f"Fragmentation Std: {np.std(frag):.4f}\n")

            # 5. Robustness: performance decline from smallest to largest sigma_e
            f.write("\n5. Algorithm Robustness Analysis\n")
            f.write("--------------------------------\n")
            for metric in ['success_rates', 'resource_utils', 'qos_satisfaction']:
                f.write(f"\n{metric.replace('_', ' ').title()}:\n")

                robust_decline = ((self.results['robust'][metric][0] - self.results['robust'][metric][-1]) /
                                  self.results['robust'][metric][0])
                non_robust_decline = (
                        (self.results['non_robust'][metric][0] - self.results['non_robust'][metric][-1]) /
                        self.results['non_robust'][metric][0])

                f.write(f"Robust Algorithm Performance Decline: {robust_decline:.2%}\n")
                f.write(f"Non-robust Algorithm Performance Decline: {non_robust_decline:.2%}\n")
                f.write(f"Relative Improvement: {((1 - robust_decline) / (1 - non_robust_decline) - 1):.2%}\n")

            # 6. Throughput
            f.write("\n6. Throughput Analysis\n")
            f.write("----------------------\n")
            for algo in ['robust', 'non_robust']:
                f.write(f"\n{algo.title()} Algorithm:\n")
                throughput = self.results[algo]['throughput']

                f.write(f"Average Throughput: {np.mean(throughput) / 1e6:.2f} Mbps\n")
                f.write(f"Throughput Std: {np.std(throughput) / 1e6:.2f} Mbps\n")
                f.write(f"Peak Throughput: {np.max(throughput) / 1e6:.2f} Mbps\n")

            # 7. Summary
            f.write("\n7. Summary and Conclusions\n")
            f.write("---------------------------\n")

            avg_improvement = {}
            for metric in ['success_rates', 'resource_utils', 'qos_satisfaction']:
                improvement = ((np.mean(self.results['robust'][metric]) -
                                np.mean(self.results['non_robust'][metric])) /
                               np.mean(self.results['non_robust'][metric]) * 100)
                avg_improvement[metric] = improvement

            f.write("\nOverall Performance Improvements:\n")
            for metric, improvement in avg_improvement.items():
                f.write(f"{metric.replace('_', ' ').title()}: {improvement:.2f}%\n")

            f.write("\nKey Findings:\n")
            f.write("1. Robustness against CSI errors\n")
            f.write("2. Resource utilization efficiency\n")
            f.write("3. QoS satisfaction levels\n")
            f.write("4. Trade-offs between different performance metrics\n")

        self.logger.info(f"Analysis report generated and saved to {report_path}")

    def get_summary_statistics(self):
        """Return mean/std/max/min of the headline metrics per algorithm."""
        summary = {}

        for algo in ['robust', 'non_robust', 'ideal']:
            summary[algo] = {}
            for metric in ['success_rates', 'resource_utils', 'qos_satisfaction']:
                values = self.results[algo][metric]
                summary[algo][metric] = {
                    'mean': np.mean(values),
                    'std': np.std(values),
                    'max': np.max(values),
                    'min': np.min(values)
                }

        return summary


def setup_logging(save_dir):
    """Configure root logging to both a file under *save_dir* and the console."""
    handlers = [
        logging.FileHandler(os.path.join(save_dir, 'simulation.log')),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )
    return logging.getLogger(__name__)


def setup_environment(save_dir, seed=42):
    """Seed all RNGs for reproducibility and create the experiment directories."""
    # Deterministic randomness across torch / numpy / CUDA
    torch.manual_seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # Standard experiment directory layout
    for sub in ('models', 'results', 'logs'):
        os.makedirs(os.path.join(save_dir, sub), exist_ok=True)


def train_model(config, env, logger):
    """Train a PPO agent on *env* and return it; logs and re-raises failures."""
    try:
        logger.info("Starting model training...")

        agent = PPO(config, env)

        started = datetime.now()
        agent.train(total_timesteps=int(1e6))  # adjust training budget as needed
        elapsed = datetime.now() - started

        logger.info(f"Training completed in {elapsed}")
        return agent

    except Exception as e:
        logger.error(f"Training failed: {str(e)}")
        logger.error(traceback.format_exc())
        raise


def evaluate_model(config, model, evaluator, logger):
    """Run the evaluator's full benchmark suite and return its results."""
    try:
        logger.info("Starting model evaluation...")

        started = datetime.now()
        results = evaluator.evaluate_algorithms()
        elapsed = datetime.now() - started

        logger.info(f"Evaluation completed in {elapsed}")
        return results

    except Exception as e:
        logger.error(f"Evaluation failed: {str(e)}")
        logger.error(traceback.format_exc())
        raise


def save_results(results, save_dir, logger):
    """Pickle *results* to save_dir/results/final_results.pkl; re-raise on failure."""
    results_path = os.path.join(save_dir, 'results', 'final_results.pkl')
    try:
        with open(results_path, 'wb') as f:
            pickle.dump(results, f)
        logger.info(f"Results saved to {results_path}")

    except Exception as e:
        logger.error(f"Failed to save results: {str(e)}")
        logger.error(traceback.format_exc())
        raise


def main():
    """Entry point: set up the experiment, train robust and non-robust
    models, evaluate them, and persist configuration, results and plots.

    Fix: ``logger`` is bound to ``None`` before the try block. Previously,
    if anything failed before ``setup_logging`` returned (e.g. directory
    creation), the except handler raised ``NameError`` on ``logger`` and
    masked the original exception.
    """
    parser = argparse.ArgumentParser(description='NB-IoT低轨卫星资源调度仿真')
    parser.add_argument('--config', type=str, default=None, help='配置文件路径')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')
    args = parser.parse_args()

    logger = None  # bound before try so the except handler can't NameError
    try:
        # Timestamped per-run experiment directory to avoid collisions.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        save_dir = f"experiment_{timestamp}"
        os.makedirs(save_dir, exist_ok=True)

        # Logging must come first so later steps can report progress.
        logger = setup_logging(save_dir)
        logger.info("Starting simulation...")

        # Seed RNGs and create the models/results/logs subdirectories.
        setup_environment(save_dir, args.seed)

        # Start from defaults; overlay values from a file when given.
        config = SimConfig()
        if args.config:
            config.load(args.config)

        config.validate()
        logger.info("Configuration validated successfully")

        # Persist the exact configuration used for this run.
        config_path = os.path.join(save_dir, 'config.pkl')
        config.save(config_path)
        logger.info(f"Configuration saved to {config_path}")

        # Paired environments: with and without robust scheduling.
        env_robust = LEONBIoTEnv(config, robust=True)
        env_non_robust = LEONBIoTEnv(config, robust=False)

        logger.info("Training robust model...")
        robust_model = train_model(config, env_robust, logger)

        logger.info("Training non-robust model...")
        non_robust_model = train_model(config, env_non_robust, logger)

        evaluator = Evaluator(config)

        # Use the evaluate_model helper for consistent logging/error handling
        # (it delegates to evaluator.evaluate_algorithms()).
        logger.info("Evaluating models...")
        eval_results = evaluate_model(config, robust_model, evaluator, logger)

        save_results(eval_results, save_dir, logger)

        logger.info("Generating final report...")
        # NOTE(review): calls a private Evaluator method; consider exposing
        # a public report API on Evaluator instead.
        evaluator._generate_analysis_report()

        logger.info("Plotting results...")
        evaluator.plot_results()

        logger.info("Simulation completed successfully!")

    except Exception as e:
        if logger is None:
            # Logging never came up — report to stderr so the real
            # failure is visible instead of a NameError on `logger`.
            traceback.print_exc()
        else:
            logger.error(f"Simulation failed: {str(e)}")
            logger.error(traceback.format_exc())
        raise


if __name__ == "__main__":
    main()
