import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from gym import spaces
from torch.distributions import Normal
import matplotlib.pyplot as plt
from collections import defaultdict


# ==============================
# 1. System Configuration
# ==============================
class SimConfig:
    """Central container for every simulation hyper-parameter: system
    size, channel model, device QoS ranges, NB-IoT radio constants and
    PPO training settings."""

    def __init__(self):
        # Modified system parameters
        self.N_UE = 10  # Reduced for initial testing
        self.N_RB = 8
        self.N_SLOT = 100
        self.epochs = 150  # Increased for better convergence

        # Channel parameters
        self.rho = 0.95  # AR(1) correlation
        self.sigma_e = 0.1  # Error variance

        # Device (terminal) requirement ranges, drawn uniformly per device
        self.data_size_range = (500, 2000)      # bits to deliver
        self.reliability_range = (0.7, 0.8)     # minimum success ratio
        self.delay_range = (150, 300)           # delay budget in ms

        # NB-IoT parameters
        self.tx_power_dbm = 23  # Transmit power
        self.noise_figure_db = 5
        self.thermal_noise_dbm = -174
        self.rb_bandwidth = 180e3  # Hz

        # PPO parameters
        self.gamma = 0.99
        self.lambda_gae = 0.95
        self.clip_ratio = 0.2
        self.ppo_epochs = 8
        self.lr = 1e-4  # Reduced learning rate
        self.max_grad_norm = 0.5
        self.entropy_coef = 0.01
        self.value_loss_coef = 0.5
        self.mini_batch_size = 64
        self.episodes_per_epoch = 4

        # Action-std (exploration) decay schedule
        self.action_std_decay_rate = 0.995
        self.min_action_std = 0.1

        # Robust optimization parameters
        self.robust_confidence = 0.95  # Confidence level for robust margin
        self.min_margin = 0.05         # Minimum margin
        self.max_margin = 0.3         # Maximum margin


# ==============================
# 2. Channel Model with CSI Error
# ==============================
class ChannelModel:
    """AR(1) (Gauss-Markov) block-fading channel with imperfect CSI.

    Each call produces the true channel gain of every UE plus a noisy
    estimate of it; the estimation-error level is ``config.sigma_e``.
    """

    def __init__(self, config):
        self.config = config
        self.rho = config.rho          # AR(1) time-correlation coefficient
        self.sigma_e = config.sigma_e  # CSI estimation-error std (relative)
        self.prev_fading = None        # complex fading from the previous step

    def reset(self):
        """Forget the fading history (start of a new episode)."""
        self.prev_fading = None

    def generate_channel_gain(self, N_UE):
        """Return ``(est_gain, true_gain)``, each an array of shape (N_UE,).

        The small-scale fading follows the standard unit-power
        Gauss-Markov model ``h[t] = rho*h[t-1] + sqrt(1-rho^2)*w[t]``
        with ``w ~ CN(0, 1)``, so the fading power stays at 1 over time.
        """
        # Large-scale path loss, 80-110 dB, redrawn each call.
        # NOTE(review): redrawing path loss every step makes successive
        # gains nearly independent despite the AR(1) fading — confirm
        # whether a fixed per-episode path loss was intended.
        path_loss_db = np.random.uniform(80, 110, size=N_UE)
        path_loss_linear = 10 ** (-path_loss_db / 10.)

        if self.prev_fading is None:
            # Initial draw: CN(0, 1), i.e. component std 1/sqrt(2).
            real_part = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            imag_part = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            fading = real_part + 1j * imag_part
        else:
            # FIX: the innovation must be CN(0, 1) (component std 1/sqrt(2)),
            # not N(0, sigma_e); otherwise the fading power decays from 1 to
            # ~2*sigma_e^2 after the first step and the channel collapses.
            inno_real = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            inno_imag = np.random.normal(0, 1 / np.sqrt(2), N_UE)
            innovation = inno_real + 1j * inno_imag
            fading = self.rho * self.prev_fading + np.sqrt(1 - self.rho ** 2) * innovation

        self.prev_fading = fading
        true_gain = path_loss_linear * np.abs(fading) ** 2

        # Multiplicative estimation error.  FIX: clip so the estimate stays
        # strictly positive even for large sigma_e (a negative gain would
        # later yield a negative SNR and NaNs in log2(1 + snr)).
        error = np.random.normal(0, self.sigma_e, N_UE)
        est_gain = np.maximum(true_gain * (1 + error), 1e-15)

        return est_gain, true_gain


# ==============================
# 3. IoT Device Class
# ==============================
class IoTDevice:
    """A single NB-IoT terminal with its QoS requirements and link state."""

    def __init__(self, config):
        self.config = config
        # Per-device requirements, drawn uniformly from the configured ranges.
        self.data_size = np.random.uniform(*config.data_size_range)        # bits to deliver
        self.reliability_req = np.random.uniform(*config.reliability_range)
        self.delay_req = np.random.uniform(*config.delay_range)            # ms budget

        # Radio constants copied from the global config.
        self.tx_power_dbm = config.tx_power_dbm
        self.noise_figure_db = config.noise_figure_db
        self.thermal_noise_dbm = config.thermal_noise_dbm

        # Running transmission state.
        self.transmitted_data = 0    # bits delivered so far
        self.transmission_time = 0   # ms spent transmitting
        self.failed_count = 0        # number of failed resource units
        self.trans_done = False      # True once data_size bits are delivered
        self.history = []            # (duration_ms, success, bits, snr_db) per RU

    def calculate_achievable_rate(self, channel_gain, bandwidth_hz):
        """Shannon rate in bps over `bandwidth_hz`, with spectral efficiency
        capped at 4 bit/s/Hz (highest assumed modulation: 16QAM)."""
        power_mw = 10 ** (self.tx_power_dbm / 10.)
        noise_dbm = self.thermal_noise_dbm + 10 * np.log10(bandwidth_hz) + self.noise_figure_db
        snr = power_mw * channel_gain / (10 ** (noise_dbm / 10.))

        # Shannon spectral efficiency, limited by the modulation order.
        efficiency = np.minimum(np.log2(1 + snr), 4.0)
        return efficiency * bandwidth_hz

    def update_transmission(self, channel_gain, ru_bandwidth_hz, duration_ms):
        """Account one resource-unit transmission.

        Returns True when all data has been delivered, False when a QoS
        constraint (delay or reliability) is violated, and None while the
        transfer is still in progress.
        """
        if self.trans_done:
            return True

        # Bits carried by this RU at the achievable rate.
        rate_bps = self.calculate_achievable_rate(channel_gain, ru_bandwidth_hz)
        bits_this_ru = rate_bps * (duration_ms / 1000.0)

        # SNR in dB decides whether this RU counts as a success.
        noise_dbm = self.thermal_noise_dbm + 10 * np.log10(ru_bandwidth_hz) + self.noise_figure_db
        snr_linear = (10 ** (self.tx_power_dbm / 10.)) * channel_gain / (10 ** (noise_dbm / 10.))
        snr_db = 10 * np.log10(snr_linear + 1e-9)
        success = (snr_db >= 0)  # 0 dB minimum required SNR

        if success:
            self.transmitted_data += bits_this_ru
        else:
            self.failed_count += 1

        self.transmission_time += duration_ms
        self.history.append((duration_ms, success, bits_this_ru, snr_db))

        # Finished?
        if self.transmitted_data >= self.data_size:
            self.trans_done = True
            return True

        # QoS constraint checks.
        if self.transmission_time > self.delay_req:
            return False
        if 1 - self.failed_count / max(1, len(self.history)) < self.reliability_req:
            return False

        return None


# ==============================
# 4. Resource Unit Types
# ==============================
class RUType:
    """A resource-unit shape: (#subcarriers, #slots, subcarrier spacing in Hz)."""

    def __init__(self, freq_size, time_size, subcarrier_spacing):
        self.freq_size = freq_size
        self.time_size = time_size
        self.subcarrier_spacing = subcarrier_spacing

    @property
    def total_bandwidth_hz(self):
        """Aggregate bandwidth spanned by this RU's subcarriers."""
        return self.subcarrier_spacing * self.freq_size


# ==============================
# 5. Resource Manager
# ==============================
class ResourceManager:
    """Occupancy grid over N_RB resource blocks x N_SLOT time slots."""

    def __init__(self, N_RB, N_SLOT):
        self.N_RB = N_RB
        self.N_SLOT = N_SLOT
        # Allowed allocation sizes (inclusive bounds).
        self.min_rb_alloc = 1
        self.max_rb_alloc = 3
        self.min_slot_alloc = 2
        self.max_slot_alloc = 4
        self.reset()

    def reset(self):
        """Clear the grid; 0 = free, k+1 = occupied by device k."""
        self.grid = np.zeros((self.N_RB, self.N_SLOT), dtype=int)

    def can_allocate(self, rb_start, rb_end, slot_start, slot_end):
        """True iff the rectangle [rb_start:rb_end) x [slot_start:slot_end)
        lies inside the grid and is entirely free."""
        if rb_start < 0 or rb_end > self.N_RB:
            return False
        if slot_start < 0 or slot_end > self.N_SLOT:
            return False
        window = self.grid[rb_start:rb_end, slot_start:slot_end]
        return np.all(window == 0)

    def find_best_allocation(self, data_size, channel_gain, bandwidth_per_rb):
        """Pick the (rb, slot) combination using the fewest rb*slot resources
        that can carry `data_size` bits (very simplified demo model: the
        product channel_gain * bandwidth is treated directly as the SNR)."""
        chosen_rb = 0
        chosen_slot = 0
        best_cost = float('inf')

        # Scan every legal combination; strict '<' keeps the first minimum.
        for n_rb in range(self.min_rb_alloc, self.max_rb_alloc + 1):
            for n_slot in range(self.min_slot_alloc, self.max_slot_alloc + 1):
                bw = n_rb * bandwidth_per_rb
                dur_ms = n_slot * 0.5  # each slot lasts 0.5 ms

                # Achievable bits (demo shortcut, see docstring).
                snr = channel_gain * bw
                rate = bw * np.log2(1 + snr)
                bits = rate * dur_ms / 1000  # bps * ms / 1000 -> bits

                if bits >= data_size:
                    cost = n_rb * n_slot
                    if cost < best_cost:
                        best_cost = cost
                        chosen_rb = n_rb
                        chosen_slot = n_slot

        # Fall back to the smallest legal configuration when nothing fits.
        if chosen_rb > 0:
            return chosen_rb, chosen_slot
        return self.min_rb_alloc, self.min_slot_alloc

    def get_available_block(self, required_rb, required_slot):
        """First free (freq, time) origin scanning time-major, or None."""
        for slot0 in range(self.N_SLOT - required_slot + 1):
            for rb0 in range(self.N_RB - required_rb + 1):
                if self.can_allocate(rb0, rb0 + required_rb, slot0, slot0 + required_slot):
                    return rb0, slot0
        return None

    def allocate(self, device_idx, rb_start, rb_end, slot_start, slot_end):
        """Mark the rectangle as owned by `device_idx`; False if not free."""
        if not self.can_allocate(rb_start, rb_end, slot_start, slot_end):
            return False
        self.grid[rb_start:rb_end, slot_start:slot_end] = device_idx + 1
        return True

    def get_utilization(self):
        """Fraction of grid cells currently occupied."""
        return np.count_nonzero(self.grid) / (self.N_RB * self.N_SLOT)

    def get_fragmentation(self):
        """Average number of free/busy transitions per used time slot."""
        if np.count_nonzero(self.grid) == 0:
            return 0
        transitions = 0
        for t in range(self.N_SLOT):
            transitions += np.diff(self.grid[:, t] != 0).sum()
        used_slots = np.sum(np.any(self.grid != 0, axis=0))
        if used_slots == 0:
            return 0
        return transitions / used_slots


# ==============================
# 6. NB-IoT Environment
# ==============================
class NBIoTEnv:
    """NB-IoT uplink scheduling environment.

    Each step the agent selects a device (via the first action component);
    the environment picks a resource allocation for it, simulates the
    transmission, and returns a shaped reward.  With ``robust=True`` the
    scheduler subtracts a CSI-error margin from the estimated channel gain
    before allocating resources.
    """

    def __init__(self, config, robust=False):
        self.config = config
        self.robust = robust
        self.N_UE = config.N_UE
        self.N_RB = config.N_RB
        self.N_SLOT = config.N_SLOT

        # Channel and resource-grid models.
        self.channel_model = ChannelModel(config)
        self.resource_manager = ResourceManager(self.N_RB, self.N_SLOT)

        # RU shapes; step() currently only uses the first (baseline) one.
        self.ru_types = [
            RUType(freq_size=1, time_size=1, subcarrier_spacing=15000),
            RUType(freq_size=3, time_size=2, subcarrier_spacing=15000),
            RUType(freq_size=1, time_size=4, subcarrier_spacing=3750),
        ]

        # Gym-style observation/action spaces.
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(self.N_UE + 4,), dtype=np.float32
        )
        self.action_space = spaces.Box(
            low=0.0, high=1.0, shape=(4,), dtype=np.float32
        )

        # Per-episode metric storage.
        self.metrics = defaultdict(list)

    def reset(self):
        """Start a new episode: fresh devices, channel and resource grid."""
        self.current_step = 0
        self.resource_manager.reset()
        self.channel_model.reset()

        # Generate devices and their initial channel realizations.
        self.devices = [IoTDevice(self.config) for _ in range(self.N_UE)]
        self.est_gain, self.true_gain = self.channel_model.generate_channel_gain(self.N_UE)

        self.metrics = defaultdict(list)
        return self._get_state()

    def _get_state(self):
        """Observation: per-UE normalized gains + [util, frag, progress, success]."""
        gains = self.est_gain / (np.max(self.est_gain) + 1e-9)
        util = self.resource_manager.get_utilization()
        frag = self.resource_manager.get_fragmentation()
        progress = self.current_step / self.N_SLOT
        completed = sum(1 for d in self.devices if d.trans_done)
        success_ratio = completed / self.N_UE
        return np.concatenate([gains, [util, frag, progress, success_ratio]]).astype(np.float32)

    def calculate_robust_margin(self, channel_gain):
        """Chebyshev-style margin subtracted from the estimated gain.

        Returns 0 for the non-robust scheduler.  The margin grows with the
        CSI error variance, is damped for below-average channels, and is
        clipped to the configured relative [min_margin, max_margin] bounds.
        """
        if not self.robust:
            return 0

        confidence = self.config.robust_confidence
        variance = (self.config.sigma_e * channel_gain) ** 2

        # Chebyshev-type bound: P(|err| > margin) <= variance / margin^2.
        base_margin = np.sqrt(variance / (1 - confidence))

        # Don't over-penalize UEs whose channel is already below average.
        channel_condition = channel_gain / np.mean(self.est_gain)
        adaptive_factor = 1.0 if channel_condition > 1 else channel_condition
        margin = base_margin * adaptive_factor

        min_margin = self.config.min_margin * channel_gain
        max_margin = self.config.max_margin * channel_gain * (1 + self.config.sigma_e)
        return np.clip(margin, min_margin, max_margin)

    def step(self, action):
        """Schedule one device and advance the simulation.

        Returns ``(state, reward, done, info)``; ``info`` carries a status
        string and, when an allocation was performed, the current metrics.
        """
        self.current_step += 1

        # Decode the scheduled device from the first action component.
        device_idx = int(np.clip(action[0] * self.N_UE, 0, self.N_UE - 1))
        device = self.devices[device_idx]

        # FIX: every early exit must still consult _check_done(); the old
        # hard-coded done=False could spin an episode forever (e.g. a full
        # resource grid kept returning "no_resource" with done=False,
        # bypassing the N_SLOT*2 step cap inside _check_done()).
        if device.trans_done:
            return self._get_state(), -5.0, self._check_done(), {"status": "device_completed"}

        # Effective channel gain (robust margin subtracted when enabled).
        channel_gain = self.est_gain[device_idx]
        if self.robust:
            margin = self.calculate_robust_margin(channel_gain)
            effective_gain = max(1e-9, channel_gain - margin)
        else:
            effective_gain = channel_gain

        # Smallest RB/slot combination that can carry the remaining data.
        remaining_data = max(0, device.data_size - device.transmitted_data)
        required_rb, required_slot = self.resource_manager.find_best_allocation(
            remaining_data,
            effective_gain,
            self.ru_types[0].total_bandwidth_hz  # bandwidth of the baseline RU
        )

        # Find a free rectangle in the grid.
        allocation = self.resource_manager.get_available_block(required_rb, required_slot)
        if allocation is None:
            return self._get_state(), -1.0, self._check_done(), {"status": "no_resource"}

        freq_start, time_start = allocation
        freq_end = freq_start + required_rb
        time_end = time_start + required_slot

        # Claim the resources.
        if not self.resource_manager.allocate(device_idx, freq_start, freq_end, time_start, time_end):
            return self._get_state(), -1.0, self._check_done(), {"status": "allocation_failed"}

        # Simulate the transmission over the allocated block.
        # NOTE(review): the transmission outcome is driven by the
        # (margin-reduced) estimated gain, not self.true_gain — confirm
        # whether the true channel was meant to decide success here.
        bandwidth = required_rb * self.ru_types[0].total_bandwidth_hz
        duration_ms = required_slot * 0.5  # each slot lasts 0.5 ms
        trans_status = device.update_transmission(effective_gain, bandwidth, duration_ms)

        # Evolve the channel for the next step.
        self.est_gain, self.true_gain = self.channel_model.generate_channel_gain(self.N_UE)

        # Reward, next observation and termination.
        reward = self._calculate_reward(device, trans_status)
        state = self._get_state()
        done = self._check_done()

        # FIX: report the tri-state outcome faithfully; previously both
        # False (QoS failure) and None (in progress) mapped to "ongoing".
        if trans_status is True:
            status = "success"
        elif trans_status is False:
            status = "failed"
        else:
            status = "ongoing"

        metrics = self.get_current_metrics()
        return state, reward, done, {
            "status": status,
            "metrics": metrics
        }

    def get_current_metrics(self):
        """Snapshot of success rate, QoS satisfaction and grid utilization."""
        metrics = {}
        completed = 0
        qos_satisfied = 0
        for device in self.devices:
            if device.trans_done:
                completed += 1
                reliab = 1 - device.failed_count / max(1, len(device.history))
                # QoS satisfied only when both reliability and delay hold.
                if (reliab >= device.reliability_req and
                        device.transmission_time <= device.delay_req):
                    qos_satisfied += 1

        metrics['success_rate'] = completed / self.N_UE
        metrics['qos_satisfaction'] = qos_satisfied / self.N_UE
        metrics['resource_util'] = self.resource_manager.get_utilization()
        return metrics

    def _calculate_reward(self, device, trans_status):
        """Shaped reward: completion/QoS bonuses, failure penalty, progress
        shaping, plus a utilization bonus and a fragmentation penalty."""
        reward = 0.0
        if trans_status is True:  # transfer fully completed
            reward += 20.0
            if device.transmission_time <= device.delay_req:
                reward += 10.0
            actual_reliab = 1 - device.failed_count / max(1, len(device.history))
            if actual_reliab >= device.reliability_req:
                reward += 10.0
        elif trans_status is False:  # QoS constraint violated
            reward -= 15.0
        else:  # still in progress
            progress = device.transmitted_data / device.data_size
            if progress > 0:
                reward += progress * 5.0
            time_pressure = device.transmission_time / device.delay_req
            if time_pressure > 0.8:  # approaching the delay budget
                reward -= 5.0 * (time_pressure - 0.8)

        # Encourage dense grid usage ...
        util = self.resource_manager.get_utilization()
        reward += util * 10.0

        # ... but discourage fragmented allocations.
        frag = self.resource_manager.get_fragmentation()
        reward -= frag * 5.0
        return reward

    def _check_done(self):
        """Episode ends at the step cap or once every device has either
        finished or blown its delay budget."""
        if self.current_step > self.N_SLOT * 2:
            return True
        for d in self.devices:
            if not d.trans_done and d.transmission_time <= d.delay_req:
                return False
        return True


# ==============================
# 7. PPO Implementation
# ==============================
class PPOBuffer:
    """Rollout storage with GAE(lambda) advantage computation."""

    def __init__(self, config):
        self.gamma = config.gamma
        self.lmbda = config.lambda_gae
        self.reset()

    def reset(self):
        """Drop all stored transitions."""
        self.states, self.actions = [], []
        self.rewards, self.dones = [], []
        self.log_probs, self.values = [], []

    def store(self, state, action, reward, done, log_prob, value):
        """Append one transition."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.dones.append(done)
        self.log_probs.append(log_prob)
        self.values.append(value)

    def finish_trajectory(self):
        """Compute GAE advantages and returns over everything stored so far.

        Episode boundaries inside the buffer are handled through the
        stored done flags (bootstrap is masked where done is True).
        """
        T = len(self.rewards)
        gae = 0
        rev_advantages = []

        for t in range(T - 1, -1, -1):
            bootstrap = 0 if t == T - 1 else self.values[t + 1]
            mask = 1.0 - self.dones[t]
            delta = self.rewards[t] + self.gamma * bootstrap * mask - self.values[t]
            gae = delta + self.gamma * self.lmbda * mask * gae
            rev_advantages.append(gae)

        self.advantages = np.array(rev_advantages[::-1], dtype=np.float32)
        self.returns = np.array(
            [v + a for v, a in zip(self.values, self.advantages)],
            dtype=np.float32)

    def get(self):
        """Return the whole rollout as float32 numpy arrays."""
        return {
            "states": np.array(self.states, dtype=np.float32),
            "actions": np.array(self.actions, dtype=np.float32),
            "rewards": np.array(self.rewards, dtype=np.float32),
            "dones": np.array(self.dones, dtype=np.float32),
            "log_probs": np.array(self.log_probs, dtype=np.float32),
            "values": np.array(self.values, dtype=np.float32),
            "advantages": self.advantages,
            "returns": self.returns,
        }

    def clear(self):
        """Alias of reset()."""
        self.reset()


class ActorCritic(nn.Module):
    """Shared-trunk actor-critic: sigmoid-mean Gaussian policy + state value."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        width = 256  # trunk width (increased network capacity)

        # Common feature extractor.
        self.shared = nn.Sequential(
            nn.Linear(state_dim, width),
            nn.ReLU(),
            nn.Linear(width, width),
            nn.ReLU(),
        )

        # Policy head: outputs pre-sigmoid action means.
        self.actor = nn.Sequential(
            nn.Linear(width, width // 2),
            nn.ReLU(),
            nn.Linear(width // 2, action_dim),
        )

        # Value head.
        self.critic = nn.Sequential(
            nn.Linear(width, width // 2),
            nn.ReLU(),
            nn.Linear(width // 2, 1),
        )

        # Fixed exploration std; decayed externally by the agent.
        self.action_std = 0.5

    def forward(self, x):
        """Return (action_logits, state_value)."""
        features = self.shared(x)
        return self.actor(features), self.critic(features)

    def get_dist(self, x):
        """Gaussian policy whose means are sigmoid-squashed into (0, 1)."""
        logits, _ = self.forward(x)
        mu = torch.sigmoid(logits)
        return Normal(mu, torch.full_like(mu, self.action_std))


class PPOAgent:
    """Clipped-PPO agent wrapping an ActorCritic network and a rollout buffer."""

    def __init__(self, config, env):
        self.config = config
        self.env = env
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

        self.ac = ActorCritic(self.state_dim, self.action_dim)
        self.optimizer = optim.Adam(self.ac.parameters(), lr=config.lr)
        self.buffer = PPOBuffer(config)

        # Per-update loss curves for later inspection.
        self.training_history = defaultdict(list)

    def select_action(self, state):
        """Sample an action for `state`; returns (action, log_prob, value)."""
        s = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():  # rollout only — no gradient graph needed
            dist = self.ac.get_dist(s)
            _, v = self.ac(s)
            action = dist.sample()
            log_prob = dist.log_prob(action).sum(dim=-1)
        return action.numpy()[0], log_prob.item(), v.item()

    def update_policy(self):
        """Run config.ppo_epochs clipped-PPO passes over the buffered
        rollout, record the losses, then discard the rollout."""
        data = self.buffer.get()

        states = torch.FloatTensor(data["states"])
        actions = torch.FloatTensor(data["actions"])
        advantages = torch.FloatTensor(data["advantages"])
        returns = torch.FloatTensor(data["returns"])
        old_log_probs = torch.FloatTensor(data["log_probs"])

        # Normalize advantages for gradient stability.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        for _ in range(self.config.ppo_epochs):
            dist = self.ac.get_dist(states)
            _, values = self.ac(states)
            log_probs = dist.log_prob(actions).sum(dim=-1)

            # Clipped surrogate policy objective.
            ratio = torch.exp(log_probs - old_log_probs)
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.config.clip_ratio, 1 + self.config.clip_ratio) * advantages
            policy_loss = -torch.min(surr1, surr2).mean()

            # Critic regression toward the empirical returns.
            value_loss = F.mse_loss(values.squeeze(), returns)

            # Entropy bonus keeps exploration alive.
            entropy = dist.entropy().sum(dim=-1).mean()

            loss = (policy_loss
                    + self.config.value_loss_coef * value_loss
                    - self.config.entropy_coef * entropy)

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.ac.parameters(), self.config.max_grad_norm)
            self.optimizer.step()

            self.training_history["policy_loss"].append(policy_loss.item())
            self.training_history["value_loss"].append(value_loss.item())
            self.training_history["entropy"].append(entropy.item())

        # FIX: drop the consumed rollout.  Without this the buffer keeps
        # growing across epochs and old transitions (with stale log-probs)
        # are replayed in every later update, silently breaking PPO's
        # on-policy assumption and leaking memory.
        self.buffer.clear()

    def decay_action_std(self):
        """Geometrically decay the exploration std down to its floor."""
        self.ac.action_std = max(
            self.ac.action_std * self.config.action_std_decay_rate,
            self.config.min_action_std
        )


# ==============================
# 8. Training and Evaluation
# ==============================
def train_epoch(agent, env, config):
    """Collect config.episodes_per_epoch rollouts, update the policy once,
    and return the epoch-averaged metrics."""
    collected = {
        'reward': [],
        'success_rate': [],
        'qos_satisfaction': [],
        'resource_util': []
    }

    for _ in range(config.episodes_per_epoch):
        obs = env.reset()
        finished = False
        total_reward = 0

        while not finished:
            act, logp, value = agent.select_action(obs)
            nxt, rew, finished, info = env.step(act)

            agent.buffer.store(obs, act, rew, finished, logp, value)
            obs = nxt
            total_reward += rew

            # Fold any reported environment metrics into the epoch stats.
            for key, val in info.get('metrics', {}).items():
                if key in collected:
                    collected[key].append(val)

        agent.buffer.finish_trajectory()
        collected['reward'].append(total_reward)

    agent.update_policy()

    # Average each metric over the epoch.
    return {name: np.mean(vals) for name, vals in collected.items()}


def train_and_evaluate(config):
    """Train a robust and a non-robust PPO agent side by side and return
    their per-epoch metric histories."""
    # Iteration order matters: robust is always trained first each epoch.
    envs = {
        "robust": NBIoTEnv(config, robust=True),
        "nonrobust": NBIoTEnv(config, robust=False),
    }
    agents = {name: PPOAgent(config, env) for name, env in envs.items()}
    history = {name: defaultdict(list) for name in envs}

    for epoch in range(config.epochs):
        epoch_metrics = {}
        for name, env in envs.items():
            epoch_metrics[name] = train_epoch(agents[name], env, config)
            for key, val in epoch_metrics[name].items():
                history[name][key].append(val)

        # Print progress every 5 epochs.
        if (epoch + 1) % 5 == 0:
            m_r = epoch_metrics["robust"]
            m_n = epoch_metrics["nonrobust"]
            print(f"\n[Epoch {epoch + 1}/{config.epochs}]")
            print(f"Robust: reward={m_r['reward']:.2f}, "
                  f"success_rate={m_r['success_rate']:.2f}, "
                  f"qos={m_r['qos_satisfaction']:.2f}")
            print(f"Non-robust: reward={m_n['reward']:.2f}, "
                  f"success_rate={m_n['success_rate']:.2f}, "
                  f"qos={m_n['qos_satisfaction']:.2f}")

        # Decay exploration for both agents.
        for agent in agents.values():
            agent.decay_action_std()

    return history


def test_performance(agent, env, episodes=5):
    """测试智能体性能"""
    metrics_list = []

    for _ in range(episodes):
        state = env.reset()
        done = False
        episode_metrics = None

        while not done:
            action, _, _ = agent.select_action(state)
            state, _, done, info = env.step(action)
            if 'metrics' in info:
                episode_metrics = info['metrics']

        if episode_metrics:
            metrics_list.append(episode_metrics)

    if not metrics_list:
        return {'success_rate': 0, 'resource_util': 0, 'qos_satisfaction': 0}

    avg_metrics = {}
    for key in metrics_list[0].keys():
        avg_metrics[key] = np.mean([m[key] for m in metrics_list])
    return avg_metrics


def analyze_csi_impact(config):
    """Sweep the CSI error variance and record each scheduler's test
    performance after a short training run at every error level."""
    error_vars = np.linspace(0.05, 0.5, 10)
    results = {
        'robust': {'success_rate': [], 'resource_util': [], 'qos_satisfaction': []},
        'nonrobust': {'success_rate': [], 'resource_util': [], 'qos_satisfaction': []}
    }

    original_sigma_e = config.sigma_e

    for error_var in error_vars:
        print(f"\nTesting CSI error variance: {error_var:.2f}")
        config.sigma_e = error_var

        # Robust first, then non-robust, at every error level.
        for label, robust_flag in (('robust', True), ('nonrobust', False)):
            env = NBIoTEnv(config, robust=robust_flag)
            agent = PPOAgent(config, env)
            for _ in range(20):  # short training run
                _ = train_epoch(agent, env, config)
            scores = test_performance(agent, env)

            for key in results[label].keys():
                results[label][key].append(scores[key])

    # Restore the original error level.
    config.sigma_e = original_sigma_e
    return results


def plot_smooth_metric(epochs, data_robust, data_nonrobust, metric_name, window=5):
    """Plot moving-average curves with a +/- 1 std band for both agents.

    Blocks on input() after showing the figure so it stays on screen.
    """
    plt.figure(figsize=(10, 6))

    def _smooth(series, w):
        # Moving average, edge-padded back to the original length.
        avg = np.convolve(series, np.ones(w) / w, mode='valid')
        pad = w - 1
        return np.pad(avg, (pad // 2, pad - pad // 2), mode='edge')

    def _band(series, w):
        # Rolling std around each point (window of +/- w samples).
        devs = np.array([
            np.std(series[max(0, i - w): min(len(series), i + w)])
            for i in range(len(series))
        ])
        center = _smooth(series, w)
        return center - devs, center + devs

    # Robust curve first, then non-robust, matching the legend order.
    for series, line_style, band_color, label in (
            (data_robust, 'b-', 'blue', 'Robust'),
            (data_nonrobust, 'r--', 'red', 'Non-robust')):
        plt.plot(epochs, _smooth(series, window), line_style, label=label, linewidth=2)
        lower, upper = _band(series, window)
        plt.fill_between(epochs, lower, upper, color=band_color, alpha=0.2)

    plt.title(f'{metric_name} vs Training Epochs')
    plt.xlabel('Training Epochs')
    plt.ylabel(metric_name)
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.show()
    input("Press Enter to continue...")
    plt.close()


def plot_training_results(history, config):
    """Plot the reward / success-rate / utilization training curves."""
    x = range(1, config.epochs + 1)
    for label, key in (('Reward', 'reward'),
                       ('Success Rate', 'success_rate'),
                       ('Resource Utilization', 'resource_util')):
        plot_smooth_metric(x, history['robust'][key], history['nonrobust'][key], label)


def plot_csi_error_impact(error_vars, results):
    """Plot robust vs non-robust metrics against the CSI error variance.

    Blocks on input() after each figure so it stays on screen.
    """
    for title, key in (('Success Rate', 'success_rate'),
                       ('Resource Utilization', 'resource_util'),
                       ('QoS Satisfaction', 'qos_satisfaction')):
        plt.figure(figsize=(10, 6))
        plt.plot(error_vars, results['robust'][key], 'b-', label='Robust', linewidth=2)
        plt.plot(error_vars, results['nonrobust'][key], 'r--', label='Non-robust', linewidth=2)
        plt.title(f'{title} vs CSI Error')
        plt.xlabel('CSI Error Variance')
        plt.ylabel(title)
        plt.grid(True, alpha=0.3)
        plt.legend()
        plt.show()
        input("Press Enter to continue...")
        plt.close()


def main():
    """Entry point: train both agents, plot the curves, run the CSI sweep."""
    config = SimConfig()

    print("Starting training...")
    history = train_and_evaluate(config)

    # Training curves.
    plot_training_results(history, config)

    # CSI error sweep.
    print("\nAnalyzing CSI error impact...")
    # NOTE(review): this linspace is only the plot's x-axis and must stay
    # in sync with the identical one inside analyze_csi_impact.
    error_vars = np.linspace(0.05, 0.5, 10)
    impact = analyze_csi_impact(config)
    plot_csi_error_impact(error_vars, impact)

    print("\nSimulation completed.")


if __name__ == "__main__":
    main()
