import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from scipy.constants import c
import torch.nn.functional as F


# =========== 1. System configuration ===============
class SimConfig:
    """Container for every simulation and PPO hyper-parameter."""

    def __init__(self):
        # Number of user terminals and resource blocks.
        self.N_UE = 10
        self.N_RB = 12

        # Maximum steps per episode (tied to delay budget / subframe count).
        self.max_steps = 300

        # Channel-model parameters.
        self.rho = 0.95     # AR(1) correlation coefficient of the small-scale fading
        self.sigma_e = 0.1  # std of the channel-estimation error (tune to probe robustness gaps)

        # QoS requirements (uniform sampling ranges only).
        self.data_size_range = (1000, 2000)
        self.reliability_range = (0.9, 0.99)
        self.delay_range = (100, 300)  # milliseconds

        # PPO hyper-parameters.
        self.gamma = 0.99
        self.lambda_gae = 0.95
        self.clip_ratio = 0.2
        self.lr = 3e-4
        self.epochs = 20
        self.batch_size = 64


# =========== 2. Device class ===============
class IoTDevice:
    """A single IoT terminal with randomly drawn QoS requirements."""

    def __init__(self, config):
        # QoS targets sampled uniformly from the configured ranges.
        self.data_size = np.random.uniform(*config.data_size_range)
        self.reliability = np.random.uniform(*config.reliability_range)
        self.delay_constraint = np.random.uniform(*config.delay_range)

        # Transmission progress bookkeeping.
        self.transmitted_data = 0.0
        self.transmission_time = 0.0
        self.transmission_complete = False

    def update_transmission(self, data_bits, delta_t_ms):
        """Record that `data_bits` bits were sent over `delta_t_ms` milliseconds.

        A no-op once the device has already delivered its full payload.
        """
        if self.transmission_complete:
            return
        self.transmitted_data += data_bits
        self.transmission_time += delta_t_ms
        self.transmission_complete = self.transmitted_data >= self.data_size


# =========== 3. Channel model ===============
class ChannelModel:
    """LEO downlink channel: free-space path loss plus Rayleigh AR(1) fading."""

    def __init__(self, config):
        self.config = config
        self.satellite_height = 550e3    # orbit altitude [m]
        self.satellite_velocity = 7.6e3  # orbital speed [m/s]
        self.carrier_frequency = 2e9     # carrier frequency [Hz]
        self.wavelength = c / self.carrier_frequency

        self.rho = config.rho
        self.sigma_e = config.sigma_e

        # Rough maximum Doppler shift (extend here if a finer model is needed).
        self.max_doppler_shift = self.satellite_velocity / self.wavelength

        # Simplified Earth radius [m].
        self.EARTH_RADIUS = 6378e3

        # Place users and start the satellite at the origin of the x axis.
        self.initialize_user_positions()
        self.satellite_x = 0.0  # tracks the satellite's displacement along x

    def initialize_user_positions(self):
        """Scatter users uniformly over the circular coverage footprint."""
        # Slant range to the horizon bounds the coverage radius.
        coverage_radius = math.sqrt(2 * self.EARTH_RADIUS * self.satellite_height + self.satellite_height ** 2)
        radii = coverage_radius * np.sqrt(np.random.rand(self.config.N_UE))
        angles = 2 * np.pi * np.random.rand(self.config.N_UE)
        self.user_positions = np.stack([radii * np.cos(angles), radii * np.sin(angles)], axis=-1)
        self.user_distances = np.sqrt(self.satellite_height ** 2 + radii ** 2)

    def calculate_path_loss(self, distance):
        """Return the linear path-loss gain: FSPL plus a crude per-km atmospheric term."""
        fspl_db = 20 * np.log10(4 * math.pi * distance / self.wavelength)
        atmospheric_db = 0.01 * distance / 1000.0
        return 10 ** (-(fspl_db + atmospheric_db) / 10.0)

    def generate_small_scale_fading(self, prev_fading=None):
        """Draw (or AR(1)-evolve) complex Rayleigh fading coefficients for all users."""
        n = self.config.N_UE
        scale = 1 / np.sqrt(2)
        # Unit-power circularly-symmetric complex Gaussian sample.
        sample = np.random.normal(0, scale, n) + 1j * np.random.normal(0, scale, n)
        if prev_fading is None:
            return sample
        # AR(1): h_t = rho * h_{t-1} + sqrt(1 - rho^2) * innovation
        return self.rho * prev_fading + np.sqrt(1 - self.rho ** 2) * sample

    def generate_channel_gain(self, prev_fading=None):
        """Return (estimated_gain, true_gain, fading).

        true_gain = path_loss * |fading|^2; the estimate is the true gain
        perturbed by a multiplicative Gaussian CSI error of std sigma_e.
        """
        path_loss = self.calculate_path_loss(self.user_distances)
        fading = self.generate_small_scale_fading(prev_fading)
        true_gain = path_loss * np.abs(fading) ** 2

        # Imperfect CSI: multiplicative Gaussian perturbation.
        csi_error = np.random.normal(0, self.sigma_e, self.config.N_UE)
        return true_gain * (1 + csi_error), true_gain, fading

    def update_channel_state(self, delta_t_sec, prev_fading):
        """Advance the satellite by delta_t_sec seconds, then refresh distances and gains."""
        self.satellite_x += self.satellite_velocity * delta_t_sec

        dx = self.user_positions[:, 0] - self.satellite_x
        dy = self.user_positions[:, 1]
        ground_range = np.sqrt(dx ** 2 + dy ** 2)
        self.user_distances = np.sqrt(self.satellite_height ** 2 + ground_range ** 2)

        return self.generate_channel_gain(prev_fading)


# =========== 4. Environment ===============
class LEONBIoTEnv:
    """LEO NB-IoT scheduling environment for PPO.

    Each step represents one 1 ms subframe.  The agent picks one device and
    a resource-block count; the environment transmits data via a simplified
    Shannon-capacity model, optionally subtracting a robust (Chebyshev)
    margin from the estimated channel gain to hedge against CSI error.
    """

    def __init__(self, config, robust=True):
        self.config = config
        self.channel_model = ChannelModel(config)
        self.robust = robust

        self.delta_t = 1.0  # one step = 1 ms
        self.snr_db = 20.0
        self.snr_linear = 10 ** (self.snr_db / 10.0)

        self.current_step = 0
        self.max_steps = config.max_steps

        # Small-scale fading carried between steps for the AR(1) update.
        self.prev_fading = None

        self.reset()

    def reset(self):
        """Begin a new episode with fresh devices and a fresh channel realization."""
        self.current_step = 0
        self.devices = [IoTDevice(self.config) for _ in range(self.config.N_UE)]
        self.prev_fading = None
        # Initial channel realization.
        self.estimated_gain, self.true_gain, self.prev_fading = \
            self.channel_model.generate_channel_gain(self.prev_fading)
        return self._get_state()

    def _get_state(self):
        """Return a 1-D observation of size 3*N_UE + 1:
          - estimated channel gain per device (N_UE)
          - remaining data per device (N_UE)
          - delay constraint per device (N_UE)
          - fraction of the episode elapsed (1)
        """
        est_g = self.estimated_gain
        remain_data = np.array([d.data_size - d.transmitted_data for d in self.devices])
        delays = np.array([d.delay_constraint for d in self.devices])
        time_ratio = self.current_step / self.max_steps

        state = np.concatenate([est_g, remain_data, delays, [time_ratio]])
        return state

    def step(self, action):
        """Advance one subframe.

        action: shape (2,), continuous in [0, 1].
          action[0] -> device index,  action[1] -> number of RBs.
        Returns (next_state, reward, done, info).
        """
        device_idx, n_rb = self._parse_action(action)
        dev = self.devices[device_idx]

        # Remember whether the device had already finished BEFORE this step
        # so the terminal bonus is granted exactly once (see _calc_reward).
        was_complete = dev.transmission_complete

        # Robust margin subtracted from the estimated gain.
        robust_margin = self._calc_robust_margin(self.estimated_gain[device_idx], device_idx)
        effective_gain = max(0.0, self.estimated_gain[device_idx] - robust_margin)

        # Data deliverable this step (simplified Shannon formula, 180 kHz per RB).
        # NOTE(review): the raw linear path-loss gain is extremely small at LEO
        # distances, so effective_gain * snr_linear is nearly 0 and very little
        # data flows; confirm whether gains were meant to be normalized.
        capacity_per_rb = 180e3 * math.log2(1 + effective_gain * self.snr_linear)  # bit/s
        data_this_step = capacity_per_rb * 1e-3 * n_rb  # 1 ms -> x1e-3

        # Update the selected device's transmission progress.
        dev.update_transmission(data_this_step, self.delta_t)

        reward = self._calc_reward(device_idx, was_complete)

        # Advance time and evolve the channel.
        self.current_step += 1
        self.estimated_gain, self.true_gain, self.prev_fading = \
            self.channel_model.update_channel_state(self.delta_t * 1e-3, self.prev_fading)

        # Episode ends at the step limit or once every device has finished.
        done = (self.current_step >= self.max_steps) or all(d.transmission_complete for d in self.devices)
        info = {
            "device_idx": device_idx,
            "allocated_rbs": n_rb
        }

        return self._get_state(), reward, done, info

    def _parse_action(self, action):
        """Map a continuous [0,1]^2 action to (device_idx, n_rb)."""
        action = np.clip(action, 0.0, 1.0)

        # Device index; clamp so action[0] == 1.0 cannot index out of range.
        raw_idx = int(action[0] * self.config.N_UE)
        device_idx = min(raw_idx, self.config.N_UE - 1)

        # RB count: at least 1, at most N_RB.
        raw_rb = int(action[1] * self.config.N_RB)
        n_rb = min(max(raw_rb, 1), self.config.N_RB)

        return device_idx, n_rb

    def _calc_robust_margin(self, ch_gain, device_idx):
        """Chebyshev-style margin sqrt(var / eps) * ch_gain when robust mode is on.

        eps is the device's outage budget (1 - reliability); reliability of
        exactly 1.0 would make eps 0, so fall back to variance * ch_gain.
        """
        if not self.robust:
            return 0.0
        variance = self.config.sigma_e ** 2
        eps = 1.0 - self.devices[device_idx].reliability
        if eps <= 0:
            return variance * ch_gain
        margin = math.sqrt(variance / eps) * ch_gain
        return margin

    def _calc_reward(self, device_idx, was_complete=False):
        """Reward for selecting device_idx this step.

          - finished on this very step, within the delay budget: +10
          - finished on this very step, but late: -5
          - otherwise (still transmitting, or device already done): -0.01

        BUG FIX: the original re-granted the +10/-5 terminal reward every time
        an already-complete device was selected (update_transmission is a no-op
        but transmission_complete stays True), letting the policy farm reward
        by re-picking finished devices.  The bonus is now paid only on the
        completion transition, flagged by `was_complete`.
        """
        dev = self.devices[device_idx]
        if dev.transmission_complete and not was_complete:
            return 10.0 if dev.transmission_time <= dev.delay_constraint else -5.0
        return -0.01


# =========== 5. ActorCritic (PPO network) ===============
class ActorCritic(nn.Module):
    """Shared-trunk actor-critic for continuous actions constrained to [0, 1]."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # Shared feature extractor.
        self.fc = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
        )
        # Gaussian policy head: per-dimension mean and std.
        self.actor_mean = nn.Linear(128, action_dim)
        self.actor_std = nn.Linear(128, action_dim)

        # State-value head.
        self.critic = nn.Linear(128, 1)

    def forward(self, state):
        features = self.fc(state)
        # Sigmoid keeps the mean inside the valid action box [0, 1];
        # softplus (+ epsilon) keeps the std strictly positive.
        action_mean = torch.sigmoid(self.actor_mean(features))
        action_std = F.softplus(self.actor_std(features)) + 1e-6
        return action_mean, action_std, self.critic(features)

    def get_action_and_value(self, state):
        """Sample an action for single/batched states.

        Returns (action, log_prob, value); log_prob is summed over action dims.
        """
        action_mean, action_std, value = self(state)
        policy = Normal(action_mean, action_std)
        sampled = policy.sample()
        return sampled, policy.log_prob(sampled).sum(dim=-1), value

    def evaluate_action(self, states, actions):
        """Log-prob, entropy and value for given (state, action) pairs.

        Used by the PPO update.
        """
        action_mean, action_std, values = self(states)
        policy = Normal(action_mean, action_std)
        return policy.log_prob(actions).sum(dim=-1), policy.entropy().sum(dim=-1), values

import matplotlib.pyplot as plt
# =========== 6. PPO agent (simplified example) ===========
class PPO:
    """Minimal PPO trainer for the LEO NB-IoT environment."""

    def __init__(self, config, env):
        self.config = config
        self.env = env

        state_dim = len(env.reset())  # 3 * N_UE + 1
        action_dim = 2  # [device selector, RB count]
        self.actor_critic = ActorCritic(state_dim, action_dim)
        self.optimizer = optim.Adam(self.actor_critic.parameters(), lr=config.lr)
        self.gamma = config.gamma
        self.lam = config.lambda_gae
        self.clip_ratio = config.clip_ratio

        self.all_rewards = []
        self.all_success_rates = []

    def collect_trajectory(self):
        """Roll out one full episode (until done=True); return per-step lists."""
        states, actions, rewards, log_probs, dones, values = [], [], [], [], [], []
        state = self.env.reset()

        while True:
            s_tensor = torch.FloatTensor(state).unsqueeze(0)
            with torch.no_grad():
                action, lp, val = self.actor_critic.get_action_and_value(s_tensor)
            next_state, r, done, info = self.env.step(action.numpy()[0])

            states.append(state)
            actions.append(action.numpy()[0])
            rewards.append(r)
            log_probs.append(lp.item())
            values.append(val.item())
            dones.append(done)

            state = next_state
            if done:
                break
        return states, actions, rewards, log_probs, dones, values

    def compute_gae(self, rewards, values, dones, last_val):
        """Generalized Advantage Estimation; returns (advantages, returns)."""
        advantages = []
        gae = 0
        values = values + [last_val]
        for i in reversed(range(len(rewards))):
            mask = 1 - int(dones[i])  # no bootstrapping past a terminal step
            delta = rewards[i] + self.gamma * values[i + 1] * mask - values[i]
            gae = delta + self.gamma * self.lam * mask * gae
            advantages.insert(0, gae)
        returns = [advantages[i] + values[i] for i in range(len(rewards))]
        return advantages, returns

    def update_policy(self, traj):
        """Run several clipped-surrogate gradient steps on one trajectory."""
        states, actions, rewards, old_log_probs, dones, values = traj
        # Bootstrap the final state's value if the episode did not terminate.
        # NOTE(review): collect_trajectory only stops when done=True, so this
        # branch is currently dead; kept for safety with truncated rollouts.
        last_val = 0
        if not dones[-1]:
            s_tensor = torch.FloatTensor(states[-1]).unsqueeze(0)
            with torch.no_grad():
                _, _, v_ = self.actor_critic.get_action_and_value(s_tensor)
            last_val = v_.item()

        advantages, returns = self.compute_gae(rewards, values, dones, last_val)

        # np.array first: stacking a list of ndarrays is much faster and avoids
        # the per-element conversion torch does on a raw list of arrays.
        states_t = torch.FloatTensor(np.array(states))
        actions_t = torch.FloatTensor(np.array(actions))
        old_log_probs_t = torch.FloatTensor(old_log_probs)
        advantages_t = torch.FloatTensor(advantages)
        returns_t = torch.FloatTensor(returns)

        # Simplified: several full-batch updates (could be mini-batched instead).
        for _ in range(5):
            logp, entropy, v = self.actor_critic.evaluate_action(states_t, actions_t)
            # BUG FIX: the critic outputs shape (N, 1) while returns_t is (N,);
            # subtracting them directly broadcast to an (N, N) matrix and
            # silently corrupted the value loss.  Flatten the values first.
            v = v.squeeze(-1)
            ratio = torch.exp(logp - old_log_probs_t)
            surr1 = ratio * advantages_t
            surr2 = torch.clamp(ratio, 1 - self.clip_ratio, 1 + self.clip_ratio) * advantages_t

            policy_loss = -torch.min(surr1, surr2).mean()
            value_loss = 0.5 * (returns_t - v).pow(2).mean()
            entropy_loss = -0.01 * entropy.mean()
            loss = policy_loss + value_loss + entropy_loss

            self.optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.actor_critic.parameters(), 0.5)
            self.optimizer.step()

    def train(self, episodes=50):
        """Run `episodes` collect/update cycles, logging reward and success rate."""
        for ep in range(episodes):
            traj = self.collect_trajectory()
            self.update_policy(traj)

            ep_reward = sum(traj[2])  # total episode reward
            self.all_rewards.append(ep_reward)

            # Success rate: devices that finished within their delay budget
            # by the end of the episode.
            completes = 0
            for d in self.env.devices:
                if d.transmission_complete and d.transmission_time <= d.delay_constraint:
                    completes += 1
            success_rate = completes / len(self.env.devices)
            self.all_success_rates.append(success_rate)

            print(f"Episode {ep+1}/{episodes}, Reward={ep_reward:.2f}, SuccessRate={success_rate:.2f}")

        # Optionally plot the learning curves afterwards:
        #self.plot_learning_curve()

    def plot_learning_curve(self):
        """Plot episode reward and success rate over the training run."""
        plt.figure()
        plt.subplot(2, 1, 1)
        plt.plot(self.all_rewards, label='Episode Reward')
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.legend()
        plt.grid(True)

        plt.subplot(2, 1, 2)
        plt.plot(self.all_success_rates, label='Success Rate')
        plt.xlabel('Episode')
        plt.ylabel('Rate')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()
        plt.show()


def test_fixed_policy(env):
    """Baseline: each step, give ALL resource blocks to the first unfinished device.

    Runs one episode on `env` and prints the total reward and the fraction of
    devices that completed within their delay constraint (checks whether most
    devices can finish inside max_steps).
    """
    env.reset()
    done = False
    total_reward = 0.0
    while not done:
        pending = [i for i, d in enumerate(env.devices) if not d.transmission_complete]
        if pending:
            # Encode (first pending device, full RB allocation) into [0, 1]^2.
            action = np.array([pending[0] / env.config.N_UE, 1.0])
        else:
            # Everything finished: any valid action works until the env flags done.
            action = np.array([0.0, 1.0])

        _, reward, done, _ = env.step(action)
        total_reward += reward

    # Count devices that finished within their delay budget.
    finished_in_time = sum(
        1 for d in env.devices
        if d.transmission_complete and d.transmission_time <= d.delay_constraint
    )
    success_rate = finished_in_time / env.config.N_UE
    print(f"[Test Fixed Policy] Reward={total_reward:.2f}, SuccessRate={success_rate:.2f}")


def main():
    """Train the robust and non-robust PPO variants back to back."""
    config = SimConfig()

    # 1) Robust variant (CSI-error margin enabled).
    robust_env = LEONBIoTEnv(config, robust=True)
    robust_ppo = PPO(config, robust_env)
    print("=== Training Robust PPO ===")
    robust_ppo.train(episodes=20)

    # 2) Non-robust baseline.
    non_robust_env = LEONBIoTEnv(config, robust=False)
    non_robust_ppo = PPO(config, non_robust_env)
    print("\n=== Training Non-Robust PPO ===")
    non_robust_ppo.train(episodes=20)

    # Further comparison of robust_ppo vs non_robust_ppo could go here,
    # e.g. repeated evaluation episodes measuring success rate, mean reward,
    # and timeout rate.



# Script entry point: run the full training demo when executed directly.
if __name__ == "__main__":
    main()
