import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
import random
import copy
from tqdm import tqdm

# Device configuration: prefer GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Target information (Table 5): threat level, communication range/power,
# jamming range, and number of frequency points per target
targets = [
    {"threat": 6, "com_range": 20, "com_power": 100, "jam_range": 30, "freq_points": 33},
    {"threat": 5, "com_range": 20, "com_power": 100, "jam_range": 50, "freq_points": 32},
    {"threat": 4, "com_range": 50, "com_power": 100, "jam_range": 70, "freq_points": 64},
    {"threat": 3, "com_range": 50, "com_power": 100, "jam_range": 90, "freq_points": 64},
    {"threat": 2, "com_range": 20, "com_power": 100, "jam_range": 110, "freq_points": 128},
    {"threat": 1, "com_range": 20, "com_power": 100, "jam_range": 130, "freq_points": 128}
]

# Overall frequency range of the scenario
freq_min = 200  # MHz
freq_max = 400  # MHz


# 生成频率点分布（模拟图5）
def generate_frequency_points():
    """Build the simulated frequency-point distribution per target (Fig. 5).

    Returns:
        dict mapping target index (0-5) to a numpy array of evenly spaced
        frequency points (MHz) inside that target's band.
    """
    # (band start MHz, band end MHz, number of points) for targets 1..6
    band_specs = [
        (225, 275, 33),
        (250, 300, 32),
        (275, 325, 64),
        (300, 350, 64),
        (325, 375, 128),
        (350, 400, 128),
    ]
    return {idx: np.linspace(lo, hi, count)
            for idx, (lo, hi, count) in enumerate(band_specs)}


# 环境类
# Environment
class CommunicationJammingEnv:
    """Jamming environment over the six communication targets.

    Each step, a jamming station picks a sub-band (A1) and a bandwidth
    option (A2, Eq. 8); rewards follow the paper's Eqs. 9-12 (simplified).
    An episode ends when all jammers are used or every target is jammed.
    """

    def __init__(self, num_jammers):
        self.num_targets = len(targets)
        self.num_jammers = num_jammers
        self.freq_points = generate_frequency_points()
        self.all_freqs = np.concatenate(list(self.freq_points.values()))
        self.all_freqs.sort()
        self.max_bandwidth = 3.0  # MHz
        self.min_bandwidth = 1.0  # MHz
        self.bandwidth_step = 0.2  # MHz
        self.num_bandwidth_options = 11  # 1.0-3.0 MHz in 0.2 MHz steps
        self.suppression_coefficient = 2.0  # suppression coefficient
        self.jam_power = 30  # kW
        self.reset()

        # Partition the spectrum into sub-bands (Algorithm 1)
        self.sub_bands = self._segment_frequency_bands()
        self.num_sub_bands = len(self.sub_bands)

    def _segment_frequency_bands(self, max_bandwidth=3.0):
        """Split the sorted frequency points into contiguous sub-bands no
        wider than max_bandwidth MHz (Algorithm 1, step 2)."""
        sub_bands = []
        start = self.all_freqs[0]

        for i in range(1, len(self.all_freqs)):
            if self.all_freqs[i] - start > max_bandwidth:
                sub_bands.append((start, self.all_freqs[i - 1]))
                start = self.all_freqs[i]

        sub_bands.append((start, self.all_freqs[-1]))
        return sub_bands

    def reset(self):
        """Reset all episode state and return the initial state vector."""
        # Per-target jammed flags
        self.jammed_targets = [False] * self.num_targets
        # Number of jamming stations consumed so far
        self.jammers_used = 0
        # Set of jammed frequency points (currently unused placeholder)
        self.jammed_freqs = set()
        # Current target index (targets are ordered by threat level)
        self.current_target_idx = 0
        # Total covered frequency points this episode
        self.total_jammed_points = 0
        # Total frequency points across all targets
        self.total_freq_points = sum(len(points) for points in self.freq_points.values())
        # Normalized previous sub-band action. BUGFIX: the original read
        # self.last_A1 in _get_state but never assigned it anywhere, so the
        # second state fetch of every episode raised AttributeError.
        self.last_A1 = 0.0
        return self._get_state()

    def _get_state(self):
        """Return the state vector [C_A, C_S, g, A1] (S1), or None when no
        jamming stations remain.

        NOTE: C_A and C_S are random placeholders here; the paper derives
        them from sub-band coverage statistics.
        """
        if self.jammers_used >= self.num_jammers:
            return None

        # Current target threat, normalized by the maximum threat level (6)
        g = targets[self.current_target_idx]["threat"] / 6.0
        C_A = random.random()
        C_S = random.random()
        # Previous sub-band action (random seed value on the first step)
        A1 = random.random() if self.jammers_used == 0 else self.last_A1
        return [C_A, C_S, g, A1]

    def step(self, A1, A2):
        """Apply one jamming action.

        Args:
            A1: sub-band index chosen by the band decision network.
            A2: bandwidth option index; actual bandwidth per Eq. 8.

        Returns:
            (next_state, r1, r2, done) where next_state is None when the
            episode has no jammers left.
        """
        # Actual jamming bandwidth (Eq. 8)
        bandwidth = self.max_bandwidth - A2 * self.bandwidth_step

        # Clamp the band index and jam at the sub-band center
        band_idx = min(A1, self.num_sub_bands - 1)
        band_start, band_end = self.sub_bands[band_idx]
        jam_center = (band_start + band_end) / 2

        # Count target frequency points covered by the jamming bandwidth
        jammed_points = 0
        for idx, freqs in self.freq_points.items():
            if self.jammed_targets[idx]:
                continue

            in_band = [f for f in freqs
                       if jam_center - bandwidth / 2 <= f <= jam_center + bandwidth / 2]
            jammed_points += len(in_band)

            # Simplified success criterion: more than a third of the
            # target's points covered by a single action
            if len(in_band) > len(freqs) / 3:
                self.jammed_targets[idx] = True

        # Update episode bookkeeping
        self.total_jammed_points += jammed_points
        self.jammers_used += 1
        # Remember the chosen sub-band (normalized) for the next state
        self.last_A1 = band_idx / max(self.num_sub_bands, 1)

        # Rewards
        r_base, r_appendix = self._calculate_reward(jammed_points)
        r1 = r_base + r_appendix
        r2 = self._calculate_r2(bandwidth, jammed_points)

        # Advance to the next target in threat order
        if self.current_target_idx < self.num_targets - 1:
            self.current_target_idx += 1

        next_state = self._get_state()
        done = self.jammers_used >= self.num_jammers or all(self.jammed_targets)

        return next_state, r1, r2, done

    def _calculate_reward(self, jammed_points):
        """Band-decision reward components (Eqs. 9-11)."""
        target_idx = self.current_target_idx
        target_threat = targets[target_idx]["threat"]

        # r_base: threat-scaled bonus when the current target was jammed,
        # small penalty otherwise
        if self.jammed_targets[target_idx]:
            r_base = target_threat * 10
        else:
            r_base = -2

        # r_appendix: bonus when this action covered more than its fair
        # share of all frequency points
        threshold = self.total_freq_points / (3 * self.num_jammers)
        if jammed_points > threshold:
            r_appendix = 10
        else:
            r_appendix = 0

        return r_base, r_appendix

    def _calculate_r2(self, bandwidth, jammed_points):
        """Bandwidth-decision reward (Eq. 12, simplified redundancy term)."""
        if jammed_points > 0:
            return 10 * bandwidth
        return -2


# 神经网络模型
# Q-network model
class DQN(nn.Module):
    """Three-layer fully connected Q-network: input -> 64 -> 32 -> output."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, output_dim)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)


# BETMR经验回放机制
class BETMR:
    def __init__(self, capacity, expert_capacity):
        self.normal_memory = deque(maxlen=capacity)
        self.expert_memory = deque(maxlen=expert_capacity)
        self.thresholds = [50, 100, 150, 200]  # 阈值集
        self.current_threshold_idx = 0

    def push(self, experience, is_expert=False):
        if is_expert:
            self.expert_memory.append(experience)
        else:
            self.normal_memory.append(experience)

    def sample(self, batch_size, rho=0.7):
        normal_batch = min(batch_size // 2, len(self.normal_memory))
        expert_batch = batch_size - normal_batch

        normal_sample = random.sample(self.normal_memory, normal_batch) if normal_batch > 0 else []
        expert_sample = random.sample(self.expert_memory, expert_batch) if expert_batch > 0 else []

        return normal_sample + expert_sample

    def update_threshold(self, total_reward):
        """更新阈值 (自举机制)"""
        if total_reward < self.thresholds[self.current_threshold_idx]:
            # 保持当前阈值
            return False

        if self.current_threshold_idx < len(self.thresholds) - 1:
            self.current_threshold_idx += 1
            self.expert_memory.clear()
            return True

        return False


# BHJM算法代理
# BHJM algorithm agent
class BHJMAgent:
    """Two coupled DQN decision-makers: network 1 picks the jamming
    sub-band (A1), network 2 the jamming bandwidth (A2)."""

    def __init__(self, state_dim1, action_dim1, state_dim2, action_dim2):
        # Jamming-band decision network (online + target)
        self.q_net1 = DQN(state_dim1, action_dim1).to(device)
        self.target_q_net1 = DQN(state_dim1, action_dim1).to(device)
        self.target_q_net1.load_state_dict(self.q_net1.state_dict())
        self.optimizer1 = optim.Adam(self.q_net1.parameters(), lr=0.001)

        # Jamming-bandwidth decision network (online + target)
        self.q_net2 = DQN(state_dim2, action_dim2).to(device)
        self.target_q_net2 = DQN(state_dim2, action_dim2).to(device)
        self.target_q_net2.load_state_dict(self.q_net2.state_dict())
        self.optimizer2 = optim.Adam(self.q_net2.parameters(), lr=0.001)

        # Experience replay buffers
        self.memory1 = BETMR(10000, 1000)
        self.memory2 = BETMR(10000, 1000)

        # Training hyper-parameters
        self.gamma = 0.99
        self.batch_size = 64
        self.update_target_freq = 100
        self.steps_done = 0

    def select_action(self, state, epsilon, net_type=1):
        """Select an action with an epsilon-greedy policy.

        Args:
            state: state vector for the chosen network.
            epsilon: exploration probability in [0, 1].
            net_type: 1 = band decision network, otherwise bandwidth network.
        """
        net = self.q_net1 if net_type == 1 else self.q_net2
        state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
        with torch.no_grad():  # pure inference; do not track gradients
            q_values = net(state_tensor)
        action_dim = q_values.shape[1]

        if random.random() > epsilon:
            return q_values.argmax(dim=1).item()
        return random.randint(0, action_dim - 1)

    def train(self):
        """One optimization step for the band decision network.

        (The bandwidth network would be trained analogously; that part is
        omitted, as in the original.)
        """
        if len(self.memory1.normal_memory) + len(self.memory1.expert_memory) < self.batch_size:
            return

        batch = self.memory1.sample(self.batch_size)
        states, actions, rewards, next_states = zip(*batch)
        n = len(batch)  # may be < batch_size if the buffers run short

        state_t = torch.FloatTensor(states).to(device)
        action_t = torch.LongTensor(actions).unsqueeze(1).to(device)
        reward_t = torch.FloatTensor(rewards).unsqueeze(1).to(device)

        # BUGFIX: build the terminal mask from the raw tuple BEFORE
        # filtering. The original iterated the already-filtered tensor, so
        # the mask was all-True and its length disagreed with next_q
        # whenever a terminal (None) transition was sampled.
        non_final_mask = torch.tensor([s is not None for s in next_states],
                                      dtype=torch.bool, device=device)
        next_q = torch.zeros(n, device=device)
        if non_final_mask.any():
            non_final_next = torch.FloatTensor(
                [s for s in next_states if s is not None]).to(device)
            with torch.no_grad():
                next_q[non_final_mask] = self.target_q_net1(non_final_next).max(1)[0]

        # Q-learning target and current estimate for the taken actions
        target_q = reward_t + self.gamma * next_q.unsqueeze(1)
        current_q = self.q_net1(state_t).gather(1, action_t)

        loss = nn.MSELoss()(current_q, target_q)
        self.optimizer1.zero_grad()
        loss.backward()
        self.optimizer1.step()

        # Periodically sync both target networks
        if self.steps_done % self.update_target_freq == 0:
            self.target_q_net1.load_state_dict(self.q_net1.state_dict())
            self.target_q_net2.load_state_dict(self.q_net2.state_dict())

        self.steps_done += 1


# 训练函数
def train_agent(num_jammers, num_episodes=1000):
    env = CommunicationJammingEnv(num_jammers)
    agent = BHJMAgent(
        state_dim1=4,  # [C_A, C_S, g, A1]
        action_dim1=env.num_sub_bands,
        state_dim2=4,  # 简化状态
        action_dim2=env.num_bandwidth_options
    )

    jammed_targets_history = {i: [] for i in range(env.num_targets)}

    for episode in tqdm(range(num_episodes), desc=f"Jammers: {num_jammers}"):
        state = env.reset()
        total_reward1 = 0
        total_reward2 = 0

        while state is not None:
            # 干扰频段决策器
            epsilon = max(0.1, 1.0 - episode / (num_episodes * 0.8))
            A1 = agent.select_action(state, epsilon, net_type=1)

            # 干扰带宽决策器
            # 状态S2: [state + A1] (简化实现)
            state2 = state + [A1 / env.num_sub_bands]
            A2 = agent.select_action(state2, epsilon, net_type=2)

            # 执行动作
            next_state, r1, r2, done = env.step(A1, A2)

            # 存储经验
            agent.memory1.push((state, A1, r1, next_state))
            agent.memory2.push((state2, A2, r2, next_state))

            # 更新状态
            state = next_state
            total_reward1 += r1
            total_reward2 += r2

            # 训练
            agent.train()

        # 更新专家轨迹阈值
        if agent.memory1.update_threshold(total_reward1):
            pass

        # 记录干扰结果
        for i in range(env.num_targets):
            jammed_targets_history[i].append(1 if env.jammed_targets[i] else 0)

    return jammed_targets_history


# 主函数
def main():
    num_episodes = 1000
    num_jammers_list = [6, 7, 8, 9, 10, 11, 12]
    results = {}

    # 训练不同干扰站数量的模型
    for num_jammers in num_jammers_list:
        results[num_jammers] = train_agent(num_jammers, num_episodes)

    # 绘制结果 (图6)
    plt.figure(figsize=(15, 10))
    colors = ['b', 'g', 'r', 'c', 'm', 'y']

    for i, num_jammers in enumerate(num_jammers_list):
        plt.subplot(3, 3, i + 1)
        data = results[num_jammers]

        for target_idx in range(len(targets)):
            # 计算移动平均 (窗口大小=50)
            cum_avg = np.convolve(data[target_idx], np.ones(50) / 50, mode='valid')
            plt.plot(cum_avg, color=colors[target_idx],
                     label=f'Target {target_idx + 1}' if i == 0 else "")

        plt.title(f'{num_jammers} Jammers')
        plt.xlabel('Episodes')
        plt.ylabel('Success Rate')
        plt.ylim(0, 1.1)
        plt.grid(True)

        if i == 0:
            plt.legend(loc='lower right')

    plt.tight_layout()
    plt.savefig('jamming_performance.png')
    plt.show()

    # 保存结果
    print("Training completed. Results saved to jamming_performance.png")


# Script entry point: run the full training and plotting pipeline.
if __name__ == "__main__":
    main()