# trainer.py

import torch
import torch.optim as optim
import numpy as np
from config import *
from models import FrequencyQNetwork, BandwidthQNetwork
from environment import JammingEnvironment
from betmr import BETMRExperienceReplay
from utils import get_state_vector

class DQNAgent:
    """Generic DQN agent: an online Q-network plus a frozen target copy.

    The same class is reused for both the frequency and the bandwidth
    decision heads; pass ``n_actions`` to size the exploration space.
    """

    def __init__(self, q_network, optimizer, gamma, n_actions=None):
        """Initialize the agent.

        Args:
            q_network: online Q-network (torch.nn.Module).
            optimizer: optimizer already bound to ``q_network``'s parameters.
            gamma: discount factor for the TD target.
            n_actions: size of this agent's discrete action space used for
                random exploration. Defaults to ``ACTION_SPACE_FREQ`` for
                backward compatibility with existing callers.
        """
        self.q_network = q_network
        # Target network: a fresh instance of the same architecture, synced
        # to the online weights and kept in eval mode.
        # NOTE(review): assumes the network class has a no-arg constructor.
        self.target_network = type(q_network)()
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.target_network.eval()
        self.optimizer = optimizer
        self.gamma = gamma
        # BUG FIX: the original always drew random actions from
        # ACTION_SPACE_FREQ, even when this agent controls bandwidth.
        self.n_actions = ACTION_SPACE_FREQ if n_actions is None else n_actions

    def act(self, state, epsilon):
        """Select an action with an epsilon-greedy policy.

        Args:
            state: state tensor accepted by ``self.q_network``.
            epsilon: exploration probability in [0, 1].

        Returns:
            int: the chosen action index.
        """
        if np.random.rand() < epsilon:
            return np.random.randint(self.n_actions)  # explore
        with torch.no_grad():  # greedy action; no grad needed for inference
            return self.q_network(state).argmax().item()

    def update(self, experiences):
        """Take one gradient step on a batch of transitions.

        Args:
            experiences: tuple of batched tensors
                ``(states, actions, rewards, next_states, dones)``.
        """
        states, actions, rewards, next_states, dones = experiences
        loss = self._compute_loss(states, actions, rewards, next_states, dones)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def _compute_loss(self, states, actions, rewards, next_states, dones):
        """Standard DQN TD loss (MSE between chosen Q and bootstrap target)."""
        # Q-value of the action actually taken, shape (batch,).
        # BUG FIX: squeeze(1) instead of squeeze() — a bare squeeze() turns a
        # batch of size 1 into a 0-d tensor and silently broadcasts in MSE.
        q_values = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        # Bootstrap from the frozen target network; detach stops gradients.
        next_q_values = self.target_network(next_states).max(1)[0].detach()
        # Terminal transitions (done == 1) truncate the bootstrap term.
        expected_q_values = rewards + (1 - dones) * self.gamma * next_q_values
        return torch.nn.MSELoss()(q_values, expected_q_values)

def train():
    """Run the hierarchical DQN training loop.

    Two cooperating agents are trained: one selects a frequency action,
    the other a bandwidth action. Transitions are stored in the BETMR
    replay buffer and each agent is updated on its own slice of the
    sampled batch once per episode.
    """
    env = JammingEnvironment()
    betmr = BETMRExperienceReplay()

    # Initialize models and optimizers.
    freq_net = FrequencyQNetwork()
    band_net = BandwidthQNetwork()
    freq_optimizer = optim.Adam(freq_net.parameters(), lr=LEARNING_RATE)
    band_optimizer = optim.Adam(band_net.parameters(), lr=LEARNING_RATE)

    agents = {
        "freq": DQNAgent(freq_net, freq_optimizer, GAMMA),
        "band": DQNAgent(band_net, band_optimizer, GAMMA),
    }

    for episode in range(MAX_EPISODES):
        state = env.reset()
        total_reward = 0
        done = False

        while not done:
            # State features per decision head.
            # NOTE(review): assumes get_state_vector returns a dict of
            # tensors keyed by "freq"/"band" — confirm against utils.
            state_vec = get_state_vector(state)
            # Hierarchical decision: frequency first, then bandwidth.
            action_freq = agents["freq"].act(state_vec["freq"], EPSILON)
            action_band = agents["band"].act(state_vec["band"], EPSILON)

            next_state, reward, done = env.step(action_freq, action_band)
            total_reward += reward

            # Store the transition including the done flag so the TD target
            # can be truncated at episode end (the original stored no done
            # flag, and its tuple layout never matched what update() unpacks).
            next_state_vec = get_state_vector(next_state)
            experience = (state_vec, action_freq, action_band, reward,
                          next_state_vec, done)
            betmr.add_experience(experience, total_reward)

            # BUG FIX: advance the environment state; previously the agents
            # acted on the initial observation for the whole episode.
            state = next_state

        # Train once per episode when enough experience has accumulated.
        if len(betmr.normal_pool) >= BATCH_SIZE:
            # NOTE(review): assumes sample() returns a list of the tuples
            # stored above — matches the original zip(*samples) usage.
            samples = betmr.sample(BATCH_SIZE)
            state_vecs, a_freq, a_band, rewards, next_vecs, dones = zip(*samples)
            rewards_t = torch.tensor(rewards, dtype=torch.float32)
            dones_t = torch.tensor([float(d) for d in dones], dtype=torch.float32)
            # Each agent trains on its own state slice and its own action.
            for key, actions in (("freq", a_freq), ("band", a_band)):
                states_t = torch.stack([sv[key] for sv in state_vecs])
                next_t = torch.stack([nv[key] for nv in next_vecs])
                actions_t = torch.tensor(actions, dtype=torch.long)
                agents[key].update(
                    (states_t, actions_t, rewards_t, next_t, dones_t))

        # Periodically sync the frozen target networks to the online weights.
        if episode % TARGET_UPDATE_FREQ == 0:
            agents["freq"].target_network.load_state_dict(
                agents["freq"].q_network.state_dict())
            agents["band"].target_network.load_state_dict(
                agents["band"].q_network.state_dict())

        print(f"Episode {episode}, Total Reward: {total_reward}")