import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import copy
import time
import logging
import os

# Fix all RNG seeds for reproducibility (torch, numpy, and stdlib random).
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Environment parameters
MAP_SIZE = 100.0
NUM_POIS = 8
NUM_CPS = 3
MAX_STEPS_PER_EPISODE = 50

# UAV parameters
UAV_MAX_ENERGY = 1000.0
UAV_SPEED = 15.0
UAV_DATA_BUFFER_CAPACITY = 100.0
UAV_HOVER_ENERGY_RATE = 2.0       # energy per unit hover time
UAV_FLIGHT_ENERGY_RATE = 0.5      # energy per unit flight distance
UAV_DATA_COLLECTION_RATE = 10.0   # data units collected per unit hover time
UAV_LOCAL_PROCESS_ENERGY_RATE = 0.1  # energy per data unit processed on-board

# MECV (mobile edge computing vehicle) parameters
MECV_MAX_ENERGY = 2000.0
MECV_SPEED = 10.0
MECV_COMPUTE_POWER = 25.0         # data units processed per unit time
MECV_CHARGE_RATE = 20.0           # UAV energy restored per unit compute time
MECV_FLIGHT_ENERGY_RATE = 0.8
MECV_COMPUTE_ENERGY_RATE = 0.5    # MECV energy per data unit processed

# Reward-function constants (scaled to stay within roughly +/-10)
REWARD_DATA_COLLECTED = 0.1
REWARD_DATA_PROCESSED = 0.15
REWARD_UAV_CHARGED = 0.02
PENALTY_TIME_STEP = -0.05
PENALTY_ENERGY_CONSUMPTION = -0.001
PENALTY_INVALID_ACTION = -2.0
PENALTY_ENERGY_DEPLETION = -5.0
BONUS_TASK_COMPLETION = 5.0

# Agent hyperparameters
LEARNING_RATE = 3e-4
GAMMA = 0.99
TAU = 0.005
MEMORY_SIZE = 20000
BATCH_SIZE = 128
EPSILON_START = 1.0
EPSILON_DECAY = 0.995
EPSILON_MIN = 0.05
ALPHA = 0.2           # initial entropy coefficient (log_alpha starts at 0 in HP_Agent)
TARGET_ENTROPY = -2.0
HIDDEN_SIZE = 256


class UAV_MEC_Env:
    """Cooperative UAV / MECV mobile-edge-computing environment.

    One UAV flies to points of interest (POIs) to collect data, splitting it
    between an on-board offload buffer and local processing, and can
    rendezvous with a mobile edge computing vehicle (MECV) at a charging
    point (CP), where the MECV processes the buffered data and recharges
    the UAV.

    An action is a dict with:
      * ``high_level_choice``: 0 = sense a POI, 1 = rendezvous at a CP
      * ``low_level_params``: for sensing, ``(hover_time, offload_rate)``;
        for rendezvous, the integer CP index.
    """

    def __init__(self):
        self.map_size = MAP_SIZE
        self.num_pois = NUM_POIS
        self.num_cps = NUM_CPS
        self.max_steps = MAX_STEPS_PER_EPISODE
        self.uav_max_energy = UAV_MAX_ENERGY
        self.mecv_max_energy = MECV_MAX_ENERGY

        # POI / CP layouts are sampled once per environment instance and are
        # intentionally NOT resampled by reset().
        self.pois = np.random.uniform(0, self.map_size, (self.num_pois, 2))
        self.cps = np.random.uniform(0, self.map_size, (self.num_cps, 2))

        self.reset()

    def reset(self):
        """Reset positions, energies, buffer and counters; return the initial state."""
        self.uav_pos = np.array([self.map_size * 0.1, self.map_size * 0.1])
        self.uav_energy = self.uav_max_energy
        self.uav_data_buffer = 0.0
        self.mecv_pos = np.array([self.map_size * 0.2, self.map_size * 0.2])
        self.mecv_energy = self.mecv_max_energy
        self.poi_visited = np.zeros(self.num_pois, dtype=bool)
        self.current_step = 0
        self.total_data_collected = 0.0

        # Detailed per-step breakdown of time / energy / reward components.
        # NOTE: cleared at the start of every step(), so it always describes
        # only the most recent step, not the whole episode.
        self.step_details = {
            'flight_time': 0.0, 'hover_time': 0.0, 'compute_time': 0.0,
            'flight_energy': 0.0, 'hover_energy': 0.0, 'compute_energy': 0.0, 'local_process_energy': 0.0,
            'data_collect_reward': 0.0, 'data_process_reward': 0.0, 'charge_reward': 0.0,
            'time_penalty': 0.0, 'energy_penalty': 0.0, 'invalid_penalty': 0.0
        }

        return self._get_state()

    def _get_state(self):
        """Return the normalized observation vector.

        Layout: uav xy (2) | uav energy (1) | buffer fill (1) | mecv xy (2) |
        mecv energy (1) | POI visited flags (num_pois) | step progress (1).
        """
        state = np.concatenate([
            self.uav_pos / self.map_size,
            [self.uav_energy / self.uav_max_energy],
            [self.uav_data_buffer / UAV_DATA_BUFFER_CAPACITY],
            self.mecv_pos / self.map_size,
            [self.mecv_energy / self.mecv_max_energy],
            self.poi_visited.astype(float),
            [self.current_step / self.max_steps]
        ])
        return state

    def step(self, action):
        """Execute one hierarchical action and advance the episode.

        Args:
            action: dict with 'high_level_choice' (0 or 1) and 'low_level_params'.

        Returns:
            tuple: (next_state, reward, done, info).

        Raises:
            ValueError: if ``action['high_level_choice']`` is not 0 or 1.
        """
        self.step_details = {k: 0.0 for k in self.step_details}

        if action['high_level_choice'] == 0:
            reward = self._execute_sense_action(action)
        elif action['high_level_choice'] == 1:
            reward = self._execute_rendezvous_action(action)
        else:
            # Fix: an unrecognized choice previously left `reward` unbound and
            # crashed below with an opaque UnboundLocalError.
            raise ValueError(f"Unknown high_level_choice: {action['high_level_choice']!r}")

        self.step_details['time_penalty'] = PENALTY_TIME_STEP
        reward += PENALTY_TIME_STEP

        self.current_step += 1
        done = False

        if self.uav_energy <= 0 or self.mecv_energy <= 0:
            # NOTE: the depletion penalty is accumulated under the
            # 'invalid_penalty' key; kept as-is for backward compatibility
            # with the logging breakdown.
            self.step_details['invalid_penalty'] += PENALTY_ENERGY_DEPLETION
            reward += PENALTY_ENERGY_DEPLETION
            done = True

        if self.current_step >= self.max_steps:
            done = True

        if np.all(self.poi_visited):
            reward += BONUS_TASK_COMPLETION
            done = True

        info = {
            'total_data_collected': self.total_data_collected,
            'uav_energy': self.uav_energy,
            'mecv_energy': self.mecv_energy,
            'pois_visited': np.sum(self.poi_visited),
            'total_pois': self.num_pois,
            'step_details': self.step_details
        }

        return self._get_state(), reward, done, info

    def _execute_sense_action(self, action):
        """Fly to the nearest unvisited POI, hover to collect data, and split
        the data between the offload buffer and local processing.

        Returns the reward contribution of this sub-action.
        """
        unvisited_pois = np.where(~self.poi_visited)[0]
        if len(unvisited_pois) == 0:
            # Sensing with no POIs left is an invalid action.
            self.step_details['invalid_penalty'] = PENALTY_INVALID_ACTION
            return PENALTY_INVALID_ACTION

        distances_to_pois = {i: np.linalg.norm(self.uav_pos - self.pois[i]) for i in unvisited_pois}
        nearest_poi_idx = min(distances_to_pois, key=distances_to_pois.get)

        distance = distances_to_pois[nearest_poi_idx]
        flight_time = distance / UAV_SPEED
        flight_energy = distance * UAV_FLIGHT_ENERGY_RATE
        self.step_details['flight_time'] = flight_time
        self.step_details['flight_energy'] = flight_energy

        self.uav_energy -= flight_energy
        self.uav_pos = self.pois[nearest_poi_idx].copy()

        if self.uav_energy <= 0:
            # Depleted mid-flight; step() adds the large depletion penalty.
            self.uav_energy = 0
            self.step_details['energy_penalty'] = flight_energy * PENALTY_ENERGY_CONSUMPTION
            return flight_energy * PENALTY_ENERGY_CONSUMPTION

        hover_time, offload_rate = action['low_level_params']
        data_collected = hover_time * UAV_DATA_COLLECTION_RATE
        hover_energy = hover_time * UAV_HOVER_ENERGY_RATE
        self.step_details['hover_time'] = hover_time
        self.step_details['hover_energy'] = hover_energy

        self.uav_energy -= hover_energy

        if self.uav_energy <= 0:
            self.uav_energy = 0
            energy_penalty = (flight_energy + hover_energy) * PENALTY_ENERGY_CONSUMPTION
            self.step_details['energy_penalty'] = energy_penalty
            return energy_penalty

        # offload_rate of the data goes to the buffer (capped at capacity —
        # any excess above the cap is silently dropped), the rest is
        # processed locally at an energy cost.
        data_to_buffer = data_collected * offload_rate
        data_for_local = data_collected * (1 - offload_rate)

        available_buffer = UAV_DATA_BUFFER_CAPACITY - self.uav_data_buffer
        actual_buffered_data = min(data_to_buffer, available_buffer)
        self.uav_data_buffer += actual_buffered_data

        local_process_energy = data_for_local * UAV_LOCAL_PROCESS_ENERGY_RATE
        self.step_details['local_process_energy'] = local_process_energy
        self.uav_energy -= local_process_energy

        self.poi_visited[nearest_poi_idx] = True
        self.total_data_collected += data_collected

        data_reward = data_collected * REWARD_DATA_COLLECTED
        total_energy_consumed = flight_energy + hover_energy + local_process_energy
        energy_penalty = total_energy_consumed * PENALTY_ENERGY_CONSUMPTION

        self.step_details['data_collect_reward'] = data_reward
        self.step_details['energy_penalty'] = energy_penalty

        return data_reward + energy_penalty

    def _execute_rendezvous_action(self, action):
        """Move UAV and MECV to the chosen CP; the MECV processes the UAV's
        buffered data and recharges the UAV.

        Returns the reward contribution of this sub-action. Rendezvousing
        with an empty buffer incurs half the invalid-action penalty.
        """
        target_cp = self.cps[action['low_level_params']]

        uav_dist = np.linalg.norm(self.uav_pos - target_cp)
        mecv_dist = np.linalg.norm(self.mecv_pos - target_cp)

        # Both vehicles travel simultaneously; the slower arrival dominates.
        flight_time = max(uav_dist / UAV_SPEED, mecv_dist / MECV_SPEED)
        uav_travel_energy = uav_dist * UAV_FLIGHT_ENERGY_RATE
        mecv_travel_energy = mecv_dist * MECV_FLIGHT_ENERGY_RATE

        self.step_details['flight_time'] = flight_time
        self.step_details['flight_energy'] = uav_travel_energy + mecv_travel_energy

        self.uav_energy -= uav_travel_energy
        self.mecv_energy -= mecv_travel_energy
        self.uav_pos = target_cp.copy()
        self.mecv_pos = target_cp.copy()

        if self.uav_energy <= 0 or self.mecv_energy <= 0:
            self.uav_energy = max(0, self.uav_energy)
            self.mecv_energy = max(0, self.mecv_energy)
            energy_penalty = (uav_travel_energy + mecv_travel_energy) * PENALTY_ENERGY_CONSUMPTION
            self.step_details['energy_penalty'] = energy_penalty
            return energy_penalty

        reward = 0
        if self.uav_data_buffer > 0:
            process_time = self.uav_data_buffer / MECV_COMPUTE_POWER
            compute_energy = self.uav_data_buffer * MECV_COMPUTE_ENERGY_RATE
            # Charging is limited by both compute time and the UAV's headroom.
            charge_amount = min(process_time * MECV_CHARGE_RATE, self.uav_max_energy - self.uav_energy)

            self.step_details['compute_time'] = process_time
            self.step_details['compute_energy'] = compute_energy

            self.mecv_energy -= compute_energy
            self.uav_energy += charge_amount

            data_process_reward = self.uav_data_buffer * REWARD_DATA_PROCESSED
            charge_reward = charge_amount * REWARD_UAV_CHARGED
            self.step_details['data_process_reward'] = data_process_reward
            self.step_details['charge_reward'] = charge_reward

            reward += data_process_reward + charge_reward
            self.uav_data_buffer = 0.0
        else:
            self.step_details['invalid_penalty'] = PENALTY_INVALID_ACTION / 2
            reward += PENALTY_INVALID_ACTION / 2

        total_energy_consumed = uav_travel_energy + mecv_travel_energy + self.step_details['compute_energy']
        energy_penalty = total_energy_consumed * PENALTY_ENERGY_CONSUMPTION
        self.step_details['energy_penalty'] = energy_penalty
        reward += energy_penalty

        return reward


class DuelingDQNNet(nn.Module):
    """Dueling Q-network: a shared trunk feeds separate value and advantage streams."""

    def __init__(self, state_dim, action_dim, hidden_size=HIDDEN_SIZE):
        super().__init__()
        # Shared feature trunk.
        self.feature = nn.Sequential(
            nn.Linear(state_dim, hidden_size),
            nn.ReLU(),
        )
        # Per-action advantage stream A(s, a).
        self.advantage = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, action_dim),
        )
        # Scalar state-value stream V(s).
        self.value = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, x):
        feats = self.feature(x)
        adv = self.advantage(feats)
        val = self.value(feats)
        # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); subtracting the mean
        # keeps the value/advantage decomposition identifiable.
        return val + (adv - adv.mean(dim=-1, keepdim=True))


class SAC_Actor(nn.Module):
    """Squashed-Gaussian SAC policy producing (hover_time, offload_rate).

    The network outputs a mean and log-std per action dimension; actions are
    sampled with the reparameterization trick, tanh-squashed to (-1, 1), and
    then rescaled to the environment's parameter ranges.
    """

    def __init__(self, state_dim, action_dim, hidden_size=HIDDEN_SIZE):
        super().__init__()
        # Clamp range for the predicted log standard deviation.
        self.LOG_STD_MAX = 2
        self.LOG_STD_MIN = -20
        # One trunk emits mean and log-std stacked along the last dimension.
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden_size), nn.ReLU(),
            nn.Linear(hidden_size, hidden_size), nn.ReLU(),
            nn.Linear(hidden_size, 2 * action_dim)
        )

    def forward(self, state):
        head = self.net(state)
        mu, raw_log_std = torch.chunk(head, 2, dim=-1)
        raw_log_std = torch.clamp(raw_log_std, self.LOG_STD_MIN, self.LOG_STD_MAX)
        policy = Normal(mu, torch.exp(raw_log_std))

        # Reparameterized sample, then squash into (-1, 1).
        pre_tanh = policy.rsample()
        squashed = torch.tanh(pre_tanh)

        # Log-prob with the tanh change-of-variables correction.
        logp = policy.log_prob(pre_tanh) - torch.log(1 - squashed.pow(2) + 1e-6)
        logp = logp.sum(-1, keepdim=True)

        # Rescale: hover_time in [0.5, 3.5], offload_rate in [0, 1].
        hover = (squashed[:, 0:1] + 1) * 1.5 + 0.5
        offload = (squashed[:, 1:2] + 1) * 0.5

        return torch.cat([hover, offload], dim=1), logp


class SAC_Critic(nn.Module):
    """State-action Q-network for the continuous sensing sub-policy."""

    def __init__(self, state_dim, action_dim, hidden_size=HIDDEN_SIZE):
        super().__init__()
        # MLP over the concatenated (state, action) pair -> scalar Q-value.
        self.net = nn.Sequential(
            nn.Linear(state_dim + action_dim, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, state, action):
        joint = torch.cat([state, action], dim=1)
        return self.net(joint)


class HP_Agent:
    """Hierarchical agent.

    A Dueling-DQN meta controller picks between two options each step:
    0 = 'sense' (continuous parameters from a SAC sub-policy with twin
    critics), 1 = 'rendezvous' (a second Dueling-DQN picks the charging
    point). All controllers share one replay buffer and are trained from
    the same sampled batch, each on the transitions where its option was
    taken.
    """

    def __init__(self, state_dim, num_cps):
        self.state_dim = state_dim
        self.num_cps = num_cps
        self.gamma = GAMMA
        self.tau = TAU
        self.epsilon = EPSILON_START
        self.epsilon_decay = EPSILON_DECAY
        self.epsilon_min = EPSILON_MIN
        self.batch_size = BATCH_SIZE

        # Shared replay buffer of (state, action-dict, reward, next_state, done).
        self.memory = deque(maxlen=MEMORY_SIZE)

        # Most recent SAC losses, exposed for logging.
        self.actor_loss = 0.0
        self.critic_loss = 0.0

        # High-level controller: choose sense (0) vs rendezvous (1).
        self.meta_controller = DuelingDQNNet(state_dim, 2)
        self.target_meta_controller = copy.deepcopy(self.meta_controller)
        self.meta_optimizer = optim.Adam(self.meta_controller.parameters(), lr=LEARNING_RATE)

        # SAC sub-policy for the continuous sensing parameters (twin critics).
        self.sense_actor = SAC_Actor(state_dim, 2)
        self.sense_critic1 = SAC_Critic(state_dim, 2)
        self.sense_critic2 = SAC_Critic(state_dim, 2)
        self.target_sense_critic1 = copy.deepcopy(self.sense_critic1)
        self.target_sense_critic2 = copy.deepcopy(self.sense_critic2)

        self.sense_actor_optimizer = optim.Adam(self.sense_actor.parameters(), lr=LEARNING_RATE)
        # Both critics share one optimizer; their losses are summed.
        self.sense_critic_optimizer = optim.Adam(
            list(self.sense_critic1.parameters()) + list(self.sense_critic2.parameters()), lr=LEARNING_RATE)

        # Learnable entropy temperature (alpha = exp(log_alpha), starts at 1).
        self.log_alpha = torch.zeros(1, requires_grad=True)
        self.alpha_optimizer = optim.Adam([self.log_alpha], lr=LEARNING_RATE)
        self.target_entropy = torch.tensor(TARGET_ENTROPY)

        # Low-level discrete controller: which charging point to rendezvous at.
        self.rendezvous_controller = DuelingDQNNet(state_dim, num_cps)
        self.target_rendezvous_controller = copy.deepcopy(self.rendezvous_controller)
        self.rendezvous_optimizer = optim.Adam(self.rendezvous_controller.parameters(), lr=LEARNING_RATE)

    def select_action(self, state, training=True):
        """Select a hierarchical action for `state`.

        Discrete choices (meta and rendezvous) use epsilon-greedy exploration
        when training; the SAC actor samples stochastically either way.
        Returns a dict with 'high_level_choice' and 'low_level_params'.
        """
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        if training and np.random.random() < self.epsilon:
            high_level_action = np.random.choice(2)
        else:
            with torch.no_grad():
                high_level_action = self.meta_controller(state_tensor).argmax().item()

        action = {'high_level_choice': high_level_action}

        if high_level_action == 0:
            # Continuous (hover_time, offload_rate) from the SAC actor.
            with torch.no_grad():
                low_level_params, _ = self.sense_actor(state_tensor)
            action['low_level_params'] = low_level_params.squeeze().numpy()
        else:
            # Discrete charging-point index, epsilon-greedy when training.
            if training and np.random.random() < self.epsilon:
                cp_choice = np.random.choice(self.num_cps)
            else:
                with torch.no_grad():
                    cp_choice = self.rendezvous_controller(state_tensor).argmax().item()
            action['low_level_params'] = cp_choice

        return action

    def remember(self, state, action, reward, next_state, done):
        """Append one transition to the shared replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def train(self):
        """Sample one batch and update all controllers.

        Each sub-controller trains only on the transitions where its option
        was chosen; the meta controller trains on the full batch. Epsilon
        decays once per call (i.e. per environment step, not per episode).
        """
        if len(self.memory) < self.batch_size:
            return

        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        rewards = torch.FloatTensor(rewards).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1)

        # Boolean masks splitting the batch by the high-level option taken.
        sense_mask = torch.tensor([a['high_level_choice'] == 0 for a in actions])
        rendezvous_mask = torch.tensor([a['high_level_choice'] == 1 for a in actions])

        if sense_mask.any():
            self._train_sense_controller(
                states[sense_mask],
                [a for i, a in enumerate(actions) if sense_mask[i]],
                rewards[sense_mask],
                next_states[sense_mask],
                dones[sense_mask]
            )

        if rendezvous_mask.any():
            self._train_rendezvous_controller(
                states[rendezvous_mask],
                [a for i, a in enumerate(actions) if rendezvous_mask[i]],
                rewards[rendezvous_mask],
                next_states[rendezvous_mask],
                dones[rendezvous_mask]
            )

        self._train_meta_controller(states, actions, rewards, next_states, dones)
        self._soft_update_all_targets()
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

    def _train_sense_controller(self, states, actions, rewards, next_states, dones):
        """One SAC update (twin critics, actor, entropy temperature) on the
        'sense' transitions of the batch."""
        action_params = torch.FloatTensor(np.array([a['low_level_params'] for a in actions]))

        # Detached so the critic/actor losses do not backprop into log_alpha.
        alpha = torch.exp(self.log_alpha.detach())

        # Soft Bellman target using target critics and the entropy bonus.
        with torch.no_grad():
            next_actions, next_log_prob = self.sense_actor(next_states)
            target_q1 = self.target_sense_critic1(next_states, next_actions)
            target_q2 = self.target_sense_critic2(next_states, next_actions)
            target_q = torch.min(target_q1, target_q2) - alpha * next_log_prob
            target_q = rewards + self.gamma * (1 - dones) * target_q

        current_q1 = self.sense_critic1(states, action_params)
        current_q2 = self.sense_critic2(states, action_params)
        critic_loss = F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)
        self.critic_loss = critic_loss.item()

        self.sense_critic_optimizer.zero_grad()
        critic_loss.backward()
        self.sense_critic_optimizer.step()

        # Actor update: maximize min-Q minus entropy penalty. The backward
        # pass also deposits gradients in the critics, but those are cleared
        # by the critic optimizer's zero_grad() on the next call.
        new_actions, log_prob = self.sense_actor(states)
        q1_new = self.sense_critic1(states, new_actions)
        q2_new = self.sense_critic2(states, new_actions)
        actor_loss = (alpha * log_prob - torch.min(q1_new, q2_new)).mean()
        self.actor_loss = actor_loss.item()

        self.sense_actor_optimizer.zero_grad()
        actor_loss.backward()
        self.sense_actor_optimizer.step()

        # Temperature update: drive policy entropy toward target_entropy.
        alpha_loss = -(self.log_alpha * (log_prob + self.target_entropy).detach()).mean()
        self.alpha_optimizer.zero_grad()
        alpha_loss.backward()
        self.alpha_optimizer.step()

    def _train_rendezvous_controller(self, states, actions, rewards, next_states, dones):
        """Double-DQN update for the charging-point controller: online net
        selects the next action, target net evaluates it."""
        action_params = torch.LongTensor([a['low_level_params'] for a in actions]).unsqueeze(1)

        with torch.no_grad():
            next_actions = self.rendezvous_controller(next_states).argmax(1, keepdim=True)
            target_q = self.target_rendezvous_controller(next_states).gather(1, next_actions)
            target_q = rewards + self.gamma * (1 - dones) * target_q

        current_q = self.rendezvous_controller(states).gather(1, action_params)
        loss = F.mse_loss(current_q, target_q)

        self.rendezvous_optimizer.zero_grad()
        loss.backward()
        self.rendezvous_optimizer.step()

    def _train_meta_controller(self, states, actions, rewards, next_states, dones):
        """Update the meta controller; the bootstrap value of each next state
        comes from the chosen option's own (target) value estimate."""
        high_level_actions = torch.LongTensor([a['high_level_choice'] for a in actions]).unsqueeze(1)

        with torch.no_grad():
            next_high_level_actions = self.meta_controller(next_states).argmax(1)

            # Per-sample loop: sense option -> min of target SAC critics at a
            # freshly sampled action; rendezvous option -> max target DQN value.
            next_q_values = torch.zeros(len(states), 1)
            for i in range(len(states)):
                if next_high_level_actions[i] == 0:
                    next_action_params, _ = self.sense_actor(next_states[i].unsqueeze(0))
                    next_q_values[i] = torch.min(
                        self.target_sense_critic1(next_states[i].unsqueeze(0), next_action_params),
                        self.target_sense_critic2(next_states[i].unsqueeze(0), next_action_params)
                    )
                else:
                    next_q_values[i] = self.target_rendezvous_controller(next_states[i].unsqueeze(0)).max()

            target_q = rewards + self.gamma * (1 - dones) * next_q_values

        current_q = self.meta_controller(states).gather(1, high_level_actions)
        loss = F.mse_loss(current_q, target_q)

        self.meta_optimizer.zero_grad()
        loss.backward()
        self.meta_optimizer.step()

    def _soft_update_all_targets(self):
        """Polyak-average every target network toward its online counterpart."""
        def soft_update(target, source, tau):
            for target_param, source_param in zip(target.parameters(), source.parameters()):
                target_param.data.copy_(tau * source_param.data + (1.0 - tau) * target_param.data)

        soft_update(self.target_meta_controller, self.meta_controller, self.tau)
        soft_update(self.target_sense_critic1, self.sense_critic1, self.tau)
        soft_update(self.target_sense_critic2, self.sense_critic2, self.tau)
        soft_update(self.target_rendezvous_controller, self.rendezvous_controller, self.tau)


def setup_logging():
    """Configure root logging to both a file and the console.

    Creates the ``logs`` directory if needed and initialises the root logger
    at INFO level with a bare-message format, writing to
    ``logs/training.log`` (truncated) and to stderr.

    Returns:
        logging.Logger: the root logger.
    """
    # exist_ok avoids the check-then-create race of the exists()/makedirs pair.
    os.makedirs('logs', exist_ok=True)

    logging.basicConfig(
        level=logging.INFO,
        format='%(message)s',
        handlers=[
            logging.FileHandler('logs/training.log', mode='w'),
            logging.StreamHandler()
        ]
    )
    return logging.getLogger()


def plot_and_save_results(episode, rewards, uav_positions, mecv_positions, env):
    """Plot training curves and the latest UAV/MECV trajectories, then save
    the figure to ``plots/training_results_ep_<episode+1>.png``.

    Args:
        episode: zero-based episode index (the saved filename uses episode+1).
        rewards: list of per-episode cumulative rewards so far.
        uav_positions / mecv_positions: lists of per-episode position lists;
            only the most recent episode ([-1]) is drawn.
        env: environment instance; provides POI/CP coordinates and map size.
    """
    if not os.path.exists('plots'):
        os.makedirs('plots')

    plt.style.use('default')
    plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False

    fig, axes = plt.subplots(2, 2, figsize=(15, 12))

    # Reward curve
    axes[0, 0].plot(rewards, 'b-', linewidth=1)
    axes[0, 0].set_title(f'Training Rewards (Episode {episode + 1})')
    axes[0, 0].set_xlabel('Episode')
    axes[0, 0].set_ylabel('Cumulative Reward')
    axes[0, 0].grid(True, alpha=0.3)

    # Moving-average reward (window up to 50, once >=10 episodes exist)
    if len(rewards) >= 10:
        window = min(50, len(rewards))
        moving_avg = np.convolve(rewards, np.ones(window) / window, mode='valid')
        axes[0, 1].plot(range(window - 1, len(rewards)), moving_avg, 'r-', linewidth=2)
        axes[0, 1].set_title(f'Moving Average Rewards (Window: {window})')
        axes[0, 1].set_xlabel('Episode')
        axes[0, 1].set_ylabel('Moving Average Reward')
        axes[0, 1].grid(True, alpha=0.3)

    # UAV trajectory of the most recent episode
    if uav_positions and len(uav_positions) > 0:
        uav_x, uav_y = zip(*uav_positions[-1])
        axes[1, 0].plot(uav_x, uav_y, 'b-', linewidth=2, alpha=0.7, label='UAV Trajectory')
        axes[1, 0].scatter(uav_x[0], uav_y[0], c='blue', s=100, marker='o', label='Start')
        axes[1, 0].scatter(uav_x[-1], uav_y[-1], c='darkblue', s=100, marker='*', label='End')
        axes[1, 0].scatter(env.pois[:, 0], env.pois[:, 1], c='green', s=80, marker='s', label='POIs')
        axes[1, 0].scatter(env.cps[:, 0], env.cps[:, 1], c='red', s=80, marker='^', label='CPs')
        axes[1, 0].set_xlim(0, env.map_size)
        axes[1, 0].set_ylim(0, env.map_size)
        axes[1, 0].set_title('UAV Trajectory')
        axes[1, 0].legend(loc='upper right', fontsize=8)
        axes[1, 0].grid(True, alpha=0.3)

        # MECV trajectory. NOTE: nested under the UAV branch, so it is only
        # drawn when uav_positions is non-empty (and assumes mecv_positions
        # has an entry too).
        mecv_x, mecv_y = zip(*mecv_positions[-1])
        axes[1, 1].plot(mecv_x, mecv_y, 'm--', linewidth=2, alpha=0.7, label='MECV Trajectory')
        axes[1, 1].scatter(mecv_x[0], mecv_y[0], c='magenta', s=100, marker='o', label='Start')
        axes[1, 1].scatter(mecv_x[-1], mecv_y[-1], c='darkmagenta', s=100, marker='*', label='End')
        axes[1, 1].scatter(env.pois[:, 0], env.pois[:, 1], c='green', s=80, marker='s', label='POIs')
        axes[1, 1].scatter(env.cps[:, 0], env.cps[:, 1], c='red', s=80, marker='^', label='CPs')
        axes[1, 1].set_xlim(0, env.map_size)
        axes[1, 1].set_ylim(0, env.map_size)
        axes[1, 1].set_title('MECV Trajectory')
        axes[1, 1].legend(loc='upper right', fontsize=8)
        axes[1, 1].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(f'plots/training_results_ep_{episode + 1}.png', dpi=150, bbox_inches='tight')
    plt.close()  # close figure to free memory


def log_episode_details(logger, episode, episode_reward, info, agent, episode_steps):
    """记录每个episode的详细信息"""
    step_details = info['step_details']

    # 总时延 = 飞行时间 + 悬停时间 + 计算时间
    total_delay = step_details['flight_time'] + step_details['hover_time'] + step_details['compute_time']

    # 总能耗 = UAV飞行能耗 + UAV悬停能耗 + UAV本地处理能耗 + MECV飞行能耗 + MECV计算能耗
    total_energy = (step_details['flight_energy'] + step_details['hover_energy'] +
                    step_details['local_process_energy'] + step_details['compute_energy'])

    # 细分奖励
    total_positive_reward = (step_details['data_collect_reward'] +
                             step_details['data_process_reward'] +
                             step_details['charge_reward'])
    total_negative_reward = (step_details['time_penalty'] +
                             step_details['energy_penalty'] +
                             step_details['invalid_penalty'])

    log_msg = (
        f"Ep:{episode + 1:4d} | "
        f"POI:{info['pois_visited']:2d}/{info['total_pois']:2d} | "
        f"Steps:{episode_steps:2d} | "
        f"ε:{agent.epsilon:.3f} | "
        f"A_Loss:{agent.actor_loss:.3f} | "
        f"C_Loss:{agent.critic_loss:.3f} | "
        f"R:{episode_reward:6.2f} | "
        f"R+:{total_positive_reward:5.2f} | "
        f"R-:{total_negative_reward:6.2f} | "
        f"Energy:{total_energy:6.1f} | "
        f"E_flight:{step_details['flight_energy']:4.1f} | "
        f"E_hover:{step_details['hover_energy']:4.1f} | "
        f"E_comp:{step_details['compute_energy']:4.1f} | "
        f"Delay:{total_delay:5.2f} | "
        f"D_flight:{step_details['flight_time']:4.2f} | "
        f"D_hover:{step_details['hover_time']:4.2f} | "
        f"D_comp:{step_details['compute_time']:4.2f}"
    )

    logger.info(log_msg)


if __name__ == '__main__':
    # Set up logging to file + console.
    logger = setup_logging()
    logger.info("=== UAV-MECV Cooperative System Training ===")
    logger.info("Algorithm: SAC + Dueling DQN")
    logger.info(f"Episodes: 1000, Max Steps: {MAX_STEPS_PER_EPISODE}")
    logger.info(f"POIs: {NUM_POIS}, CPs: {NUM_CPS}")
    logger.info("=" * 120)

    # Print the legend for the per-episode log line.
    logger.info("Ep:Episode | POI:Visited_POIs/Total | Steps | ε:Epsilon | A_Loss:Actor_Loss | C_Loss:Critic_Loss")
    logger.info("R:Total_Reward | R+:Positive_Rewards | R-:Negative_Rewards | Energy:Total_Energy_Consumed")
    logger.info("E_flight/hover/comp:Energy_Components | Delay:Total_Delay | D_flight/hover/comp:Delay_Components")
    logger.info("-" * 120)

    # Initialize environment and agent; state dimension is taken from an
    # actual observation vector.
    env = UAV_MEC_Env()
    state_dim = len(env._get_state())
    agent = HP_Agent(state_dim=state_dim, num_cps=env.num_cps)

    # Training bookkeeping
    num_episodes = 1000
    episode_rewards = []
    uav_trajectories, mecv_trajectories = [], []

    # Main training loop
    start_time = time.time()
    for episode in range(num_episodes):
        state = env.reset()
        episode_reward = 0
        ep_uav_pos = [env.uav_pos.copy()]
        ep_mecv_pos = [env.mecv_pos.copy()]

        for step in range(env.max_steps):
            action = agent.select_action(state, training=True)
            next_state, reward, done, info = env.step(action)
            agent.remember(state, action, reward, next_state, done)

            state = next_state
            episode_reward += reward
            ep_uav_pos.append(env.uav_pos.copy())
            ep_mecv_pos.append(env.mecv_pos.copy())

            # One gradient update per environment step (no-op until the
            # replay buffer holds a full batch).
            agent.train()

            if done:
                break

        episode_rewards.append(episode_reward)
        uav_trajectories.append(ep_uav_pos)
        mecv_trajectories.append(ep_mecv_pos)

        # Per-episode detail log. NOTE: relies on `info` and `step` from the
        # inner loop, which always runs at least once since max_steps >= 1.
        log_episode_details(logger, episode, episode_reward, info, agent, step + 1)

        # Every 100 episodes: summary log + plots. `start_time` is reset, so
        # "Time Elapsed" measures the last 100 episodes only.
        if (episode + 1) % 100 == 0:
            elapsed_time = time.time() - start_time
            avg_reward = np.mean(episode_rewards[-100:])

            logger.info("-" * 120)
            logger.info(f"=== Episode {episode + 1} Summary ===")
            logger.info(f"Average Reward (Last 100): {avg_reward:.3f}")
            logger.info(f"Time Elapsed: {elapsed_time:.1f}s")
            logger.info(f"Generating plots...")

            plot_and_save_results(episode, episode_rewards, uav_trajectories, mecv_trajectories, env)
            logger.info(f"Plot saved: plots/training_results_ep_{episode + 1}.png")
            logger.info("-" * 120)

            start_time = time.time()

    # Training finished: final statistics
    logger.info("=" * 120)
    logger.info("=== Training Completed ===")
    final_avg_reward = np.mean(episode_rewards[-100:]) if len(episode_rewards) >= 100 else np.mean(episode_rewards)
    logger.info(f"Final Average Reward: {final_avg_reward:.3f}")
    logger.info(f"Best Episode Reward: {max(episode_rewards):.3f}")
    logger.info(f"Total Episodes: {num_episodes}")

    # Save the final model weights (target networks and optimizers are not
    # persisted).
    try:
        if not os.path.exists('models'):
            os.makedirs('models')
        torch.save({
            'meta_controller': agent.meta_controller.state_dict(),
            'sense_actor': agent.sense_actor.state_dict(),
            'sense_critic1': agent.sense_critic1.state_dict(),
            'sense_critic2': agent.sense_critic2.state_dict(),
            'rendezvous_controller': agent.rendezvous_controller.state_dict(),
            'episode_rewards': episode_rewards,
        }, 'models/hp_agent_final_model.pth')
        logger.info("Model saved: models/hp_agent_final_model.pth")
    except Exception as e:
        logger.error(f"Failed to save model: {e}")

    logger.info("=" * 120)

