# -*- coding: UTF-8 -*-
import os
import argparse
import copy
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import env_tina as environment
import config
from ppo_gnn_tina import PPO_GNN
import warnings
import torch

# Silence noisy UserWarnings (e.g. from torch / tensorboard) and disable
# torch's always-warn mode so repeated warnings don't flood training logs.
warnings.filterwarnings("ignore", category=UserWarning)
torch.set_warn_always(False)


def save_model(agent, path, checkpoint_info=None):
    """
    Save a model checkpoint together with accumulated training statistics.

    If a checkpoint already exists at *path*, its per-field history lists are
    loaded first and this epoch's values are appended, so each saved field
    grows by one entry per call.

    :param agent: PPO_GNN agent exposing actor/critic nets and their optimizers
    :param path: destination file path for ``torch.save``
    :param checkpoint_info: dict with this epoch's scalar metrics (may be None)
    """
    if checkpoint_info is None:
        checkpoint_info = {}

    # Best-effort load of the previous checkpoint; start fresh on any failure
    # (corrupt file, incompatible torch version). A bare `except:` would also
    # swallow KeyboardInterrupt/SystemExit, so catch Exception only.
    try:
        previous_checkpoint = torch.load(path) if os.path.exists(path) else {}
    except Exception:
        previous_checkpoint = {}

    # Fields whose per-epoch scalar values accumulate into history lists.
    cumulative_fields = [
        "policy_losses",
        "value_losses",
        "rewards",
        "cloud_counts",
        "migration_delays",
        "download_times",
        "trans_delays",
        "waiting_times",
        "computation_delays",
        "arrange_delays",
        "scaling_delays",
        "backhaul_delays",
    ]

    # Merge each field's previous history with this epoch's new value.
    cumulative_checkpoint_info = {}
    for field in cumulative_fields:
        previous_data = previous_checkpoint.get(field, [])
        new_data = checkpoint_info.get(field, None)

        if field in ("policy_losses", "value_losses"):
            # Loss fields always append, defaulting to 0 when absent.
            cumulative_checkpoint_info[field] = previous_data + [
                new_data if new_data is not None else 0
            ]
        elif new_data is not None:
            cumulative_checkpoint_info[field] = previous_data + [new_data]
        else:
            # No new value supplied: keep the existing history unchanged.
            cumulative_checkpoint_info[field] = previous_data

    checkpoint = {
        # Network and optimizer states.
        "actor_state_dict": agent.actor_net.state_dict(),
        "critic_state_dict": agent.critic_net.state_dict(),
        "actor_optimizer_state_dict": agent.actor_optimizer.state_dict(),
        "critic_optimizer_state_dict": agent.critic_optimizer.state_dict(),
        # Accumulated training statistics.
        **cumulative_checkpoint_info,
        # Training meta information.
        "total_epochs": checkpoint_info.get("total_epochs", 0),
    }

    torch.save(checkpoint, path)


def load_model(agent, path):
    """
    Restore a checkpoint: network/optimizer states plus the accumulated
    training history.

    :param agent: PPO_GNN agent to restore into
    :param path: checkpoint file path
    :return: tuple of (loaded epoch count, dict of accumulated training data)
    """
    # Read the checkpoint file; an unreadable/missing file means a cold start.
    try:
        checkpoint = torch.load(path)
    except Exception as e:
        print(f"加载模型失败: {e}")
        return 0, {}

    # Restore network and optimizer states (best-effort: a mismatch is
    # reported but does not abort loading of the training history).
    try:
        agent.actor_net.load_state_dict(checkpoint["actor_state_dict"])
        agent.critic_net.load_state_dict(checkpoint["critic_state_dict"])
        agent.actor_optimizer.load_state_dict(checkpoint["actor_optimizer_state_dict"])
        agent.critic_optimizer.load_state_dict(checkpoint["critic_optimizer_state_dict"])
    except Exception as e:
        print(f"恢复网络状态失败: {e}")

    # Pull out whichever accumulated history fields the checkpoint carries.
    history_keys = (
        "policy_losses",
        "value_losses",
        "rewards",
        "cloud_counts",
        "migration_delays",
        "download_times",
        "trans_delays",
        "waiting_times",
        "computation_delays",
        "arrange_delays",
        "scaling_delays",
        "backhaul_delays",
    )
    cumulative_data = {key: checkpoint[key] for key in history_keys if key in checkpoint}

    total_epochs = checkpoint.get("total_epochs", 0)
    print(f"成功加载模型检查点(轮次: {total_epochs})")
    return total_epochs, cumulative_data


def _find_latest_checkpoint(save_dir):
    """Return the path of the highest-numbered ``epoch_N.pth`` in *save_dir*, or None."""
    if not os.path.exists(save_dir):
        return None
    candidates = []
    for fname in os.listdir(save_dir):
        if not (fname.startswith("epoch_") and fname.endswith(".pth")):
            continue
        try:
            epoch_num = int(fname[len("epoch_"):-len(".pth")])
        except ValueError:
            # Skip files that match the pattern but carry no parseable number.
            continue
        candidates.append((epoch_num, fname))
    if not candidates:
        return None
    latest_epoch, latest_file = max(candidates)
    print(f"找到最新的checkpoint: {latest_file} (epoch {latest_epoch})")
    return os.path.join(save_dir, latest_file)


def main(args, env_name, number, ver):
    """
    Run the PPO-GNN training loop.

    Optionally resumes from the newest checkpoint, trains until
    ``args.episodes`` total epochs have been reached, saves a checkpoint every
    ``args.save_modulecheckpoint_freq`` epochs, and tracks the best epoch.

    :param args: parsed CLI namespace (episodes, resume, task, seed, ...)
    :param env_name: environment name used in the checkpoint directory
    :param number: run identifier used in the checkpoint directory
    :param ver: version tag (currently unused, kept for interface stability)
    :return: metrics dict of the best epoch, or None if no epoch was run
    """
    # Initialize environment and agent.
    env = environment.Env()
    obs, _ = env.reset(args.task)
    agent = PPO_GNN(env)

    rewards_list = []
    total_epochs = 0
    best_reward = float("-inf")
    best_epoch_info = None

    if args.resume:
        save_dir = f"checkpoint/{env_name}_number_{number}_gnn/"
        checkpoint_path = _find_latest_checkpoint(save_dir)

        # Restore agent state and training history, or start from scratch.
        if checkpoint_path and os.path.exists(checkpoint_path):
            loaded_epochs, training_history = load_model(agent, checkpoint_path)
        else:
            print("未找到有效的checkpoint文件，从头开始训练")
            loaded_epochs, training_history = 0, {}

        if training_history:
            rewards_list = training_history.get("rewards", [])
            total_epochs = loaded_epochs
            if rewards_list:
                best_reward = max(rewards_list)
                best_index = rewards_list.index(best_reward)
                print(f"从检查点恢复 - 当前最佳奖励: {best_reward:.4f} (轮次 {best_index})")

    # Per-epoch detailed metrics, keyed by epoch index.
    checkpoint_epochs_info = {}

    while total_epochs < args.episodes:
        print(f"\n第 {total_epochs+1} 轮实验")

        # NOTE(review): hard-codes the seed to 34 every epoch, overriding the
        # --seed CLI value and making every epoch identically seeded — confirm
        # this is intended before changing it.
        args.seed = 10 + 24
        env.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)

        # Reset the environment for this epoch.
        obs, _ = env.reset(args.task)

        # Per-epoch accumulators (delays scaled to milliseconds below).
        episode_steps = 0
        ep_reward = 0
        ep_migration = 0
        ep_total_download_time = 0
        ep_trans = 0
        ep_waiting_time = 0
        ep_total_download_size = 0
        ep_computation_delay = 0
        ep_arrange_delay = 0
        ep_scaling_delay = 0
        ep_backhaul_delay = 0
        done = False
        ep_policy_loss = []
        ep_value_loss = []

        print("####### GNN PPO Training #########")
        cloud_count = 0

        # Step-detail table header, printed only on the very first epoch.
        if total_epochs == 0:
            print("\n" + "=" * 80)
            print("| {:<4} | {:<10} | {:<10} | {:<14} | {:<14} | {:<14} | {:<10} |".format(
                "步骤", "Action", "Reward", "Policy Loss", "Value Loss", "累计奖励", "云调用"
            ))
            print("=" + "=" * 79)

        while not done:
            # Select an action; the last action index is the cloud offload.
            a, a_logprob = agent.select_action(env, obs)
            if a == config.EDGE_NODE_NUM:
                cloud_count += 1

            # Execute the action in the environment.
            obs_, reward, _info, done, details = env.step(a)

            # Advance the state; reward is negated (env returns a cost).
            obs = obs_
            r = -reward

            # Accumulate metrics (x1000 converts seconds to milliseconds;
            # download size is kept in its native unit).
            ep_reward += r * 1000
            ep_migration += details[0] * 1000
            ep_total_download_time += details[1] * 1000
            ep_trans += details[2] * 1000
            ep_waiting_time += details[3] * 1000
            ep_total_download_size += details[4]
            ep_computation_delay += details[5] * 1000
            ep_arrange_delay += details[6] * 1000
            ep_scaling_delay += details[7] * 1000
            ep_backhaul_delay += details[8] * 1000

            # Store the transition; when the buffer is full, run a PPO update.
            # NOTE(review): `obs` was already advanced to `obs_` above, so the
            # stored "current state" equals the next state — confirm this
            # matches PPO_GNN.store_transition's expected tuple layout.
            trans = (obs, a, a_logprob, r, obs_, done, done)
            if agent.store_transition(trans):
                temp_policy_loss, temp_value_loss = agent.update()
                ep_policy_loss.append(temp_policy_loss)
                ep_value_loss.append(temp_value_loss)

            episode_steps += 1

        # Close the step-detail table after the first epoch.
        if total_epochs == 0:
            print("=" * 80 + "\n")

        # Per-task averages for this epoch.
        tmp_r = ep_reward / args.task
        tmp_migration = ep_migration / args.task
        tmp_total_download_time = ep_total_download_time / args.task
        tmp_trans = ep_trans / args.task
        tmp_waiting_time = ep_waiting_time / args.task
        tmp_total_download_size = ep_total_download_size / args.task
        tmp_computation_delay = ep_computation_delay / args.task
        tmp_arrange_delay = ep_arrange_delay / args.task
        tmp_scaling_delay = ep_scaling_delay / args.task
        tmp_backhaul_delay = ep_backhaul_delay / args.task

        avg_policy_loss = np.mean(ep_policy_loss) if ep_policy_loss else 0
        avg_value_loss = np.mean(ep_value_loss) if ep_value_loss else 0

        # Epoch summary.
        print(f"\n第 {total_epochs+1} 轮训练总结:")
        print(f"  总步数: {episode_steps}")
        print(f"  平均Policy Loss: {avg_policy_loss:.6f}")
        print(f"  平均Value Loss: {avg_value_loss:.6f}")
        print(f"  平均奖励: {tmp_r:.4f}")
        print(f"  云调用次数: {cloud_count}")

        checkpoint_epochs_info[total_epochs] = {
            "policy_losses": avg_policy_loss,
            "value_losses": avg_value_loss,
            "rewards": tmp_r,
            "cloud_counts": cloud_count,
            "migration_delays": tmp_migration,
            "download_times": tmp_total_download_time,
            "trans_delays": tmp_trans,
            "waiting_times": tmp_waiting_time,
            "computation_delays": tmp_computation_delay,
            "arrange_delays": tmp_arrange_delay,
            "scaling_delays": tmp_scaling_delay,
            "backhaul_delays": tmp_backhaul_delay,
        }

        if total_epochs % args.save_modulecheckpoint_freq == 0:
            save_dir = f"checkpoint/{env_name}_number_{number}_gnn/"
            os.makedirs(save_dir, exist_ok=True)
            save_name = f"epoch_{total_epochs}.pth"
            save_path = os.path.join(save_dir, save_name)

            # This epoch's metrics plus the epoch counter; save_model appends
            # them to the histories already stored in the checkpoint file.
            checkpoint_info = {
                **checkpoint_epochs_info[total_epochs],
                "total_epochs": total_epochs,
            }
            save_model(agent, save_path, checkpoint_info)
            print("Checkpoint saved:", save_name)

        # Track the best epoch by average reward.
        if tmp_r > best_reward:
            best_reward = tmp_r
            best_epoch_info = checkpoint_epochs_info[total_epochs]
            print(f"找到新的最佳轮次! 奖励值: {best_reward:.4f}")

        total_epochs += 1

    return best_epoch_info

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PPO-GNN Training Script")
    parser.add_argument("--episodes", type=int, default=2000, help="训练轮数")
    # Bug fix: with `type=str, default=True`, passing `--resume False` yielded
    # the truthy string "False", so resume could never be disabled from the
    # CLI. Parse common falsy spellings to a real bool; the default stays True.
    parser.add_argument(
        "--resume",
        type=lambda v: str(v).strip().lower() not in ("false", "0", "no", ""),
        default=True,
        help="恢复训练的检查点路径",
    )
    parser.add_argument(
        "--save_modulecheckpoint_freq", type=int, default=1, help="模型保存频率"
    )
    parser.add_argument("--task", type=int, default=config.TASK_NUM, help="任务数量")
    parser.add_argument("--batch_size", type=int, default=64, help="训练批次大小")
    parser.add_argument("--seed", type=int, default=10, help="随机种子")

    args = parser.parse_args()

    # Run training and collect the best epoch's metrics.
    best_epoch_results = main(args, env_name="PPO_GNN", number="1500", ver="gnn")

    # main() returns None when no new epoch was run (e.g. resumed past
    # --episodes); guard the summary to avoid a TypeError on subscripting.
    if best_epoch_results is None:
        print("\n没有新的训练轮次，无最佳结果可总结")
        raise SystemExit(0)

    # Final summary of the best epoch.
    print("\n### 最佳轮次训练结果总结 ###")
    print(f"平均奖励: {best_epoch_results['rewards']}")
    print(f"平均迁移延迟: {best_epoch_results['migration_delays']} ms")
    print(f"平均下载时间: {best_epoch_results['download_times']} ms")
    print(f"平均传输延迟: {best_epoch_results['trans_delays']} ms")
    print(f"平均等待时间: {best_epoch_results['waiting_times']} ms")
    print(f"云调用次数: {best_epoch_results['cloud_counts']}")
    print(f"平均计算延迟: {best_epoch_results['computation_delays']} ms")
    print(f"平均调度延迟: {best_epoch_results['arrange_delays']} ms")
    print(f"平均扩展延迟: {best_epoch_results['scaling_delays']} ms")
    print(f"平均回程延迟: {best_epoch_results['backhaul_delays']} ms")

    # Loss information for the best epoch.
    print("\n损失信息:")
    print(f"Policy Losses: {best_epoch_results['policy_losses']}")
    print(f"Value Losses: {best_epoch_results['value_losses']}")
