import os
import time
import numpy as np
import torch
import gymnasium as gym
from datetime import datetime
from MAPPO import MAPPO, RolloutBuffer
from WRP_env import WRPEnvMultiAgent
import pandas as pd


def log_episode_result(path, episode, episode_reward, actions, storages):
    """Append one episode's summary as a CSV row to the file at *path*.

    Args:
        path: CSV log file to append to (header row written by the caller).
        episode: 1-based episode index.
        episode_reward: total reward of the episode (summed over agents/steps).
        actions: per-step sequences of per-agent action scalars.
        storages: per-step sequences of per-reservoir storage scalars.

    Each step is serialized as comma-separated 4-decimal floats and steps
    are joined with ';'; both fields are quoted so the embedded commas do
    not break the CSV.
    """
    def _fmt(x):
        # numpy/torch scalars expose .item(); fall back to float() so plain
        # Python numbers are accepted as well (original crashed on those).
        return f"{x.item() if hasattr(x, 'item') else float(x):.4f}"

    actions_str = ';'.join(
        ','.join(_fmt(a) for a in step_actions)
        for step_actions in actions
    )
    storages_str = ';'.join(
        ','.join(_fmt(s) for s in step_storage)
        for step_storage in storages
    )

    with open(path, 'a') as f:
        f.write(
            f"{episode},{episode_reward:.2f},"
            f"\"{actions_str}\",\"{storages_str}\","
            f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
        )

def load_WRP_problem(problem_name: str):
    """Load the reservoir-problem data stored under ``problems/<name>/``.

    Returns:
        A tuple ``(benefit, reservoirs)`` where ``benefit`` is the raw
        value array of ``benefit.csv`` and ``reservoirs`` is the
        ``reservation.csv`` DataFrame (Remin/Remax/Smin/... columns).
    """
    base = f"problems/{problem_name}"
    benefit = pd.read_csv(f"{base}/benefit.csv").values
    reservoirs = pd.read_csv(f"{base}/reservation.csv")
    return benefit, reservoirs

def train():
    """Train MAPPO on the multi-agent water-reservoir (WRP) environment.

    Loads problem data from ``problems/<env_name>/``, runs the episode
    loop, appends one CSV row per episode to ``MAPPO_logs/<env_name>/``
    and periodically checkpoints the actor/critic weights to
    ``MAPPO_preTrained/``.
    """
    print("=============== MAPPO Training ===============")

    ################### Hyper-parameters ###################
    env_name = "WRP"
    num_agents = 4                   # one agent per reservoir
    max_episodes = 500000            # maximum number of training episodes
    max_ep_len = 1000                # maximum steps per episode
    update_timestep = 60             # run a policy update every N env steps
    save_model_freq = 10000          # checkpoint frequency (episodes)
    log_freq = 1000                  # console-log frequency (episodes)

    # MAPPO hyper-parameters
    lr_actor = 1e-4
    lr_critic = 3e-4
    gamma = 0.96
    K_epochs = 10
    eps_clip = 0.1

    # Exploration-noise schedule.
    # NOTE(review): the decay parameters are only handed to MAPPO; the
    # decay itself is assumed to happen inside MAPPO — confirm.
    action_std_init = 0.6
    action_std_decay_rate = 0.02
    min_action_std = 0.1

    ################### Environment setup ###################
    benefit_df, reservoir_df = load_WRP_problem(env_name)
    Remin = reservoir_df['Remin'].values
    Remax = reservoir_df['Remax'].values
    Smin = reservoir_df['Smin'].values
    Smax = reservoir_df['Smax'].values
    Sinitial = reservoir_df['Sinitial'].values
    Starget = reservoir_df['Starget'].values
    Qr = reservoir_df['Qr'].values
    punish_rate = 5

    # Multi-agent reservoir environment
    env = WRPEnvMultiAgent(
        benefit_df=benefit_df,
        Remin=Remin,
        Remax=Remax,
        Smin=Smin,
        Smax=Smax,
        Sinitial=Sinitial,
        Starget=Starget,
        Qr=Qr,
        punish_rate=punish_rate,
    )

    ################### MAPPO setup ###################
    # Per-agent release bounds define each continuous action range.
    action_bounds = [(Remin[i], Remax[i]) for i in range(num_agents)]

    mappo = MAPPO(
        num_agents=num_agents,
        actor_dims=[5] * num_agents,   # per-agent observation dimension
        global_state_dim=13,           # Qr(4)+Storage(4)+Re(4)+time=13
        action_dims=[1] * num_agents,
        action_bounds=action_bounds,
        lr_actor=lr_actor,
        lr_critic=lr_critic,
        gamma=gamma,
        K_epochs=K_epochs,
        eps_clip=eps_clip,
        action_std_init=action_std_init,
        action_std_decay_rate=action_std_decay_rate,
        min_action_std=min_action_std,
    )

    ################### Logging ###################
    log_dir = os.path.join("MAPPO_logs", env_name)
    os.makedirs(log_dir, exist_ok=True)
    # Number this run by how many log files already exist in the directory.
    run_num = len(next(os.walk(log_dir))[2])

    log_f_name = os.path.join(log_dir, f"PPO_{env_name}_log_{run_num}.csv")
    print("日志文件路径:", log_f_name)
    with open(log_f_name, 'w') as f:
        f.write("episode,episode_reward,actions,storages,timestamp\n")

    ################### Checkpoint directory ###################
    model_save_dir = "MAPPO_preTrained"
    os.makedirs(model_save_dir, exist_ok=True)

    ################### Training loop ###################
    start_time = datetime.now()
    print("训练开始时间:", start_time)

    time_step = 0
    # Logging-window accumulators, reset every log_freq episodes.
    # BUGFIX: losses are now accumulated over the whole window instead of
    # dividing only the *last* episode's loss by the window episode count.
    window_reward = 0.0
    window_episodes = 0
    window_actor_loss = 0.0
    window_critic_loss = 0.0

    for episode in range(max_episodes):
        agent_obs = env.reset()
        global_state = env.prepare_global_state()

        episode_reward = 0
        episode_actions = []    # per-step list of per-agent actions
        episode_storages = []   # per-step snapshot of reservoir storage

        for t in range(max_ep_len):
            time_step += 1

            # ================= Collect experience =================
            actions = []
            logprobs = []
            for agent_id in range(num_agents):
                action, logprob = mappo.select_action(agent_id, agent_obs[agent_id])
                actions.append(action)
                logprobs.append(logprob)

            next_agent_obs, rewards, dones, info = env.step(actions)
            next_global_state = env.prepare_global_state()

            # Record the joint action and the post-step storage levels.
            episode_actions.append(actions)
            episode_storages.append(env.state['Storage'].copy())

            # Store the transition for every agent.
            for agent_id in range(num_agents):
                mappo.store_transition(
                    agent_id=agent_id,
                    state=agent_obs[agent_id],
                    action=actions[agent_id],
                    logprob=logprobs[agent_id],
                    reward=rewards[agent_id],
                    next_state=next_agent_obs[agent_id],
                    done=dones[agent_id],
                )

            # ================= Policy update =================
            if time_step % update_timestep == 0:
                actor_loss, critic_loss = mappo.update(global_state)
                window_actor_loss += actor_loss
                window_critic_loss += critic_loss

            # Advance to the next step.
            agent_obs = next_agent_obs
            global_state = next_global_state
            episode_reward += sum(rewards)

            if all(dones):
                break

        # ================= Per-episode logging =================
        window_reward += episode_reward
        window_episodes += 1

        log_episode_result(
            log_f_name,
            episode + 1,
            episode_reward,
            episode_actions,
            episode_storages,
        )

        # Periodic checkpoint.
        if (episode + 1) % save_model_freq == 0:
            model_path = os.path.join(model_save_dir, f"MAPPO_{env_name}_episode{episode+1}.pth")
            torch.save({
                'global_critic': mappo.critic.state_dict(),
                'actors': [actor.state_dict() for actor in mappo.actors],
            }, model_path)

        # Periodic console summary over the logging window.
        if (episode + 1) % log_freq == 0:
            avg_reward = window_reward / window_episodes
            avg_actor_loss = window_actor_loss / window_episodes
            avg_critic_loss = window_critic_loss / window_episodes

            print("-" * 50)
            print(f"Episode: {episode+1}")
            print(f"Avg Reward: {avg_reward:.2f}")
            print(f"Actor Loss: {avg_actor_loss:.4f}")
            print(f"Critic Loss: {avg_critic_loss:.4f}")
            print(f"Time Elapsed: {datetime.now() - start_time}")

            # Reset the window accumulators.
            window_reward = 0.0
            window_episodes = 0
            window_actor_loss = 0.0
            window_critic_loss = 0.0

    # Save the final model after training completes.
    final_model_path = os.path.join(model_save_dir, f"MAPPO_{env_name}_final.pth")
    torch.save({
        'global_critic': mappo.critic.state_dict(),
        'actors': [actor.state_dict() for actor in mappo.actors],
    }, final_model_path)

    print("训练总用时:", datetime.now() - start_time)

if __name__ == "__main__":
    # Entry point: run the full MAPPO training loop when executed as a script.
    train()