import os
import torch
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter

from envs.env import Env
from framework.buffer import ReplayBuffer
from framework.trainer import Trainer, TrainerConfig
from framework.utils import set_seed
from models.gpt_model import GPT, GPTConfig


def main():
    """Run MADT offline pre-training for the SMAC ``2s3z`` scenario.

    Loads offline episode data, trains actor/critic GPT models, evaluates
    via environment rollouts every 5 epochs, checkpoints every 2 epochs,
    and logs everything to TensorBoard.
    """
    # Import once at function scope instead of inside the epoch loop
    # (the original re-imported on every evaluation epoch).
    from framework.rollout import RolloutWorker

    # ==== Basic run parameters ====
    seed = 123
    map_name = "2s3z"
    quality = "good"
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    offline_data_dir = os.path.join(project_root, "offline_data", map_name, quality)
    log_dir = os.path.join(
        project_root,
        f"logs/{map_name}_{quality}_{datetime.now().strftime('%m%d_%H%M%S')}",
    )
    pretrain_model_dir = os.path.join(project_root, f"offline_model/{map_name}/")
    print("[DEBUG] 读取离线数据路径:", offline_data_dir)

    set_seed(seed)
    torch.set_num_threads(8)

    # ==== Environment and dimensions ====
    eval_env = Env(n_threads=1)  # evaluation env with default parameters

    # Observation/action dimensions for SMAC 2s3z.
    global_obs_dim = 168
    local_obs_dim = 80
    action_dim = 9

    block_size = 3  # context_length * 3 — presumably (rtg, obs, action) per step; confirm
    target_rtgs = 20.0  # target return-to-go used to condition evaluation rollouts

    # ==== Model construction ====
    # Actor conditions on local observations, critic on the global state;
    # all other transformer hyperparameters are shared.
    shared_gpt_kwargs = dict(n_layer=2, n_head=2, n_embd=32,
                             model_type="state_only", max_timestep=400)
    actor_conf = GPTConfig(local_obs_dim, action_dim, block_size, **shared_gpt_kwargs)
    critic_conf = GPTConfig(global_obs_dim, action_dim, block_size, **shared_gpt_kwargs)

    actor = GPT(actor_conf, model_type="actor")
    critic = GPT(critic_conf, model_type="critic")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    actor = torch.nn.DataParallel(actor).to(device)
    critic = torch.nn.DataParallel(critic).to(device)

    # ==== Data loading ====
    buffer = ReplayBuffer(block_size, global_obs_dim, local_obs_dim, action_dim)

    print(f"[INFO] Reading from: {offline_data_dir}")
    # 42 episodes are available in this dataset — NOTE(review): verify against data dir.
    buffer.load_offline_data([offline_data_dir], [42], max_epi_length=400)

    dataset = buffer.sample()
    dataset.stats()

    # ==== Trainer configuration ====
    config = TrainerConfig(
        max_epochs=10,
        batch_size=128,
        learning_rate=5e-4,
        num_workers=0,
        mode="offline",
    )
    trainer = Trainer(actor, critic, config)
    writer = SummaryWriter(log_dir)

    # Create the checkpoint directory once, not on every epoch.
    os.makedirs(pretrain_model_dir, exist_ok=True)

    # ==== Offline Training ====
    print("开始 MADT offline 训练...")
    for epoch in range(config.max_epochs):
        actor_loss, critic_loss, *_ = trainer.train(dataset, train_critic=True)
        print(f"[Epoch {epoch}] actor_loss={actor_loss:.4f}, critic_loss={critic_loss:.4f}")

        writer.add_scalar("offline/actor_loss", actor_loss, epoch)
        writer.add_scalar("offline/critic_loss", critic_loss, epoch)

        # Evaluate every 5 epochs (including epoch 0).
        if epoch % 5 == 0:
            # A fresh worker per evaluation matches the original behavior;
            # hoist construction out of the loop if RolloutWorker is stateless.
            rollout_worker = RolloutWorker(actor, critic, buffer,
                                           global_obs_dim, local_obs_dim, action_dim)
            avg_return, win_rate, _ = rollout_worker.rollout(eval_env, target_rtgs, train=False)
            print(f"→ Eval: return={avg_return:.2f}, win_rate={win_rate:.2f}")
            writer.add_scalar("offline/avg_return", avg_return, epoch)
            writer.add_scalar("offline/win_rate", win_rate, epoch)

        # Checkpoint every 2 epochs.
        if epoch % 2 == 0:
            torch.save(actor.state_dict(), os.path.join(pretrain_model_dir, f"actor_epoch{epoch}.pt"))
            torch.save(critic.state_dict(), os.path.join(pretrain_model_dir, f"critic_epoch{epoch}.pt"))

    # Flush and release the TensorBoard event file (the original never closed it).
    writer.close()
    print("✅ offline 训练完成。模型保存在：", pretrain_model_dir)


if __name__ == "__main__":
    # freeze_support() is a no-op in normal runs but is required when the
    # script is bundled into a frozen Windows executable that spawns
    # multiprocessing workers.
    import multiprocessing as mp

    mp.freeze_support()
    main()