import os
import torch
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter

from madt.sc2.framework.buffer import ReplayBuffer
from madt.sc2.framework.trainer import Trainer, TrainerConfig
from madt.sc2.framework.utils import set_seed
from madt.sc2.models.gpt_model import GPT, GPTConfig
from madt.sc2.framework.dim_infer import infer_dims_from_file
# ==== Hyperparameter / path configuration ====
offline_data_dir = "offline_data/2s3z/good/"  # change this to your map's data path
save_model_dir = "offline_model/2s3z/"        # per-epoch checkpoints are written here
log_dir = "logs/2s3z/" + datetime.now().strftime("[%m-%d]%H.%M.%S")  # unique TensorBoard run dir

seed = 42
context_length = 1       # transformer context window, in timesteps
offline_epochs = 20      # number of offline training epochs
batch_size = 128
learning_rate = 5e-4
max_timestep = 400       # cap on episode length / timestep embedding range
block_size = context_length * 3  # 3 tokens per timestep (state, action, return-style layout — confirm against GPT model)

# ==== Seed RNGs and pick the compute device ====
set_seed(seed)
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
print(f"✅ Using device: {device}")


# ==== Infer observation/action dimensions from the offline data ====
# Derive the glob pattern from offline_data_dir instead of repeating the
# hard-coded map path, so switching maps requires editing only one constant.
offline_data_pattern = os.path.join(offline_data_dir, "*.json")
global_obs_dim, local_obs_dim, action_dim = infer_dims_from_file(offline_data_pattern)
print(f"📐 推断维度：global={global_obs_dim}, local={local_obs_dim}, action={action_dim}")


# ==== Create the TensorBoard logger ====
# One writer per run; log_dir is timestamped so runs never collide.
writer = SummaryWriter(log_dir)

# ==== Build actor / critic model configurations ====
# The two configs are identical except for the observation dimension
# (actor: local observations, critic: global state), so construct both
# through a single private factory.
def _build_gpt_config(obs_dim):
    """Return a state-only GPTConfig sized for obs_dim-dimensional inputs."""
    return GPTConfig(
        input_dim=obs_dim,
        vocab_size=action_dim,
        block_size=block_size,
        n_layer=2,
        n_head=2,
        n_embd=32,
        state_size=obs_dim,
        model_type="state_only",
        max_timestep=max_timestep,
    )

actor_conf = _build_gpt_config(local_obs_dim)    # actor conditions on local obs
critic_conf = _build_gpt_config(global_obs_dim)  # critic conditions on global state

# ==== Instantiate the actor and critic networks ====
actor_model = GPT(actor_conf, model_type="actor")
critic_model = GPT(critic_conf, model_type="critic")

# On CUDA hosts, wrap both networks in DataParallel and move them to the GPU.
if torch.cuda.is_available():
    actor_model, critic_model = (
        torch.nn.DataParallel(net).to(device)
        for net in (actor_model, critic_model)
    )

# ==== Load trajectory data ====
# Fill the replay buffer from the offline JSON episodes, then materialize a
# training dataset from it. Order matters: load before sample.
buffer = ReplayBuffer(block_size, global_obs_dim, local_obs_dim, action_dim)
# NOTE(review): [200] presumably caps episodes taken from the directory, and
# max_epi_length truncates each episode — confirm against ReplayBuffer.load_offline_data.
buffer.load_offline_data([offline_data_dir], [200], max_epi_length=max_timestep)
dataset = buffer.sample()
dataset.stats()  # print/log dataset statistics

# ==== 设置训练器 ====
trainer_config = TrainerConfig(max_epochs=offline_epochs, batch_size=batch_size,
                               learning_rate=learning_rate, num_workers=0, mode="offline")
trainer = Trainer(actor_model, critic_model, trainer_config)

# ==== Offline fine-tuning loop ====
# The checkpoint directories are loop-invariant: build the paths and create
# the directories once up front instead of re-doing it every epoch.
save_actor_path = os.path.join(save_model_dir, "actor")
save_critic_path = os.path.join(save_model_dir, "critic")
os.makedirs(save_actor_path, exist_ok=True)
os.makedirs(save_critic_path, exist_ok=True)

for epoch in range(offline_epochs):
    # One full pass over the offline dataset; critic is trained alongside the actor.
    actor_loss, critic_loss, _, _, _ = trainer.train(dataset, train_critic=True)
    print(f"[Epoch {epoch}] Actor Loss: {actor_loss:.4f} | Critic Loss: {critic_loss:.4f}")
    writer.add_scalar("loss/actor", actor_loss, epoch)
    writer.add_scalar("loss/critic", critic_loss, epoch)

    # Save a checkpoint per epoch. NOTE(review): on CUDA the models are wrapped
    # in DataParallel, so these state_dict keys carry a "module." prefix —
    # whatever loads these checkpoints must account for that.
    torch.save(actor_model.state_dict(), os.path.join(save_actor_path, f"{epoch}.pkl"))
    torch.save(critic_model.state_dict(), os.path.join(save_critic_path, f"{epoch}.pkl"))

writer.close()
print("✅ Offline fine-tuning finished.")
