import os
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from torch import nn
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import torch as th
from env_grid import AirCombatEnv,CustomTensorboardCallback
from stable_baselines3.common.callbacks import EvalCallback

# tensorboard --logdir="D:\chiken\Stable_train\tensorboard_log"
# tensorboard --logdir=".\tensorboard_log"


from stable_baselines3.common.callbacks import BaseCallback

class CustomMLPExtractor(BaseFeaturesExtractor):
    """MLP feature extractor: obs_dim -> 512 -> 256 -> ``features_dim``.

    Bug fix: previously ``features_dim`` was ignored — the last layer was
    hard-coded to 128 and ``_features_dim`` was overwritten after
    ``super().__init__``.  The final layer is now sized by ``features_dim``.
    The default is 128 (not the old nominal 256) because the effective
    output dimension was always 128, so default callers see no change.

    :param observation_space: flat (1-D) observation space; only ``shape[0]``
        is read, so a Box space is assumed — TODO confirm against the env.
    :param features_dim: size of the extracted feature vector.
    """

    def __init__(self, observation_space, features_dim: int = 128):
        super().__init__(observation_space, features_dim)

        self.net = nn.Sequential(
            nn.Linear(observation_space.shape[0], 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, features_dim),
            nn.ReLU(),
        )

    def forward(self, observations: th.Tensor) -> th.Tensor:
        """Map a batch of observations to feature vectors."""
        return self.net(observations)

# Route the custom feature extractor into the SB3 policy; the extractor
# produces 128-dimensional features.
policy_kwargs = {
    "features_extractor_class": CustomMLPExtractor,
    "features_extractor_kwargs": {"features_dim": 128},
}

class CheckpointCallback(BaseCallback):
    """Periodically save the model during training.

    NOTE(review): stable_baselines3 ships its own ``CheckpointCallback``;
    this local class shadows that name.

    :param save_freq: save every ``save_freq`` calls to ``_on_step``.
    :param save_path: directory checkpoints are written into.
    :param verbose: verbosity level forwarded to ``BaseCallback``.
    """

    def __init__(self, save_freq: int, save_path: str, verbose: int = 0):
        super().__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path

        # Bug fix: the directory was previously hard-coded to "./model" and
        # the save_path argument was silently ignored (here and in _on_step).
        os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        # n_calls counts callback invocations; num_timesteps is the global
        # environment-step counter used to tag the checkpoint file.
        if self.n_calls % self.save_freq == 0:
            model_path = os.path.join(
                self.save_path,
                f"ppo_aircombat_{self.num_timesteps}_steps.zip"
            )
            self.model.save(model_path)
            print(f"模型已保存至：{model_path}")
        return True

def main():
    """Train PPO on AirCombatEnv with periodic checkpoints, then run one test episode."""
    env = AirCombatEnv(max_episode_steps=1024)  # create the training environment
    # PPO refreshes TensorBoard every n_steps * log_interval timesteps.
    tensorboard_log = r'./tensorboard_log/'
    model = PPO(
        "MlpPolicy",
        env,
        # Bug fix: policy_kwargs (custom feature extractor) was configured at
        # module level but never passed to PPO, so it was dead code.
        policy_kwargs=policy_kwargs,
        tensorboard_log=tensorboard_log,
        verbose=1,
        # ent_coef=0.01,  # entropy coefficient (exploration bonus)
    )
    # Configure callbacks: custom TB logging + periodic checkpointing.
    custom_callback = CustomTensorboardCallback(verbose=1)
    checkpoint_callback = CheckpointCallback(
        save_freq=50000,
        save_path="./model",  # checkpoint directory
        verbose=1
    )
    model.learn(
        total_timesteps=10000000,
        log_interval=1,
        callback=[custom_callback, checkpoint_callback],
    )
    model.save("Plane_battle_model_PPO")  # save the final model
    test_model(model, env)  # run one evaluation episode



def test_model(model, training_env=None):
    # 使用训练环境或新建环境
    if training_env is None:
        env = AirCombatEnv()
    else:
        env = training_env

    obs, _ = env.reset()
    terminated, truncated = False, False
    total_reward = 0

    while not (terminated or truncated):
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, terminated, truncated, info = env.step(action)
        total_reward += reward
        env.render()  # 确保实现了 render() 方法

    print(f'Total Reward: {total_reward}')
    env.close()


if __name__ == "__main__":
    # Run training only when executed as a script, not on import.
    main()


