import numpy as np
import torch
import gymnasium as gym
import os
from datetime import datetime
from dqn_agent import DQNAgent  # assumes the agent class is saved as dqn_agent.py

def train_and_save():
    """Train a DQN agent on CartPole-v1 and checkpoint it periodically.

    Creates a timestamped directory under ``saved_models/`` and writes
    checkpoints (policy/target networks, optimizer state, epsilon, the
    reward/loss history, and the hyperparameter dict) every 100 episodes
    and at the final episode.
    """
    # Initialize environment and agent
    env = gym.make('CartPole-v1')
    agent = DQNAgent(env)

    # One unique save directory per run
    save_dir = f"saved_models/{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    os.makedirs(save_dir, exist_ok=True)

    # Training hyperparameters.
    # NOTE(review): 'gamma' and 'lr' are only saved as checkpoint metadata —
    # they are never passed to DQNAgent here; confirm they match the agent's
    # own defaults.
    train_params = {
        'episodes': 200,
        'max_steps': 500,
        'update_interval': 10,
        'gamma': 0.99,
        'lr': 1e-3
    }

    # Training loop
    rewards, losses = [], []
    for ep in range(train_params['episodes']):
        state, info = env.reset()
        ep_reward = 0.0
        ep_loss = 0.0
        n_steps = 0  # steps actually taken this episode (for loss averaging)

        for step in range(train_params['max_steps']):
            action = agent.select_action(state)
            # Gymnasium's step() returns (obs, reward, terminated, truncated,
            # info); an episode ends on EITHER flag. The original code dropped
            # `truncated`, so time-limit endings were never marked done in the
            # replay buffer, corrupting the bootstrapping target.
            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated

            # Shaped reward: 1 when the pole is upright, falling linearly with
            # the pole angle; 0.4189 rad (~24 deg) is CartPole's failure angle.
            x, x_dot, theta, theta_dot = next_state
            reward = 1 - abs(theta) / 0.4189

            agent.buffer.push(state, action, reward, next_state, done)

            loss = agent.update_model()
            # `if loss:` would skip a legitimate 0.0 loss; check for None,
            # which presumably signals "buffer not warm yet" — TODO confirm
            # against DQNAgent.update_model.
            if loss is not None:
                ep_loss += loss

            state = next_state
            ep_reward += reward
            n_steps += 1

            if done:
                break

        # Periodically sync the target network with the policy network
        if ep % train_params['update_interval'] == 0:
            agent.update_target_net()

        rewards.append(ep_reward)
        # Average loss over steps taken (the original divided by the last
        # step *index*, an off-by-one that also leaked the loop variable)
        losses.append(ep_loss / max(n_steps, 1))

        # Checkpoint every 100 episodes and on the final episode
        if ep % 100 == 0 or ep == train_params['episodes'] - 1:
            torch.save({
                'policy_net': agent.policy_net.state_dict(),
                'target_net': agent.target_net.state_dict(),
                'optimizer': agent.optimizer.state_dict(),
                'epsilon': agent.epsilon,
                'rewards': rewards,
                'losses': losses,
                'params': train_params
            }, f"{save_dir}/model_ep{ep}.pth")

        # Progress report every 50 episodes
        if ep % 50 == 0:
            avg_reward = np.mean(rewards[-50:])
            print(f"Episode {ep+1}, Avg Reward: {avg_reward:.1f}, Epsilon: {agent.epsilon:.3f}")

    env.close()
    print(f"Training completed. Models saved to {save_dir}")

# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    train_and_save()