from rl_algorithms import (
    create_agent, run_rl_example, train_agent, test_agent,
    DQNAgent, DoubleDQNAgent, DuelingDQNAgent, PERDQNAgent, A2CAgent
)
import numpy as np
import matplotlib.pyplot as plt

def demo_single_algorithm(agent_type='dqn'):
    """Run a quick training demo for one RL algorithm.

    Args:
        agent_type: agent identifier — one of
            'dqn', 'double_dqn', 'dueling_dqn', 'per_dqn', 'a2c'.

    Returns:
        Tuple ``(train_rewards, avg_reward)`` from the example run
        (the test-episode rewards are discarded).
    """
    print(f"\n===== 演示 {agent_type.upper()} 算法 =====")

    # Delegate to the factory-style helper; a small episode budget
    # keeps the demonstration fast.
    train_rewards, _test_rewards, avg_reward = run_rl_example(
        agent_type=agent_type,
        env_name='CartPole-v1',
        episodes=50  # reduced episode count to speed up the demo
    )

    return train_rewards, avg_reward

def demo_all_algorithms():
    """Briefly train every supported algorithm and compare them.

    Returns:
        Dict mapping algorithm name -> average test reward; algorithms
        whose run yielded no reward (``None``) are omitted.
    """
    print("\n===== 比较所有强化学习算法 =====")
    print("注意：此演示将训练多个算法，可能需要较长时间...")

    # Short training budget so the full comparison stays tolerable.
    episode_budget = 30
    scores = {}

    for name in ('dqn', 'double_dqn', 'dueling_dqn', 'per_dqn', 'a2c'):
        print(f"\n正在训练 {name.upper()}...")
        _, _, mean_reward = run_rl_example(agent_type=name, episodes=episode_budget)
        if mean_reward is not None:
            scores[name] = mean_reward

    if scores:
        print("\n===== 算法性能比较 =====")
        # Print the best-performing algorithm first.
        ranking = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        for name, mean_reward in ranking:
            print(f"{name.upper()}: 平均奖励 = {mean_reward:.2f}")

    return scores

def demo_advanced_usage():
    """
    Advanced usage example — manually create and configure agents.

    Demonstrates, on the LunarLander-v2 gym environment:
      1. building a Dueling DQN agent from an explicit hyperparameter
         dict, then training and testing it;
      2. building an A2C agent the same way;
      3. saving the Dueling DQN model to disk and loading it into a
         fresh agent.

    Prints an install hint if gym is missing; any other failure is
    reported and swallowed (the demo is best-effort).
    """
    try:
        import gym

        print("\n===== 高级用法示例 =====")

        # Create the environment
        env_name = 'LunarLander-v2'  # a more complex environment than CartPole
        print(f"使用环境: {env_name}")
        env = gym.make(env_name)

        # Read state/action dimensions from the environment's spaces
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n

        print(f"状态维度: {state_dim}, 动作维度: {action_dim}")

        # 1. Manually create a Dueling DQN agent with explicit hyperparameters
        print("\n1. 手动创建和训练 Dueling DQN 智能体...")
        dueling_config = {
            'hidden_dim': 128,
            'lr': 5e-4,
            'gamma': 0.99,
            'epsilon': 1.0,
            'epsilon_min': 0.01,
            'epsilon_decay': 0.995,
            'batch_size': 64,
            'buffer_capacity': 100000,
            'target_update_freq': 10
        }

        dueling_agent = DuelingDQNAgent(state_dim, action_dim, **dueling_config)

        print(f"训练 Dueling DQN 20轮...")
        dueling_rewards = train_agent(
            env,
            dueling_agent,
            episodes=20,
            max_steps=1000,
            target_update_freq=dueling_config['target_update_freq']
        )

        print("测试 Dueling DQN 智能体...")
        test_rewards, avg_reward = test_agent(env, dueling_agent, episodes=5)

        # 2. Manually create an A2C agent with explicit hyperparameters
        print("\n2. 手动创建和训练 A2C 智能体...")
        a2c_config = {
            'hidden_dim': 128,
            'lr': 3e-4,
            'gamma': 0.99,
            'entropy_coef': 0.01
        }

        a2c_agent = A2CAgent(state_dim, action_dim, **a2c_config)

        print(f"训练 A2C 20轮...")
        a2c_rewards = train_agent(
            env,
            a2c_agent,
            episodes=20,
            max_steps=1000,
            update_every=5  # A2C-specific parameter: update every 5 steps
        )

        print("测试 A2C 智能体...")
        test_rewards, avg_reward = test_agent(env, a2c_agent, episodes=5)

        # Model save example
        print("\n3. 模型保存和加载示例:")
        model_path = f"{env_name.lower()}_dueling_dqn_model.pth"
        print(f"保存 Dueling DQN 模型到: {model_path}")
        dueling_agent.save_model(model_path)

        # Create a fresh agent and load the saved model into it
        new_agent = DuelingDQNAgent(state_dim, action_dim)
        print(f"从 {model_path} 加载模型")
        try:
            new_agent.load_model(model_path)
            print("模型加载成功!")
        except Exception as e:
            print(f"模型加载演示失败: {e}")

        # Close the environment to release its resources
        env.close()

        print("\n高级用法示例完成!")

    except ImportError:
        print("请先安装gym库: pip install gym")
    except Exception as e:
        print(f"运行高级示例时出错: {e}")

def demo_custom_environment():
    """
    Custom-environment integration example.

    Prints (does not execute) a sample code listing showing how to plug
    a gym-like custom environment into this package's agents. The
    listing itself is user-facing output and is printed verbatim.
    """
    print("\n===== 自定义环境集成示例 =====")
    print("以下是如何将我们的算法与自定义环境集成的示例代码:")

    # NOTE: this is a runtime string that gets printed, not executed —
    # its contents (including comments) are shown to the user as-is.
    example_code = '''
# 自定义环境示例 (需要实现类似gym的接口)
class CustomEnvironment:
    def __init__(self):
        self.state_dim = 4  # 状态维度
        self.action_dim = 2  # 动作维度
        self.current_state = None
    
    def reset(self):
        # 重置环境并返回初始状态
        self.current_state = np.random.randn(self.state_dim)
        return self.current_state
    
    def step(self, action):
        # 执行动作并返回下一状态、奖励、是否结束等信息
        next_state = self.current_state + np.random.randn(self.state_dim) * 0.1
        reward = -np.sum(np.square(next_state))  # 示例奖励函数
        done = np.sum(np.square(next_state)) > 10  # 示例结束条件
        self.current_state = next_state
        return next_state, reward, done, {}

# 使用自定义环境
def use_custom_environment():
    # 创建自定义环境
    env = CustomEnvironment()
    
    # 获取状态和动作维度
    state_dim = env.state_dim
    action_dim = env.action_dim
    
    # 创建智能体
    agent = create_agent('dqn', state_dim, action_dim)
    
    # 训练和测试
    train_agent(env, agent, episodes=50)
    test_agent(env, agent, episodes=10)
    
    return agent
'''
    
    print(example_code)
    print("注意：这只是示例代码，需要根据实际的自定义环境进行调整。")

def compare_hyperparameters():
    """Compare the effect of different learning rates on DQN performance.

    For each learning rate, trains a fresh DQN agent on CartPole-v1,
    evaluates it, and finally prints the learning rates ranked by
    average test reward (best first).

    Prints an install hint if gym is missing; any other failure is
    reported and swallowed (the demo is best-effort).
    """
    try:
        import gym

        print("\n===== 超参数比较示例 =====")
        print("比较不同学习率对DQN性能的影响")

        env = gym.make('CartPole-v1')
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n

        learning_rates = [1e-4, 5e-4, 1e-3, 5e-3]
        results = {}

        for lr in learning_rates:
            print(f"\n测试学习率: {lr}")
            # Fresh agent per learning rate so the runs are independent.
            agent = DQNAgent(
                state_dim,
                action_dim,
                lr=lr,
                batch_size=64,
                gamma=0.99
            )

            # Train; the per-episode reward list is not needed here, so
            # the return value is deliberately discarded (fixes the
            # previously unused local).
            train_agent(
                env,
                agent,
                episodes=30,
                max_steps=500
            )

            # Evaluate the trained agent.
            _, avg_reward = test_agent(env, agent, episodes=5)
            results[lr] = avg_reward

        # Report, best learning rate first.
        print("\n学习率比较结果:")
        for lr, reward in sorted(results.items(), key=lambda x: x[1], reverse=True):
            print(f"学习率 {lr:.1e}: 平均奖励 = {reward:.2f}")

        env.close()

    except ImportError:
        print("请先安装gym库: pip install gym")
    except Exception as e:
        print(f"运行超参数比较时出错: {e}")

def visualize_training(rewards, algorithm_name):
    """Plot per-episode training rewards and save the chart as a PNG.

    Args:
        rewards: sequence of per-episode reward values.
        algorithm_name: algorithm name, used in the chart title and the
            output filename ('<algorithm_name>_training.png').

    The figure is saved rather than shown (avoids display problems on
    headless machines) and then closed, so repeated calls do not leak
    matplotlib figure objects. Any failure (e.g. matplotlib missing)
    is reported and swallowed — the demo is best-effort.
    """
    try:
        import matplotlib.pyplot as plt

        plt.figure(figsize=(10, 6))
        plt.plot(rewards, label='每轮奖励')

        # Moving average to smooth the curve; the window scales with the
        # data length and the overlay is skipped for very short runs.
        window_size = min(10, len(rewards) // 5)
        if window_size > 1:
            moving_avg = np.convolve(rewards, np.ones(window_size)/window_size, mode='valid')
            plt.plot(range(window_size-1, len(rewards)), moving_avg, 'r-', label=f'{window_size}轮移动平均')

        plt.title(f'{algorithm_name.upper()} 训练过程')
        plt.xlabel('训练轮数')
        plt.ylabel('奖励')
        plt.legend()
        plt.grid(True)

        # Save instead of show, then close the figure — previously the
        # figure was left open, accumulating across calls.
        plt.savefig(f'{algorithm_name}_training.png')
        plt.close()
        print(f"训练可视化图表已保存为: {algorithm_name}_training.png")

    except Exception as e:
        print(f"可视化时出错: {e}")

def main():
    """
    Main entry point for the RL algorithm demonstration program.

    Runs, in order: a single-algorithm DQN demo (with training-curve
    visualization), the advanced manual-configuration demo, the
    custom-environment integration guide, the learning-rate comparison,
    and — only if the user confirms interactively — the full comparison
    of all algorithms. Finishes by printing a short usage guide.
    """
    print("===== 强化学习算法演示程序 =====")
    print("支持的算法: DQN, Double DQN, Dueling DQN, PER DQN, A2C")
    
    # 1. Single-algorithm demo
    print("\n1. 单个算法快速演示 (DQN)")
    train_rewards, avg_reward = demo_single_algorithm('dqn')
    
    # Visualize the training result (skipped if no rewards came back)
    if train_rewards:
        visualize_training(train_rewards, 'dqn')
    
    # 2. Advanced usage demo
    print("\n2. 高级用法演示 (自定义配置)")
    demo_advanced_usage()
    
    # 3. Custom-environment integration example
    print("\n3. 自定义环境集成指南")
    demo_custom_environment()
    
    # 4. Hyperparameter comparison example
    print("\n4. 超参数比较示例")
    compare_hyperparameters()
    
    # 5. Ask the user whether to run the full all-algorithm comparison
    # (it can take a long time)
    user_input = input("\n是否要运行所有算法的完整比较？(y/n，可能需要较长时间): ")
    if user_input.lower() == 'y':
        demo_all_algorithms()
    else:
        print("跳过完整算法比较。")
    
    print("\n===== 演示完成 =====")
    print("\n使用指南:")
    print("1. 要使用特定算法: create_agent(agent_type, state_dim, action_dim, **kwargs)")
    print("2. 训练智能体: train_agent(env, agent, episodes=100)")
    print("3. 测试智能体: test_agent(env, agent, episodes=10)")
    print("4. 快速运行示例: run_rl_example(agent_type='dqn', env_name='CartPole-v1')")

# Run the main function when this file is executed directly
if __name__ == "__main__":
    main()