import numpy as np
import matplotlib.pyplot as plt
import random
from collections import defaultdict

class GridWorld:
    """A 3x4 grid-world environment with a wall, a goal (+1) and a trap (-1).

    States are (row, col) tuples with row 0 at the top. The wall cell is
    impassable; an episode terminates on entering the goal (0, 3) or the
    trap (1, 3).
    """
    def __init__(self):
        # Integer-encoded actions; see action_meaning for direction names.
        self.action_space = [0, 1, 2, 3]
        self.action_meaning = {0: 'up', 1: 'right', 2: 'down', 3: 'left'}
        # Reward received when ENTERING each cell. None marks the wall;
        # this makes the array object-dtype, but the wall cell can never
        # be entered, so its entry is never read as a reward.
        self.reward_map=np.array(
            [[0,0,0,1.0],
             [0,None,0,-1.0],
             [0,0,0,0]]
        )
        self.goal_state=(0,3)    # +1 terminal cell
        self.wall_state=(1,1)    # impassable cell
        self.start_state=(2,0)   # where reset() places the agent
        self.agent_state=self.start_state
    
    @property
    def height(self):
        """Number of rows in the grid."""
        return self.reward_map.shape[0]
    
    @property
    def width(self):
        """Number of columns in the grid."""
        return self.reward_map.shape[1]

    @property
    def shape(self):
        """(height, width) of the grid."""
        return self.reward_map.shape

    @property
    def actions(self):
        """List of valid action codes."""
        return self.action_space

    @property
    def states(self):
        """Yield every (row, col) state, excluding the wall cell."""
        for i in range(self.height):
            for j in range(self.width):
                if (i, j) != self.wall_state:  # exclude the wall state
                    yield (i, j)

    def next_state(self, state, action):
        # Deterministic transition: compute the next position for the given
        # state and action. Moves that would leave the grid or enter the
        # wall leave the agent in place.
        action_move_map = [(-1, 0), (0, 1), (1, 0), (0, -1)]  # (drow, dcol) per action code
        move=action_move_map[action]
        next_state=(state[0]+move[0],state[1]+move[1])
        ny,nx=next_state
        if ny<0 or ny>=self.height or nx<0 or nx>=self.width:
            next_state=state  # bumped into the outer boundary
        elif (ny,nx)==self.wall_state:
            next_state=state  # bumped into the wall
        return next_state
    
    def reward(self, state, action, next_state):
        """Reward of the transition, looked up by the cell being entered."""
        return self.reward_map[next_state]
    
    def is_terminal(self, state):
        """True for the goal state or the trap state (1, 3)."""
        return state == self.goal_state or state == (1, 3)  # goal state or negative-reward state
    
    def reset(self):
        """Move the agent back to the start state and return that state."""
        self.agent_state = self.start_state
        return self.agent_state
    
    def step(self, action):
        """Advance one time step; returns (next_state, reward, done)."""
        current_state = self.agent_state
        next_state = self.next_state(current_state, action)
        reward = self.reward(current_state, action, next_state)
        self.agent_state = next_state
        done = self.is_terminal(next_state)
        return next_state, reward, done
    
    def render_v(self, value_function=None, save_path=None):
        """Visualize the state-value function as a colored grid.

        Args:
            value_function: dict mapping (row, col) -> value; states missing
                from the dict are displayed as 0.
            save_path: if given, the figure is also written to this file.
        """
        if value_function is None:
            value_function = {}
        
        fig, ax = plt.subplots(figsize=(10, 8))
        
        # Draw one unit rectangle per cell. Matplotlib's y axis points up,
        # so row i is drawn at y = height-1-i to keep row 0 on top.
        for i in range(self.height):
            for j in range(self.width):
                if (i, j) == self.wall_state:
                    # Wall rendered as a black cell
                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, 
                                             facecolor='black', edgecolor='white'))
                    ax.text(j+0.5, self.height-1-i+0.5, 'WALL', 
                           ha='center', va='center', color='white', fontsize=12, weight='bold')
                elif (i, j) == self.goal_state:
                    # Goal rendered as a green cell
                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, 
                                             facecolor='lightgreen', edgecolor='black'))
                    value = value_function.get((i, j), 0)
                    ax.text(j+0.5, self.height-1-i+0.5, f'GOAL\n{value:.3f}', 
                           ha='center', va='center', fontsize=10, weight='bold')
                elif (i, j) == (1, 3):  # negative-reward (trap) state
                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, 
                                             facecolor='lightcoral', edgecolor='black'))
                    value = value_function.get((i, j), 0)
                    ax.text(j+0.5, self.height-1-i+0.5, f'TRAP\n{value:.3f}', 
                           ha='center', va='center', fontsize=10, weight='bold')
                elif (i, j) == self.start_state:
                    # Start state rendered as a blue cell
                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, 
                                             facecolor='lightblue', edgecolor='black'))
                    value = value_function.get((i, j), 0)
                    ax.text(j+0.5, self.height-1-i+0.5, f'START\n{value:.3f}', 
                           ha='center', va='center', fontsize=10, weight='bold')
                else:
                    # Ordinary state rendered as a white cell
                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, 
                                             facecolor='white', edgecolor='black'))
                    value = value_function.get((i, j), 0)
                    ax.text(j+0.5, self.height-1-i+0.5, f'{value:.3f}', 
                           ha='center', va='center', fontsize=12, weight='bold')
        
        ax.set_xlim(0, self.width)
        ax.set_ylim(0, self.height)
        ax.set_aspect('equal')
        ax.set_title('Grid World Value Function (Monte Carlo Method)', fontsize=16, weight='bold')
        ax.set_xticks(range(self.width+1))
        ax.set_yticks(range(self.height+1))
        plt.grid(True, alpha=0.3)
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"图像已保存到: {save_path}")
        
        plt.show()

# Monte Carlo method implementation
def random_policy(env, state):
    """Uniform random policy: pick any available action, ignoring *state*."""
    available = env.actions
    return random.choice(available)

def generate_episode(env, policy):
    """Roll out one episode from env.reset() under *policy*.

    Returns a list of (state, action, reward) transitions, including the
    terminal transition. The rollout is capped at 1000 steps to guard
    against episodes that never reach a terminal state.
    """
    max_steps = 1000  # hard cap: prevents an infinite random walk
    trajectory = []
    state = env.reset()

    for _ in range(max_steps):
        action = policy(env, state)
        next_state, reward, done = env.step(action)
        trajectory.append((state, action, reward))
        if done:
            break
        state = next_state

    return trajectory

def monte_carlo_first_visit(env, num_episodes=10000, gamma=0.9):
    """First-visit Monte Carlo prediction of the state-value function.

    Samples ``num_episodes`` episodes under the random policy and estimates
    V(s) as the mean of the discounted returns observed at the *first*
    visit of s within each episode.

    Args:
        env: environment exposing reset()/step() (see GridWorld).
        num_episodes: number of episodes to sample.
        gamma: discount factor in [0, 1].

    Returns:
        dict mapping state (row, col) -> estimated value.
    """
    returns = defaultdict(list)
    value_function = defaultdict(float)
    
    print(f"开始运行蒙特卡洛方法，总共 {num_episodes} 个episodes...")
    
    for episode_num in range(num_episodes):
        # Generate an episode
        episode = generate_episode(env, random_policy)
        
        # Index of the FIRST occurrence of each state in this episode.
        # BUG FIX: the previous version iterated backwards with a plain
        # "seen" set, which records the return of the LAST visit of each
        # state (the first encounter in reverse order is the last in time).
        first_visit = {}
        for t, (state, _, _) in enumerate(episode):
            first_visit.setdefault(state, t)
        
        # Accumulate the discounted return backwards through the episode
        G = 0
        for t in reversed(range(len(episode))):
            state, action, reward = episode[t]
            G = gamma * G + reward
            
            # Record G only at the state's first visit (first-visit MC)
            if first_visit[state] == t:
                returns[state].append(G)
                value_function[state] = np.mean(returns[state])
        
        # Print progress every 1000 episodes
        if (episode_num + 1) % 1000 == 0:
            print(f"Episode {episode_num + 1}/{num_episodes} completed")
    
    return dict(value_function)

def main():
    """Entry point: build the grid world, estimate V(s) by Monte Carlo, report and plot."""
    env = GridWorld()
    print("Grid World Environment:")
    print(f"Shape: {env.shape}")
    print(f"Actions: {env.actions} ({[env.action_meaning[a] for a in env.actions]})")
    print(f"Start state: {env.start_state}")
    print(f"Goal state: {env.goal_state}")
    print(f"Wall state: {env.wall_state}")
    print(f"Reward map:\n{env.reward_map}")

    separator = "=" * 50

    # Estimate the value function with Monte Carlo sampling
    print("\n" + separator)
    values = monte_carlo_first_visit(env, num_episodes=8000, gamma=0.9)

    print("\n" + separator)
    print("最终价值函数:")
    print("-" * 30)
    for s in sorted(values):
        print(f"State {s}: {values[s]:.4f}")

    # Render the estimated values on the grid
    print("\n正在生成可视化图表...")
    env.render_v(values, save_path="e:\\PycharmProjects\\python-master\\RL\\value_function_visualization.png")

    print("\n" + separator)
    print("结果分析:")
    print("-" * 30)
    print(f"目标状态 {env.goal_state} 的价值: {values.get(env.goal_state, 'N/A')}")
    print(f"起始状态 {env.start_state} 的价值: {values.get(env.start_state, 'N/A')}")
    print(f"陷阱状态 (1,3) 的价值: {values.get((1,3), 'N/A')}")

    # Report the extreme values among the visited states
    if values:
        max_state = max(values, key=values.get)
        min_state = min(values, key=values.get)
        print(f"价值最高的状态: {max_state} (价值: {values[max_state]:.4f})")
        print(f"价值最低的状态: {min_state} (价值: {values[min_state]:.4f})")

# Run the experiment only when executed as a script (not on import).
if __name__ == "__main__":
    main()