import numpy as np
import pickle
from rewards import draw_rewards

class QLearningAgent:
    def __init__(self, size, epsilon=0.1, alpha=0.1, gamma=0.9):
        self.size = size
        self.epsilon = epsilon
        self.alpha = alpha
        self.gamma = gamma
        self.q_table = {}

    def get_state_key(self, state):
        # 将 state 转换为元组，以便用作字典键
        return tuple(map(tuple, state))

    def get_valid_actions(self, state):
        # 获取所有合法动作
        return [(r, c) for r in range(self.size) for c in range(self.size) if state[r][c] == 0]

    def choose_action(self, state):
        valid_actions = self.get_valid_actions(state)
    
        # 如果 valid_actions 为空，返回 None 或其他处理方式
        if not valid_actions:
            return None
    
        if np.random.rand() < self.epsilon:
            # 探索：随机选择合法动作
            return valid_actions[np.random.randint(len(valid_actions))]
        else:
            # 利用：选择 Q 值最大的合法动作
            state_key = self.get_state_key(state)
            if state_key not in self.q_table:
                self.q_table[state_key] = np.zeros((self.size, self.size))
            q_values = self.q_table[state_key]
        
            # 获取合法动作的 Q 值
            q_values_valid = [q_values[r, c] for r, c in valid_actions]
        
            # 选择 Q 值最大的动作
            best_action_index = np.argmax(q_values_valid)
            return valid_actions[best_action_index]

    def update_q_table(self, state, action, reward, next_state):
        state_key = self.get_state_key(state)
        next_state_key = self.get_state_key(next_state)

        if next_state_key not in self.q_table:
            self.q_table[next_state_key] = np.zeros((self.size, self.size))

        max_q_next = np.max(self.q_table[next_state_key])
        q_value = self.q_table[state_key][action[0], action[1]]

        self.q_table[state_key][action[0], action[1]] = q_value + self.alpha * (reward + self.gamma * max_q_next - q_value)

def train_agent(agent, env, episodes=1000):
    """Train `agent` on `env` for `episodes` episodes with online Q updates.

    Every 100 episodes the Q-table is pickled to disk as a checkpoint; when
    training finishes, the per-episode reward curve is plotted.
    """
    rewards_per_episode = []
    for ep in range(episodes):
        state = env.reset()
        ep_reward = 0
        done = False
        while not done:
            action = agent.choose_action(state)
            # No legal move left: end the episode early.
            if action is None:
                break
            next_state, reward, done = env.step(action)
            agent.update_q_table(state, action, reward, next_state)
            state = next_state
            ep_reward += reward
            # Trace every step of each 10th episode.
            if (ep + 1) % 10 == 0:
                print(f"Episode {ep + 1}/{episodes}, Action: {action}, Reward: {reward}")
        rewards_per_episode.append(ep_reward)
        # Periodic checkpoint of the learned Q-table.
        if (ep + 1) % 100 == 0:
            checkpoint = f'q_table_episode_{ep + 1}.pkl'
            with open(checkpoint, 'wb') as f:
                pickle.dump(agent.q_table, f)
            print(f"Q表已保存至 {checkpoint}")
    env.close()
    draw_rewards(rewards_per_episode)  # plot the reward curve
