import random


# Action space for rock-paper-scissors: 0 = rock, 1 = scissors, 2 = paper.
action_space = [0, 1, 2]

# Q-table: maps a state (the opponent's previous move) to a list of Q-values,
# one per agent action. Every known state starts with all-zero values.
Q = {state: [0.0, 0.0, 0.0] for state in action_space}


# Reward scheme: 1 point for a win, 0.5 for a draw, 0 for a loss.
def get_reward(agent_action, opponent_action):
    """Score one round from the agent's perspective.

    Encoding: 0 = rock, 1 = scissors, 2 = paper.
    Returns 1 on a win, 0.5 on a draw, 0 on a loss.
    """
    if agent_action == opponent_action:
        return 0.5
    # Each move beats the next one in the cycle 0 -> 1 -> 2 -> 0.
    if (agent_action, opponent_action) in {(0, 1), (1, 2), (2, 0)}:
        return 1
    return 0


# Epsilon-greedy action selection over the Q-table.
def choose_action(opponent_last_action, epsilon):
    """Pick an action for the state given by the opponent's last move.

    With probability ``epsilon`` explores uniformly at random; otherwise
    exploits the highest-Q action (ties broken toward the lowest index).
    States not yet in the Q-table are lazily added with zero values.
    """
    q_values = Q.setdefault(opponent_last_action, [0.0, 0.0, 0.0])
    if random.random() < epsilon:
        # Explore: uniform random move.
        return random.choice(action_space)
    # Exploit: greedy move; list.index keeps the first (lowest-index) maximum.
    best_value = max(q_values)
    return action_space[q_values.index(best_value)]


# One-step Q-learning backup: Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
def update_Q(opponent_last_action, agent_action, reward, next_opponent_action, alpha, gamma):
    """Apply a Q-learning update for the transition (s, a, r, s').

    s = opponent's previous move, a = agent's move, r = reward from
    ``get_reward``, s' = opponent's current move. ``alpha`` is the learning
    rate and ``gamma`` the discount factor. Unseen next states are added to
    the Q-table with zero values before being read.
    """
    next_q_values = Q.setdefault(next_opponent_action, [0.0, 0.0, 0.0])
    current = Q[opponent_last_action][agent_action]
    td_error = reward + gamma * max(next_q_values) - current
    Q[opponent_last_action][agent_action] = current + alpha * td_error


# Train the agent against a uniformly random opponent.
def train(num_episodes, alpha=0.1, gamma=0.9, epsilon=0.2):
    """Run ``num_episodes`` rounds of epsilon-greedy Q-learning.

    The state is simply the opponent's previous move; the opponent plays
    uniformly at random every round, and the Q-table is updated in place
    after each round.
    """
    prev_opponent_move = random.choice(action_space)
    for _ in range(num_episodes):
        agent_move = choose_action(prev_opponent_move, epsilon)
        opponent_move = random.choice(action_space)
        round_reward = get_reward(agent_move, opponent_move)
        update_Q(prev_opponent_move, agent_move, round_reward, opponent_move, alpha, gamma)
        prev_opponent_move = opponent_move


# Evaluate the learned policy against a random opponent.
def test(num_games):
    """Play ``num_games`` rounds greedily (epsilon = 0) and print the win rate."""
    prev_opponent_move = random.choice(action_space)
    win_count = 0
    for _ in range(num_games):
        # Pure exploitation during evaluation: epsilon is fixed to 0.
        agent_move = choose_action(prev_opponent_move, 0)
        opponent_move = random.choice(action_space)
        if get_reward(agent_move, opponent_move) == 1:
            win_count += 1
        prev_opponent_move = opponent_move
    print(f"测试 {num_games} 局，赢了 {win_count} 局，胜率为 {win_count / num_games * 100}%")


if __name__ == "__main__":
    # Train the Q-table first, then evaluate the greedy policy.
    train(num_episodes=10000)
    test(num_games=100)
