import numpy as np
import time
from game.game import SnakeGame
from model.replay_buffer import ReplayBuffer


class QLearning:
    """Tabular Q-learning agent with epsilon-greedy exploration.

    Continuous state vectors are hashed into a discrete row index via
    `state_to_index`, so `state_size` is the number of Q-table rows, not
    the dimensionality of the state vector.
    """

    def __init__(self, state_size, action_size, learning_rate=0.1, discount_factor=0.9, epsilon=1.0,
                 epsilon_decay=0.995, min_epsilon=0.01):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.min_epsilon = min_epsilon
        # One row per discretized state, one column per action.
        self.q_table = np.zeros((state_size, action_size))

    def update(self, state, action, reward, next_state, done):
        """Apply one TD(0) update: Q(s,a) += lr * (td_target - Q(s,a)).

        `done` (truthy at terminal transitions) zeroes the bootstrap term.
        """
        # Discretize both states into Q-table row indices.
        state_index = self.state_to_index(state)
        next_state_index = self.state_to_index(next_state)

        # Greedy action under the current Q-table for the next state.
        best_next_action = np.argmax(self.q_table[next_state_index])

        # (1 - done) removes the bootstrapped future value at episode end.
        td_target = reward + self.discount_factor * self.q_table[next_state_index, best_next_action] * (1 - done)
        td_error = td_target - self.q_table[state_index, action]

        self.q_table[state_index, action] += self.learning_rate * td_error

    def get_action(self, state):
        """Epsilon-greedy action selection: explore with prob. epsilon, else greedy."""
        if np.random.rand() < self.epsilon:
            return np.random.randint(self.action_size)
        # Greedy: argmax over the action_size entries of this state's row.
        state_index = self.state_to_index(state)
        return int(np.argmax(self.q_table[state_index]))

    def decay_epsilon(self):
        """Multiplicatively decay epsilon, never dropping below min_epsilon."""
        self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)

    def state_to_index(self, state):
        """Hash a (clipped) continuous state vector into a Q-table row index.

        Each component of `state` is clipped to [-1, 1] and discretized into
        100 bins; the bin values are then combined base-100 and reduced
        modulo `state_size`.
        """
        state = np.clip(np.asarray(state, dtype=float), -1.0, 1.0)
        # Map [-1, 1] onto bins 0..99. The previous code scaled by 100 and
        # clipped to [0, 99], which collapsed every negative component to
        # bin 0 and silently discarded half of the state space.
        bins = np.clip(((state + 1.0) * 50).astype(int), 0, 99)

        # Combine the per-component bins as digits of a base-100 number.
        index = 0
        for i, value in enumerate(bins):
            index += int(value) * (100 ** i)

        return index % self.state_size

    def compute_flops(self, state):
        """Rough FLOP estimate for one action-selection + one Q-table update.

        `state` is unused; it is kept for backward compatibility with callers.
        """
        # Action selection is an argmax over ONE Q-table row, i.e. action_size
        # comparisons (the previous code wrongly charged state_size).
        action_flops = self.action_size
        # Q-update: 3 ops for the TD target/error plus 1 for the table write.
        q_table_update_flops = 3 + 1

        return action_flops + q_table_update_flops


def main_qlearning():
    """Train a tabular Q-learning agent on the Snake environment.

    Runs 2000 episodes, decaying epsilon after each one, and prints the
    per-step inference time plus per-episode reward and FLOP estimate.
    """
    env = SnakeGame()
    state_size = 100000  # Q-table rows; state indices are hashed into this range
    action_size = 4  # Snake has 4 movement actions
    qlearning_model = QLearning(state_size, action_size)

    for episode in range(2000):
        state = env.reset()
        done = False
        total_reward = 0
        episode_flops = 0  # reset every episode — this is a per-episode counter

        while not done:
            start_time = time.time()
            action = qlearning_model.get_action(state)
            next_state, reward, done = env.step(action)
            qlearning_model.update(state, action, reward, next_state, done)
            total_reward += reward
            state = next_state

            print(f"inference time: {time.time()-start_time}")

            # Accumulate the estimated FLOPs for this step.
            episode_flops += qlearning_model.compute_flops(state)

        qlearning_model.decay_epsilon()
        print(f"Episode {episode}, Total Reward: {total_reward}")
        # The counter is zeroed at the start of each episode, so report it as
        # per-episode (the old message wrongly claimed "across all episodes").
        print(f"Total FLOPs this episode: {episode_flops}")


# Script entry point: run Q-learning training when executed directly.
if __name__ == "__main__":
    main_qlearning()
