import numpy as np

# Game state container: wraps one JSON snapshot of the battlefield.
class GameState:
    def __init__(self, sun_count, zombies, plants, game_status, wave, lawnmowers, time_remaining):
        """Store one frame of game data.

        Args:
            sun_count: Current amount of sun the player holds.
            zombies: List of dicts with 'row', 'position', 'health' keys.
            plants: List of dicts with 'row', 'position', 'cooldown' keys.
            game_status: Status string reported by the game (e.g. "playing").
            wave: Current zombie wave number.
            lawnmowers: List of dicts with an 'available' boolean per row.
            time_remaining: Time left, presumably in seconds — confirm with sender.
        """
        self.sun_count = sun_count
        self.zombies = zombies
        self.plants = plants
        self.game_status = game_status
        self.wave = wave
        self.lawnmowers = lawnmowers
        self.time_remaining = time_remaining

    def get_state_vector(self):
        """Flatten the snapshot into a 1-D numpy feature vector.

        Layout: [sun, wave, time] followed by (row, position, health) per
        zombie, (row, position, cooldown) per plant, and one 0/1 availability
        flag per lawnmower. Length varies with the number of zombies/plants.
        """
        features = [self.sun_count, self.wave, self.time_remaining]
        features += [v for z in self.zombies for v in (z['row'], z['position'], z['health'])]
        features += [v for p in self.plants for v in (p['row'], p['position'], p['cooldown'])]
        features += [int(m['available']) for m in self.lawnmowers]
        return np.array(features)

# Tabular Q-learning agent. Variable-length state vectors are mapped onto a
# fixed number of table rows by hashing, so distinct states may collide.
class QLearningAgent:
    def __init__(self, state_size, action_size, learning_rate=0.1, discount_factor=0.9, exploration_rate=0.1):
        """Create an agent with a zero-initialized Q-table.

        Args:
            state_size: Number of rows in the Q-table (hash buckets for states).
            action_size: Number of discrete actions.
            learning_rate: Step size alpha for the Q-update.
            discount_factor: Gamma, weight of future rewards.
            exploration_rate: Epsilon for epsilon-greedy action selection.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.q_table = np.zeros((state_size, action_size))

    def _state_index(self, state):
        """Map an arbitrary state vector to a Q-table row.

        NOTE(review): hashing loses information — different states can share
        a row. Deterministic for integer-valued states (int hashes are stable
        across runs, unlike string hashes).
        """
        return hash(tuple(state)) % self.state_size

    def choose_action(self, state):
        """Epsilon-greedy selection: random action with probability
        `exploration_rate`, otherwise the best-known action for the state."""
        if np.random.uniform(0, 1) < self.exploration_rate:
            return np.random.choice(self.action_size)
        return np.argmax(self.q_table[self._state_index(state)])

    def update_q_table(self, state, action, reward, next_state):
        """Standard Q-learning update: Q <- (1-a)*Q + a*(r + g*max Q(s'))."""
        si = self._state_index(state)
        ni = self._state_index(next_state)
        best_next = np.max(self.q_table[ni])
        self.q_table[si, action] = ((1 - self.learning_rate) * self.q_table[si, action]
                                    + self.learning_rate * (reward + self.discount_factor * best_next))

    def save_model(self, filename):
        """Persist the Q-table to `filename` via np.save (appends .npy if missing)."""
        np.save(filename, self.q_table)

# Simulate receiving JSON-formatted environment data from the game client.
def simulate_receive_json():
    """Generate 1000 random battlefield snapshots shaped like the live feed.

    Each snapshot is a dict with sun count, zombie/plant lists, a status
    string, wave number, per-row lawnmower availability and remaining time.
    """
    import random

    def _zombie():
        # One zombie record; fields mirror the live JSON schema.
        return {
            "type": random.choice(["normal", "conehead"]),
            "row": random.randint(1, 5),
            "position": random.randint(200, 500),
            "health": random.randint(50, 200)
        }

    def _plant():
        # One plant record; cooldown counts down to the next allowed action.
        return {
            "type": random.choice(["Sunflower", "Peashooter"]),
            "row": random.randint(1, 5),
            "position": random.randint(100, 200),
            "cooldown": random.randint(0, 10)
        }

    snapshots = []
    for _ in range(1000):
        snapshots.append({
            "sun_count": random.randint(100, 300),
            "zombies": [_zombie() for _ in range(random.randint(1, 3))],
            "plants": [_plant() for _ in range(random.randint(1, 3))],
            "game_status": "playing",
            "wave": random.randint(1, 5),
            "lawnmowers": [{"row": i, "available": random.choice([True, False])} for i in range(1, 6)],
            "time_remaining": random.randint(10, 60)
        })
    return snapshots

# Action space and plant types: one "plant this type in this row" action
# per (row, plant-type) pair, so ACTION_SIZE = 5 * 2 = 10.
ROWS = 5
PLANT_TYPES = ["Sunflower", "Peashooter"]
ACTION_SIZE = ROWS * len(PLANT_TYPES)

# Number of Q-table rows; hashed state vectors are bucketed into this range.
state_size = 1000

# Create the Q-learning agent.
agent = QLearningAgent(state_size, ACTION_SIZE)

# Simulate receiving a stream of JSON snapshots from the game.
json_data_list = simulate_receive_json()

# Training loop: each snapshot becomes a state; the transition from the
# previous (state, action) to the current state is used for a Q-table update.
prev_state_vector = None
prev_action = None
for json_data in json_data_list:
    game_state = GameState(json_data["sun_count"], json_data["zombies"], json_data["plants"],
                           json_data["game_status"], json_data["wave"], json_data["lawnmowers"],
                           json_data["time_remaining"])
    state_vector = game_state.get_state_vector()

    if prev_state_vector is not None:
        # NOTE(review): reward is a fixed placeholder; a real reward signal
        # should be derived from the game outcome (zombies killed, sun earned, ...).
        reward = 10
        agent.update_q_table(prev_state_vector, prev_action, reward, state_vector)

    action = agent.choose_action(state_vector)
    prev_state_vector = state_vector
    prev_action = action

# Save the trained model. Raw string avoids the invalid "\P", "\p", "\m", "\d"
# escape sequences in the Windows path (SyntaxWarning on modern Python);
# the resulting path value is unchanged.
model_filename = r"D:\Project\Pycharm Project\pythonProject4\models\direction_model.npy"
agent.save_model(model_filename)
print(f"Training completed. Model saved to {model_filename}")