import math
import random
import time
from collections import deque

import pygame

# Initialize pygame (must happen before any display/font use below)
pygame.init()

# Screen dimensions (pixels)
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 500
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Virtual Pet")

# Colors (RGB tuples); the inline notes say what each color is used for
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)  # Food objects / energy bar / "not done" counters
GREEN = (0, 255, 0)  # Toilet objects / "done" status text
BLUE = (0, 0, 255)  # Toy box / button hover color
YELLOW = (255, 255, 0)  # Toys
PURPLE = (128, 0, 128)  # Bladder meter
GRAY = (200, 200, 200)  # Default button color

# The four search algorithms to compare.
# Keys are the button captions shown in the menu; values are the internal
# identifiers stored on Pet.algorithm.
ALGORITHMS = {
    "Potential Fields": "potential_fields",
    "Q-Learning": "q_learning",
    "Systematic Search": "systematic",
    "Random Exploration": "random"
}


# Load images
def load_and_scale_image(path, scale_ratio=1.0):
    """Load an image (with alpha) and optionally scale it by *scale_ratio*.

    On any failure an error is printed and a 40x40 solid red placeholder
    surface is returned instead, so the game keeps running without the asset.
    """
    try:
        surface = pygame.image.load(path).convert_alpha()
        if scale_ratio != 1.0:
            w, h = surface.get_size()
            scaled_size = (int(w * scale_ratio), int(h * scale_ratio))
            surface = pygame.transform.scale(surface, scaled_size)
        return surface
    except Exception as e:
        print(f"[ERROR] Failed to load: {path} - {e}")
        placeholder = pygame.Surface((40, 40))
        placeholder.fill((255, 0, 0))  # Red background
        return placeholder


# Pre-load and scale all sprites once at import time. Any missing file under
# Image/ becomes a red placeholder square (see load_and_scale_image).
background_img = load_and_scale_image("Image/background.png", scale_ratio=0.55)
pet_img = load_and_scale_image("Image/pet.png", scale_ratio=0.04)
pet_withToy_img = load_and_scale_image("Image/pet_withToy.png", scale_ratio=0.04)
feed_img = load_and_scale_image("Image/feed.png", scale_ratio=0.16)
toy_img = load_and_scale_image("Image/toy.png", scale_ratio=0.2)
toy_box_img = load_and_scale_image("Image/toy_box.png", scale_ratio=0.6)
toy_box_withToy_img = load_and_scale_image("Image/toy_box_withToy.png", scale_ratio=0.6)
toilet_img = load_and_scale_image("Image/toilet.png", scale_ratio=0.09)


# Button class
class Button:
    """A rectangular UI button with hover highlighting, an optional disabled
    state, and left-click detection."""

    def __init__(self, x, y, width, height, text, color=GRAY, hover_color=BLUE, text_color=BLACK, disabled=False):
        self.rect = pygame.Rect(x, y, width, height)
        self.text = text
        self.color = color
        self.hover_color = hover_color
        self.text_color = text_color
        self.font = pygame.font.SysFont(None, 30)
        self.is_hovered = False
        self.disabled = disabled

    def draw(self, screen):
        """Render the button: filled background, black border, centred label."""
        if self.disabled:
            fill = (180, 180, 180)  # grey-out when disabled
        elif self.is_hovered:
            fill = self.hover_color
        else:
            fill = self.color
        pygame.draw.rect(screen, fill, self.rect)
        pygame.draw.rect(screen, BLACK, self.rect, 2)  # Border
        label = self.font.render(self.text, True, self.text_color)
        screen.blit(label, label.get_rect(center=self.rect.center))

    def check_hover(self, pos):
        """Record and return whether *pos* (mouse position) is over the button."""
        self.is_hovered = self.rect.collidepoint(pos)
        return self.is_hovered

    def is_clicked(self, pos, event):
        """Return True when an enabled button receives a left-button press at *pos*."""
        if self.disabled:
            return False
        left_press = event.type == pygame.MOUSEBUTTONDOWN and event.button == 1
        return bool(left_press and self.rect.collidepoint(pos))


# Pet class with the 4 algorithms
class Pet:
    """The autonomous pet agent.

    The pet satisfies activities ("feed", "toilet", "toy", "toy_box") by
    travelling to the matching world Object, navigating with one of four
    strategies selected at startup: "potential_fields", "q_learning",
    "systematic" (serpentine sweep), or "random" exploration.
    """

    def __init__(self, algorithm="random"):
        # Position / size
        self.x = SCREEN_WIDTH // 2
        self.y = SCREEN_HEIGHT // 2
        self.radius = 25

        # Movement
        self.base_speed = 2
        self.speed = self.base_speed  # boosted x1.5 while an activity is running
        self.is_moving = False
        self.direction = random.uniform(0, 2 * math.pi)  # heading in radians

        # Pet needs, both on a 0-100 scale
        self.energy = 100  # Start with 100% energy
        self.bladder = 0  # Start with 0% bladder

        # Memory
        self.memory = {}  # name -> (x, y): objects seen while sweeping (systematic search)
        self.completed_targets = {}  # name -> (x, y): targets already reached
        self.known_targets = {}  # name -> (x, y): position of the current activity's target

        # Task management
        self.carrying_toy = False
        self.toy_dropped = False
        self.current_activity = None  # object name currently being sought, or None
        self.activity_start_time = 0
        self.activity_time_limit = 5  # seconds before an activity is abandoned
        self.sense_range = 37.5  # pixels: how close counts as "reached"
        self.algorithm = algorithm

        # Potential Fields parameters
        self.attraction_gain = 0.5  # strength of the pull toward the target
        self.repulsion_gain = 20000  # strength of the push away from obstacles
        self.repulsion_range = 100  # obstacles farther than this exert no force
        self.epsilon_repulsion = 1e-2  # distance floor; repulsion varies steeply, so a larger floor is more stable
        self.epsilon_attraction = 1e-5  # distance floor for the attraction term

        # Q-Learning parameters
        self.q_table = {}  # state string -> {action: Q-value}
        self.grid_size = 20  # state discretisation granularity: smaller = finer
        self.learning_rate = 0.1  # weight of new experience
        self.discount_factor = 0.9  # how important future rewards are
        self.exploration_rate = 0.4  # initial probability of a random move
        self.exploration_decay = 0.995
        self.exploration_rate_min = 0.05

        # Systematic Search parameters
        self.search_path = []  # serpentine waypoint list, built lazily
        self.search_index = 0  # index of the current waypoint
        self.search_spacing = 75  # pixel spacing between waypoints
        self.search_direction = 1  # direction marker for the search (reserved for extension)

        # Movement history for visualization (trail behind the pet)
        self.movement_history = deque(maxlen=100)

    # 0.1.Click the button to start moving
    def start_activity(self, activity, objects):
        """Begin seeking the object named *activity*, unless one is already running.

        A toy search is refused once the toy is carried or already dropped,
        because the toy object is no longer in the world.
        """
        if activity == "toy" and (self.carrying_toy or self.toy_dropped):
            return  # the toy is no longer out in the world
        if self.current_activity is None:
            self.current_activity = activity
            self.activity_start_time = time.time()
            self.is_moving = True
            self.speed = self.base_speed * 1.5  # hurry while on a task
            # Remember the target's position and face it immediately.
            for obj in objects:
                if obj.name == activity:
                    self.known_targets[activity] = (obj.x, obj.y)
                    self.direction = math.atan2(obj.y - self.y, obj.x - self.x)
                    break

    # 0.2.Core Movement Method
    def update(self, objects):
        """Per-frame tick: abandon overdue activities, then move and sense."""
        if self.current_activity:
            if time.time() - self.activity_start_time > self.activity_time_limit:
                # Give up on the activity after the time limit.
                self.current_activity = None
                self.is_moving = False
                self.speed = self.base_speed
        self.move(objects)  # Invoke the movement logic
        self.sense_objects(objects)  # Invoke perceptual logic

    # 0.3.Movement logic (move with different algorithms)
    def move(self, objects):
        """Dispatch one movement step to the configured algorithm."""
        if not self.is_moving or self.energy <= 0:
            return

        # potential_fields and q_learning cannot compute a direction without a
        # known target; with no target, fall back to an exploring strategy.
        if self.current_activity and self.current_activity in self.known_targets:
            target_x, target_y = self.known_targets[self.current_activity]
            if self.algorithm == "potential_fields":
                self.move_with_potential_fields(target_x, target_y, objects)
            elif self.algorithm == "q_learning":
                self.move_with_q_learning(target_x, target_y, objects)
            elif self.algorithm == "systematic":
                self.move_with_systematic_search(objects)
            else:
                self.move_with_random_exploration()
        else:
            if self.algorithm == "systematic":
                # BUG FIX: was called without `objects`, which raised a
                # TypeError (the method requires the objects list to memorise
                # what it passes).
                self.move_with_systematic_search(objects)
            else:
                self.move_with_random_exploration()

        # Keep within screen bounds
        self.x = max(self.radius, min(self.x, SCREEN_WIDTH - self.radius))
        self.y = max(self.radius, min(self.y, SCREEN_HEIGHT - self.radius))

        # Consume energy only when moving
        if self.is_moving:
            self.energy = max(0, self.energy - 0.1)

    # 0.4.Perceptual logic
    #   0.4.1.determine whether the target is reached
    def sense_objects(self, objects):
        """Complete the current activity when its object comes within sense range."""
        # Don't sense objects when not doing an activity
        if not self.current_activity:
            return

        for obj in objects:
            if obj.name == self.current_activity:
                dist = ((self.x - obj.x) ** 2 + (self.y - obj.y) ** 2) ** 0.5
                if dist < self.sense_range:
                    # NOTE(review): this records the target as completed even
                    # when its effect is conditional (e.g. reaching toy_box
                    # without carrying the toy) — confirm this is intended for
                    # the win condition in main().
                    if obj.name not in self.completed_targets:
                        self.completed_targets[obj.name] = (obj.x, obj.y)
                    self.complete_activity(obj, objects)

    #   0.4.2.End the mission when you reach the objective
    def complete_activity(self, obj, objects):
        """Apply the effect of reaching *obj*, then clear the current activity."""
        if obj.name in self.completed_targets:
            if obj.name == "feed":
                self.energy = min(100, self.energy + 30)
                self.bladder = min(100, self.bladder + 20)  # eating fills the bladder
            elif obj.name == "toilet":
                self.bladder = max(0, self.bladder - 40)
            elif obj.name == "toy":
                self.carrying_toy = True
                # Remove the toy from the objects list so it disappears from the frame
                for i, o in enumerate(objects):
                    if o.name == "toy":
                        del objects[i]
                        break

            elif obj.name == "toy_box" and self.carrying_toy:
                self.carrying_toy = False
                self.toy_dropped = True
                self.completed_targets["toy_box"] = (obj.x, obj.y)

        self.current_activity = None
        self.is_moving = False
        self.speed = self.base_speed

    # 1.Potential Fields
    #   1.1.Calculates the direction and the step size of the movement
    def calculate_potential_field(self, target_x, target_y, objects):
        """Return the (dx, dy) step toward ``(target_x, target_y)``.

        Combines an attraction toward the target with short-range repulsion
        from every non-target object, then normalises the resultant vector to
        the pet's current speed.
        """
        dx = target_x - self.x
        dy = target_y - self.y
        distance = max(self.epsilon_attraction, math.sqrt(dx * dx + dy * dy))
        attractive_force_x = self.attraction_gain * dx / distance
        attractive_force_y = self.attraction_gain * dy / distance
        total_force_x = attractive_force_x
        total_force_y = attractive_force_y

        # Repulsion from every object that is not the current goal.
        for obj in objects:
            if obj.name != self.current_activity:
                obj_dx = self.x - obj.x
                obj_dy = self.y - obj.y
                obj_distance = max(self.epsilon_repulsion, math.sqrt(obj_dx ** 2 + obj_dy ** 2))
                if obj_distance < self.repulsion_range:
                    repulsion_term = (1 / obj_distance - 1 / self.repulsion_range)
                    repulsive_force_magnitude = self.repulsion_gain * repulsion_term * (1 / (obj_distance ** 2))
                    total_force_x += repulsive_force_magnitude * obj_dx / obj_distance
                    total_force_y += repulsive_force_magnitude * obj_dy / obj_distance

        # Normalise the resultant force to unit length, then scale by speed.
        force_magnitude = math.sqrt(total_force_x ** 2 + total_force_y ** 2)
        if force_magnitude < 1e-5:
            # Near-zero resultant (local minimum): add a random perturbation
            # so the pet does not get stuck in place.
            total_force_x += random.uniform(-0.01, 0.01)
            total_force_y += random.uniform(-0.01, 0.01)
            force_magnitude = math.sqrt(total_force_x ** 2 + total_force_y ** 2)
        total_force_x = total_force_x / force_magnitude * self.speed
        total_force_y = total_force_y / force_magnitude * self.speed

        return total_force_x, total_force_y

    #   1.2.Move
    def move_with_potential_fields(self, target_x, target_y, objects):
        """Advance one step along the potential-field resultant."""
        force_x, force_y = self.calculate_potential_field(target_x, target_y, objects)
        self.x += force_x
        self.y += force_y
        self.movement_history.append((self.x, self.y))

    # 2.Q-Learning
    #   2.1.Discrete network: Converts the distance difference between the current and the target into a "discrete network"
    def discretize_state(self, target_x, target_y):
        """Encode the pet-to-target offset as a coarse grid-cell string "dx,dy"."""
        dx = int((target_x - self.x) / self.grid_size)
        dy = int((target_y - self.y) / self.grid_size)
        return f"{dx},{dy}"

    #   2.2.Move and update the Q table with rewards and punishments
    def move_with_q_learning(self, target_x, target_y, objects):
        """Take one epsilon-greedy grid step toward the target and update the Q-table."""
        current_state = self.discretize_state(target_x, target_y)
        current_distance = math.sqrt((self.x - target_x) ** 2 + (self.y - target_y) ** 2)

        # Epsilon-greedy action selection: explore vs. exploit
        if random.random() < self.exploration_rate:
            action = random.choice(["up", "down", "left", "right"])
        else:
            if current_state not in self.q_table:
                self.q_table[current_state] = {"up": 0, "down": 0, "left": 0, "right": 0}
            action = max(self.q_table[current_state].items(), key=lambda x: x[1])[0]
        if action == "up":
            self.y -= self.speed
        elif action == "down":
            self.y += self.speed
        elif action == "left":
            self.x -= self.speed
        elif action == "right":
            self.x += self.speed
        self.x = max(self.radius, min(self.x, SCREEN_WIDTH - self.radius))
        self.y = max(self.radius, min(self.y, SCREEN_HEIGHT - self.radius))

        new_state = self.discretize_state(target_x, target_y)
        new_distance = math.sqrt((self.x - target_x) ** 2 + (self.y - target_y) ** 2)

        # Reward shaping: big bonus for arrival, small +/- for progress,
        # extra penalty for straying far away.
        reward = 0
        if new_distance < 5:
            # NOTE(review): sense_range (37.5) normally completes the activity
            # before this branch fires, so it mainly serves as a safety net.
            self.x, self.y = target_x, target_y
            reward += 200
            self.is_moving = False
            self.current_activity = None
        elif new_distance < current_distance:
            reward += 1
        else:
            reward -= 2
        if new_distance > 300:
            reward -= 5
        if current_state not in self.q_table:
            self.q_table[current_state] = {"up": 0, "down": 0, "left": 0, "right": 0}
        if new_state not in self.q_table:
            self.q_table[new_state] = {"up": 0, "down": 0, "left": 0, "right": 0}

        # Standard Q-learning update
        max_next_q = max(self.q_table[new_state].values())
        self.q_table[current_state][action] = ((1 - self.learning_rate) * self.q_table[current_state][action]
                                               + self.learning_rate * (reward + self.discount_factor * max_next_q))
        # Exploration-rate decay: trust known experience more as experience grows
        self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate * self.exploration_decay)

        self.movement_history.append((self.x, self.y))

    # 3.Systematic Search
    #   3.1.Serpentine route
    def generate_search_path(self):
        """Build the serpentine (boustrophedon) waypoint list covering the screen."""
        self.search_path = []
        # Divide the entire map into rows and columns in a grid
        rows = SCREEN_HEIGHT // self.search_spacing
        cols = SCREEN_WIDTH // self.search_spacing

        for row in range(rows):
            if row % 2 == 0:
                # Even rows sweep left to right
                col_order = range(cols)
            else:
                # Odd rows sweep right to left
                col_order = range(cols - 1, -1, -1)
            for col in col_order:
                # Waypoints sit at the centre of each grid cell.
                x = col * self.search_spacing + self.search_spacing // 2
                y = row * self.search_spacing + self.search_spacing // 2
                self.search_path.append((x, y))

        self.search_index = 0

    #   3.2.Once reach the target point, switch to the next one.
    def move_with_systematic_search(self, objects):
        """Follow the serpentine path, memorising every object passed on the way.

        If the current activity's object is already in memory, head straight
        for it instead of sweeping.
        """
        # Step 0: If the target is already in memory, go straight to it.
        if self.current_activity in self.memory:
            target_x, target_y = self.memory[self.current_activity]
            dx = target_x - self.x
            dy = target_y - self.y
            distance = math.hypot(dx, dy)

            if distance > 1:
                self.x += (dx / distance) * self.speed
                self.y += (dy / distance) * self.speed
                self.movement_history.append((self.x, self.y))
            return

        # Step 1: Lazily generate the path
        if not self.search_path:
            self.generate_search_path()

        # Step 2: Move along the path; on arrival switch to the next waypoint
        target_x, target_y = self.search_path[self.search_index]
        dx = target_x - self.x
        dy = target_y - self.y
        distance = math.hypot(dx, dy)
        if distance < 5:
            self.search_index = (self.search_index + 1) % len(self.search_path)
            target_x, target_y = self.search_path[self.search_index]
            dx = target_x - self.x
            dy = target_y - self.y
            distance = max(1, math.hypot(dx, dy))
        self.x += (dx / distance) * self.speed
        self.y += (dy / distance) * self.speed
        self.movement_history.append((self.x, self.y))

        # Step 3: Memorise surrounding objects (even if not the current target)
        for obj in objects:
            dist = math.hypot(self.x - obj.x, self.y - obj.y)
            if dist < self.sense_range and obj.name not in self.memory:
                self.memory[obj.name] = (obj.x, obj.y)

    # 4.Random Exploration
    def move_with_random_exploration(self):
        """Wander: jitter the heading each frame and bounce off the walls."""
        self.direction += random.uniform(-0.5, 0.5)  # roughly +/-30 degrees of jitter per frame
        new_x = self.x + math.cos(self.direction) * self.speed
        new_y = self.y + math.sin(self.direction) * self.speed

        # Bounce off walls, reflecting the heading with random angle variation
        bounced = False
        if new_x < self.radius:
            self.direction = math.pi - self.direction + random.uniform(-0.5, 0.5)
            new_x = self.radius
            bounced = True
        elif new_x > SCREEN_WIDTH - self.radius:
            self.direction = math.pi - self.direction + random.uniform(-0.5, 0.5)
            new_x = SCREEN_WIDTH - self.radius
            bounced = True
        if new_y < self.radius:
            self.direction = -self.direction + random.uniform(-0.5, 0.5)
            new_y = self.radius
            bounced = True
        elif new_y > SCREEN_HEIGHT - self.radius:
            self.direction = -self.direction + random.uniform(-0.5, 0.5)
            new_y = SCREEN_HEIGHT - self.radius
            bounced = True

        # On a bounce, stay in place this frame (only the heading changed)
        if bounced:
            self.movement_history.append((self.x, self.y))
            return
        # Normal movement if no bounce occurred
        self.x = new_x
        self.y = new_y
        self.movement_history.append((self.x, self.y))

    def draw(self, screen):
        """Render the pet, its movement trail, and the HUD (bars, algorithm, activity)."""
        # Draw movement path
        if len(self.movement_history) > 1:
            pygame.draw.lines(screen, (200, 200, 255), False, self.movement_history, 1)

        # Pet sprite (different image while carrying the toy)
        if self.carrying_toy:
            img_to_draw = pet_withToy_img
        else:
            img_to_draw = pet_img
        img_rect = img_to_draw.get_rect(center=(int(self.x), int(self.y)))
        screen.blit(img_to_draw, img_rect)

        # Need bars: 2 px per point, so a full bar is 200 px wide
        pygame.draw.rect(screen, RED, (10, 10, self.energy * 2, 20))
        pygame.draw.rect(screen, PURPLE, (10, 40, self.bladder * 2, 20))

        font = pygame.font.SysFont(None, 24)
        screen.blit(font.render(f"Energy: {int(self.energy)}", True, BLACK), (220, 10))
        screen.blit(font.render(f"Bladder: {int(self.bladder)}", True, BLACK), (220, 40))

        # Draw current algorithm
        algo_text = f"Algorithm: {self.algorithm}"
        screen.blit(font.render(algo_text, True, BLACK), (10, SCREEN_HEIGHT - 30))

        # Current activity with remaining time before it is abandoned
        if self.current_activity:
            remaining_time = max(0, self.activity_time_limit - (time.time() - self.activity_start_time))
            screen.blit(font.render(f"Activity: {self.current_activity} ({remaining_time:.1f}s)", True, BLACK),
                        (10, 100))


# Object class
class Object:
    """A stationary world item ("feed" / "toilet" / "toy" / "toy_box") the pet visits."""

    def __init__(self, name, x, y):
        self.name = name
        self.x = x
        self.y = y
        self.radius = 25
        # Fallback color per object kind (used by the commented-out circle rendering).
        self.colors = {
            "toilet": GREEN,
            "toy": YELLOW,
            "feed": RED,
            "toy_box": BLUE
        }

    # Make sure that the objects do not overlap
    @classmethod
    def create_non_overlapping_objects(cls, names, min_distance=80):
        """Place one object per name at random, keeping them *min_distance* apart."""
        placed_objects = []
        for label in names:
            while True:
                cand_x = random.randint(50, SCREEN_WIDTH - 50)
                cand_y = random.randint(50, SCREEN_HEIGHT - 50)
                candidate = cls(label, cand_x, cand_y)
                too_close = any(
                    math.hypot(cand_x - other.x, cand_y - other.y) < min_distance
                    for other in placed_objects
                )
                if not too_close:
                    placed_objects.append(candidate)
                    break
        return placed_objects

    def draw(self, screen, pet=None):
        """Blit this object's icon; the toy box shows a 'filled' icon once the toy is dropped."""
        sprite = {
            "feed": feed_img,
            "toilet": toilet_img,
            "toy": toy_img,
            "toy_box": toy_box_img
        }.get(self.name)
        if self.name == "toy_box" and pet and pet.toy_dropped:
            sprite = toy_box_withToy_img
        if sprite:
            screen.blit(sprite, sprite.get_rect(center=(int(self.x), int(self.y))))


def show_algorithm_selection():
    """Blocking menu screen: show one button per algorithm.

    Returns the internal algorithm key for the clicked button, or None when
    the window is closed.
    """
    screen.fill(WHITE)
    heading_font = pygame.font.SysFont(None, 48)
    heading = heading_font.render("Select Search Algorithm", True, BLACK)
    screen.blit(heading, (SCREEN_WIDTH // 2 - heading.get_width() // 2, 50))

    # One button per algorithm, stacked vertically 70 px apart from y=150.
    buttons = [
        Button(SCREEN_WIDTH // 2 - 150, 150 + idx * 70, 300, 50, caption)
        for idx, caption in enumerate(ALGORITHMS)
    ]

    while True:
        mouse_pos = pygame.mouse.get_pos()

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return None
            for btn in buttons:
                btn.check_hover(mouse_pos)
                if btn.is_clicked(mouse_pos, event):
                    return ALGORITHMS[btn.text]

        for btn in buttons:
            btn.draw(screen)

        pygame.display.flip()
        pygame.time.delay(30)


def main():
    """Run the Virtual Pet game: algorithm menu, then the 60 FPS game loop.

    The loop handles button clicks (starting pet activities), advances the
    pet, checks win/lose conditions, and redraws the scene each frame.
    Pressing R after game over restarts by calling main() recursively.
    """
    # 1.Algorithm selection
    selected_algorithm = show_algorithm_selection()
    if selected_algorithm is None:
        return

    # 2.Create the instances: pet, world objects, control buttons
    clock = pygame.time.Clock()
    pet = Pet(selected_algorithm)
    objects = Object.create_non_overlapping_objects(["toilet", "toy", "feed", "toy_box"])
    control_buttons = [
        Button(10, 130, 150, 40, "Feed"),
        Button(10, 180, 150, 40, "Find Toy"),
        Button(10, 230, 150, 40, "Find Box"),
        Button(10, 280, 150, 40, "Toilet")
    ]
    font = pygame.font.SysFont(None, 36)
    game_over = False
    won = False
    # Per-activity click counters shown in the right-hand status bar.
    attempts = {activity: 0 for activity in ["feed", "toy", "toy_box", "toilet"]}
    running = True

    # 3.The main loop of the game
    while running:
        #   3.1.Handle events
        mouse_pos = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if not game_over:
                for btn in control_buttons:
                    if btn.is_clicked(mouse_pos, event):
                        # Map button captions to activity/object names.
                        activity_map = {
                            "Feed": "feed",
                            "Find Toy": "toy",
                            "Find Box": "toy_box",
                            "Toilet": "toilet"
                        }
                        activity = activity_map[btn.text]
                        # If the pet already carries the toy, ignore the click
                        if activity == "toy" and pet.carrying_toy:
                            continue
                        pet.start_activity(activity, objects)
                        attempts[activity] += 1
            # After game over, R restarts the whole game.
            if game_over and event.type == pygame.KEYDOWN and event.key == pygame.K_r:
                return main()  # Restart game

        #   3.2.Advance the pet and evaluate end conditions
        if not game_over:
            pet.update(objects)
            # Game over (lose) conditions
            if pet.energy <= 0:
                game_over = True
                lose_reason = "Ran out of energy!"
            elif pet.bladder >= 100:
                game_over = True
                lose_reason = "Bladder overflow!"
            # Win condition - all remaining objects completed and not carrying toy
            if set(obj.name for obj in objects).issubset(set(pet.completed_targets.keys())) and not pet.carrying_toy:
                game_over = True
                won = True

        #   3.3.Draw the scene
        # screen.fill(WHITE)
        screen.blit(background_img, (0, 0))
        for obj in objects:
            obj.draw(screen, pet=pet)
        pet.draw(screen)
        # Disable the Find Toy button once the pet has taken or dropped the toy
        for btn in control_buttons:
            if btn.text == "Find Toy":
                btn.disabled = pet.carrying_toy or pet.toy_dropped
        # Draw buttons (hover effect + render)
        for btn in control_buttons:
            btn.check_hover(mouse_pos)
            btn.draw(screen)
        # Status bar on the right: completed targets and attempt counters
        font_small = pygame.font.SysFont("Arial Unicode MS", 24)
        for i, (name, found) in enumerate(pet.completed_targets.items()):
            screen.blit(font_small.render(f"Found {name}", True, GREEN), (SCREEN_WIDTH - 150, 10 + i * 25))
        for i, (activity, count) in enumerate(attempts.items()):
            if activity in pet.completed_targets:
                status = "√"
                color = GREEN
            else:
                status = str(count)
                color = RED
            screen.blit(font_small.render(f"{activity}: {status}", True, color), (SCREEN_WIDTH - 150, 110 + i * 25))

        #   3.4.Game-over overlay with win/lose message
        if game_over:
            overlay = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.SRCALPHA)
            overlay.fill((0, 0, 0, 128))
            screen.blit(overlay, (0, 0))
            if won:
                message = "You Win! All tasks completed!"
                color = GREEN
            else:
                # lose_reason is always set here: game_over without won only
                # happens via the energy/bladder branches above.
                message = f"You Lose! {lose_reason}"
                color = RED
            text = font.render(message, True, color)
            screen.blit(text, (SCREEN_WIDTH // 2 - text.get_width() // 2, SCREEN_HEIGHT // 2 - 30))
            # restart hint
            restart_text = font.render("Press R to restart", True, WHITE)
            screen.blit(restart_text, (SCREEN_WIDTH // 2 - restart_text.get_width() // 2, SCREEN_HEIGHT // 2 + 20))

        pygame.display.flip()
        clock.tick(60)
    pygame.quit()


# Run the game only when executed as a script (not on import).
if __name__ == "__main__":
    main()
