import csv
import heapq
import itertools
import math
import os
import random
import time
from collections import deque

import pygame

# Initialize pygame
pygame.init()

# Screen dimensions
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 500
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Virtual Pet")

# Colors (RGB)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)  # Fallback color for food objects; also the energy bar
GREEN = (0, 255, 0)  # Fallback color for toilet objects
BLUE = (0, 0, 255)  # Fallback color for the toy box; also button hover fill
YELLOW = (255, 255, 0)  # Fallback color for toys
PURPLE = (128, 0, 128)  # Bladder meter
GRAY = (200, 200, 200)  # Default button fill

# The four navigation algorithms being compared: display label -> internal key
ALGORITHMS = {
    "Potential Fields": "potential_fields",
    "Q-Learning": "q_learning",
    "Random Exploration": "random",
    "A* Search": "a_star"
}


# Load images
def load_and_scale_image(path, scale_ratio=1.0):
    """Load an image with alpha from *path*, optionally scaled by *scale_ratio*.

    Returns a 40x40 solid-red placeholder surface when loading fails, so the
    game keeps running even with a missing asset.
    """
    try:
        surface = pygame.image.load(path).convert_alpha()
        if scale_ratio != 1.0:
            w, h = surface.get_size()
            scaled_size = (int(w * scale_ratio), int(h * scale_ratio))
            surface = pygame.transform.scale(surface, scaled_size)
        return surface
    except Exception as e:
        print(f"[ERROR] Failed to load: {path} - {e}")
        placeholder = pygame.Surface((40, 40))
        placeholder.fill((255, 0, 0))  # solid red signals a missing asset
        return placeholder


# Pre-load and scale all sprite assets once at module import time.
# Any missing file is replaced by a red placeholder (see load_and_scale_image).
background_img = load_and_scale_image("Image/background.png", scale_ratio=0.55)
pet_img = load_and_scale_image("Image/pet.png", scale_ratio=0.04)
pet_withToy_img = load_and_scale_image("Image/pet_withToy.png", scale_ratio=0.04)
feed_img = load_and_scale_image("Image/feed.png", scale_ratio=0.16)
toy_img = load_and_scale_image("Image/toy.png", scale_ratio=0.2)
toy_box_img = load_and_scale_image("Image/toy_box.png", scale_ratio=0.6)
toy_box_withToy_img = load_and_scale_image("Image/toy_box_withToy.png", scale_ratio=0.6)
toilet_img = load_and_scale_image("Image/toilet.png", scale_ratio=0.09)
sofa_img = load_and_scale_image("Image/sofa.png", scale_ratio=0.6)
table_img = load_and_scale_image("Image/table.png", scale_ratio=0.6)
television_img = load_and_scale_image("Image/television.png", scale_ratio=0.6)


# Button class
class Button:
    """Rectangular UI button supporting hover, disabled state, and click tests."""

    def __init__(self, x, y, width, height, text, color=GRAY, hover_color=BLUE, text_color=BLACK, disabled=False):
        self.rect = pygame.Rect(x, y, width, height)
        self.text = text
        self.color = color
        self.hover_color = hover_color
        self.text_color = text_color
        self.font = pygame.font.SysFont(None, 30)
        self.is_hovered = False
        self.disabled = disabled

    def draw(self, screen):
        """Render the fill, border, and centered text label onto *screen*."""
        if self.disabled:
            fill = (180, 180, 180)  # washed-out grey while disabled
        elif self.is_hovered:
            fill = self.hover_color
        else:
            fill = self.color
        pygame.draw.rect(screen, fill, self.rect)
        pygame.draw.rect(screen, BLACK, self.rect, 2)  # border outline
        label = self.font.render(self.text, True, self.text_color)
        screen.blit(label, label.get_rect(center=self.rect.center))

    def check_hover(self, pos):
        """Record and return whether the mouse position *pos* is over the button."""
        self.is_hovered = self.rect.collidepoint(pos)
        return self.is_hovered

    def is_clicked(self, pos, event):
        """Return True when an enabled button receives a left-button press at *pos*."""
        if self.disabled:
            return False
        if event.type != pygame.MOUSEBUTTONDOWN or event.button != 1:
            return False
        return self.rect.collidepoint(pos)


# Pet class with the 4 algorithms
class Pet:
    """Autonomous pet that walks to activity targets (feed / toy / toy_box /
    toilet) using one of four navigation algorithms: potential fields,
    Q-learning, A* search, or random exploration.

    Per-run metrics (task times, total distance, collision count) are tracked
    so the algorithms can be compared quantitatively.
    """

    def __init__(self, algorithm="random"):
        self.x = SCREEN_WIDTH // 2
        self.y = SCREEN_HEIGHT // 2
        self.radius = 25

        # Movement
        self.base_speed = 2
        self.speed = self.base_speed
        self.is_moving = False
        self.direction = random.uniform(0, 2 * math.pi)  # heading in radians

        # Pet needs (0-100 scales)
        self.energy = 100  # Start with 100% energy
        self.bladder = 0  # Start with 0% bladder

        # Memory
        self.memory = {}  # Record the location of all passing targets
        self.completed_targets = {}  # targets that have been accomplished
        self.known_targets = {}  # Temporarily record the position of the current target

        # Task management / metrics
        self.carrying_toy = False
        self.toy_dropped = False
        self.current_activity = None
        self.task_times = {}  # task name -> seconds it took
        self.total_task_time = 0
        self.activity_start_time = 0
        self.total_distance = 0
        self.collision_count = 0
        self.sense_range = 37.5  # distance at which a target counts as reached
        self.algorithm = algorithm

        # Potential Fields parameters
        self.attraction_gain = 1  # Attractive force gain
        self.repulsion_gain = 150000  # Repulsive force gain
        self.repulsion_range = 100  # Repulsive force influence radius
        self.epsilon_repulsion = 1e-2  # reserved tuning constant (currently unused)
        self.epsilon_attraction = 1e-5  # reserved tuning constant (currently unused)

        # Q-Learning parameters
        self.q_table = {}  # state key -> {action: Q-value}
        self.grid_size = 30  # state discretization granularity: smaller = finer
        self.learning_rate = 0.1  # weight of new experience
        self.discount_factor = 0.9  # how important future rewards are
        self.exploration_rate = 0.4  # 40% random moves initially
        self.exploration_decay = 0.995
        self.exploration_rate_min = 0.05

        # A* Search state
        self.astar_path = []  # cached list of (x, y) waypoints
        self.astar_index = 0  # index of the next waypoint to reach

        # Movement history for visualization (also used for stuck detection)
        self.movement_history = deque(maxlen=100)

    # 0.1.Click the button to start moving
    def start_activity(self, activity, objects):
        """Begin moving toward the object named *activity*, if currently idle.

        Records the target position, boosts speed by 50%, and clears any
        cached A* path so it is rebuilt for the new goal.
        """
        if activity == "toy" and (self.carrying_toy or self.toy_dropped):
            return  # toy already held or delivered; nothing to fetch
        if self.current_activity is None:
            self.current_activity = activity
            self.activity_start_time = time.time()
            self.is_moving = True
            self.speed = self.base_speed * 1.5
            for obj in objects:
                if obj.name == activity:
                    self.known_targets[activity] = (obj.x, obj.y)
                    self.direction = math.atan2(obj.y - self.y, obj.x - self.x)
                    break
            # Empty the A* path cache and force the path to be rebuilt
            self.astar_path = []
            self.astar_index = 0

    # 0.2.Core Movement Method
    def update(self, objects, obstacles):
        """Advance one frame: move, then check whether a target was reached."""
        self.move(objects, obstacles)  # Invoke the movement logic
        self.sense_objects(objects)  # Invoke perceptual logic

    # 0.3.Movement logic (move with different algorithms)
    def move(self, objects, obstacles):
        """Dispatch one movement step to the configured algorithm.

        Clamps the pet inside the screen afterwards and drains energy while
        moving. Does nothing when idle or out of energy.
        """
        if not self.is_moving or self.energy <= 0:
            return

        # potential_fields and q_learning cannot compute a direction without a
        # known target position; fall back to random exploration otherwise
        if self.current_activity and self.current_activity in self.known_targets:
            target_x, target_y = self.known_targets[self.current_activity]
            if self.algorithm == "potential_fields":
                self.move_with_potential_fields(target_x, target_y, objects, obstacles)
            elif self.algorithm == "q_learning":
                self.move_with_q_learning(target_x, target_y, objects, obstacles)
            elif self.algorithm == "a_star":
                self.move_with_a_star_search(objects, obstacles)
            else:
                self.move_with_random_exploration(obstacles)
        else:
            if self.algorithm == "a_star":
                self.move_with_a_star_search(objects, obstacles)
            else:
                self.move_with_random_exploration(obstacles)

        # Keep within bounds
        self.x = max(self.radius, min(self.x, SCREEN_WIDTH - self.radius))
        self.y = max(self.radius, min(self.y, SCREEN_HEIGHT - self.radius))

        # Consume energy only when moving
        if self.is_moving:
            self.energy = max(0, self.energy - 0.05)

    # 0.4.Perceptual logic
    #   0.4.1.determine whether the target is reached
    def sense_objects(self, objects):
        """Complete the current activity once its object is within sense range."""
        # Don't sense objects when not doing an activity
        if not self.current_activity:
            return

        for obj in objects:
            if obj.name == self.current_activity:
                dist = ((self.x - obj.x) ** 2 + (self.y - obj.y) ** 2) ** 0.5
                if dist < self.sense_range:
                    if obj.name not in self.completed_targets:
                        self.completed_targets[obj.name] = (obj.x, obj.y)
                    self.complete_activity(obj, objects)

    #   0.4.2.End the mission when you reach the objective
    def complete_activity(self, obj, objects):
        """Apply the effect of reaching *obj*, stop moving, and record timing.

        feed: +30 energy / +20 bladder; toilet: -40 bladder; toy: pick up the
        toy and remove it from *objects*; toy_box (while carrying): drop the
        toy off.
        """
        if obj.name in self.completed_targets:
            if obj.name == "feed":
                self.energy = min(100, self.energy + 30)
                self.bladder = min(100, self.bladder + 20)
            elif obj.name == "toilet":
                self.bladder = max(0, self.bladder - 40)
            elif obj.name == "toy":
                self.carrying_toy = True
                # Remove the toy from the object list so it disappears from the
                # frame. NOTE(review): this mutates the list the caller
                # (sense_objects) is iterating — harmless here since the
                # activity ends, but confirm if the loop is ever extended.
                for i, o in enumerate(objects):
                    if o.name == "toy":
                        del objects[i]
                        break

            elif obj.name == "toy_box" and self.carrying_toy:
                self.carrying_toy = False
                self.toy_dropped = True
                self.completed_targets["toy_box"] = (obj.x, obj.y)

        self.current_activity = None
        self.is_moving = False
        self.speed = self.base_speed
        elapsed = time.time() - self.activity_start_time
        self.task_times[obj.name] = elapsed
        self.total_task_time += elapsed

    # 1.Potential Fields
    #   1.1.Calculates the direction and the step size of the movement
    def calculate_potential_field(self, target_x, target_y, objects, obstacles):
        """Return the (dx, dy) step toward the target under a potential field.

        The target attracts linearly; non-target objects and obstacles repel
        within `repulsion_range`. The summed force is normalized to length
        `speed`; a tiny random perturbation breaks force equilibria so the pet
        cannot get permanently stuck.
        """
        # Attraction part (classical linear attraction toward the target)
        dx = target_x - self.x
        dy = target_y - self.y
        total_force_x = self.attraction_gain * dx
        total_force_y = self.attraction_gain * dy

        # Repulsive part: from objects (not the current target)
        for obj in objects:
            if obj.name == self.current_activity:
                continue
            obj_dx = self.x - obj.x
            obj_dy = self.y - obj.y
            obj_dist = math.hypot(obj_dx, obj_dy) - self.radius - obj.radius

            # BUGFIX: also require obj_dist > 1e-5 (as the obstacle loop below
            # already does) so an overlapping object cannot cause a division
            # by zero or a sign-flipped repulsion force.
            if obj_dist < self.repulsion_range and obj_dist > 1e-5:
                repulsion_term = (1 / obj_dist - 1 / self.repulsion_range)
                repulsion_mag = self.repulsion_gain * repulsion_term / (obj_dist ** 2)
                total_force_x += repulsion_mag * (obj_dx / obj_dist)
                total_force_y += repulsion_mag * (obj_dy / obj_dist)

        # Repulsive part: from obstacles (circular)
        for obs in obstacles:
            dx = self.x - obs.x
            dy = self.y - obs.y
            dist = math.hypot(dx, dy) - self.radius - obs.radius

            if dist < self.repulsion_range and dist > 1e-5:
                repulsion_term = (1 / dist - 1 / self.repulsion_range)
                repulsion_mag = self.repulsion_gain * repulsion_term / (dist ** 2)
                total_force_x += repulsion_mag * (dx / dist)
                total_force_y += repulsion_mag * (dy / dist)

        # Normalize the force vector to a step of length `speed`
        force_magnitude = math.hypot(total_force_x, total_force_y)
        if force_magnitude < 1e-5:
            # Prevent jamming: random perturbation when forces cancel out
            total_force_x += random.uniform(-0.05, 0.05)
            total_force_y += random.uniform(-0.05, 0.05)
            force_magnitude = math.hypot(total_force_x, total_force_y)

        total_force_x = total_force_x / force_magnitude * self.speed
        total_force_y = total_force_y / force_magnitude * self.speed

        return total_force_x, total_force_y

    #   1.2.Move
    def move_with_potential_fields(self, target_x, target_y, objects, obstacles):
        """Take one potential-field step and update distance/collision metrics."""
        prev_x, prev_y = self.x, self.y  # Record the position before the move
        force_x, force_y = self.calculate_potential_field(target_x, target_y, objects, obstacles)
        self.x += force_x
        self.y += force_y
        # Accumulate the displacement of this step
        distance_moved = math.hypot(self.x - prev_x, self.y - prev_y)
        self.total_distance += distance_moved
        self.movement_history.append((self.x, self.y))
        # Check whether the new position overlaps an obstacle
        for obs in obstacles:
            if math.hypot(self.x - obs.x, self.y - obs.y) < (self.radius + obs.radius):
                self.collision_count += 1
                break  # count at most one collision per step

    # 2.Q-Learning
    #   2.1.Discrete grid: encode position plus target offset as a state key
    def discretize_state(self, target_x, target_y):
        """Return the discretized "gx,gy|dx,dy" state key for the current
        position relative to (target_x, target_y)."""
        dx = int((target_x - self.x) / self.grid_size)
        dy = int((target_y - self.y) / self.grid_size)
        gx = int(self.x / self.grid_size)
        gy = int(self.y / self.grid_size)
        return f"{gx},{gy}|{dx},{dy}"

    #   2.2.Move and update the Q table with rewards and punishments
    def move_with_q_learning(self, target_x, target_y, objects, obstacles):
        """One epsilon-greedy Q-learning step toward (target_x, target_y).

        Simulates the chosen action, rewards progress and penalizes collisions
        and stalling, then applies the standard temporal-difference update.
        Exploration decays each step toward `exploration_rate_min`.
        """
        # CONSISTENCY FIX: reuse discretize_state() instead of a duplicated
        # nested helper with identical logic.
        current_state = self.discretize_state(target_x, target_y)
        current_distance = math.hypot(self.x - target_x, self.y - target_y)

        # Epsilon-greedy action selection
        if random.random() < self.exploration_rate:
            action = random.choice(["up", "down", "left", "right"])
        else:
            if current_state not in self.q_table:
                self.q_table[current_state] = {"up": 0, "down": 0, "left": 0, "right": 0}
            action = max(self.q_table[current_state], key=self.q_table[current_state].get)

        # Simulate the action
        new_x, new_y = self.x, self.y
        if action == "up":
            new_y -= self.speed
        elif action == "down":
            new_y += self.speed
        elif action == "left":
            new_x -= self.speed
        elif action == "right":
            new_x += self.speed

        reward = 0
        collided = False
        # Record the original location before moving
        prev_x, prev_y = self.x, self.y

        # Boundary collisions
        if new_x < self.radius or new_x > SCREEN_WIDTH - self.radius or \
                new_y < self.radius or new_y > SCREEN_HEIGHT - self.radius:
            collided = True
            reward -= 10

        # Obstacle collision (circular obstacles)
        if not collided:
            for obs in obstacles:
                dist = math.hypot(new_x - obs.x, new_y - obs.y)
                if dist < self.radius + obs.radius:
                    collided = True
                    reward -= 10
                    self.collision_count += 1
                    break

        # Perform the action; on a collision the pet simply stays in place
        if not collided:
            self.x, self.y = new_x, new_y
            # Cumulative travel distance
            distance_moved = math.hypot(self.x - prev_x, self.y - prev_y)
            self.total_distance += distance_moved
            new_distance = math.hypot(self.x - target_x, self.y - target_y)
            if new_distance < 5:
                self.x, self.y = target_x, target_y
                reward += 200
                # NOTE(review): ending the activity here bypasses
                # complete_activity(), so feed/toilet effects and task timing
                # are not recorded on Q-learning runs — confirm intended.
                self.is_moving = False
                self.current_activity = None
            elif new_distance < current_distance:
                reward += 1
            else:
                reward -= 2

        # Stuck penalty & jitter attempt
        if len(self.movement_history) >= 5:
            recent = list(self.movement_history)[-5:]
            if all(math.hypot(x - self.x, y - self.y) < 4 for x, y in recent):
                reward -= 20  # severely stuck penalty

                # Stuck: try a double-length step in each direction to escape
                directions = ["up", "down", "left", "right"]
                random.shuffle(directions)
                for alt_action in directions:
                    alt_x, alt_y = self.x, self.y
                    if alt_action == "up":
                        alt_y -= self.speed * 2
                    elif alt_action == "down":
                        alt_y += self.speed * 2
                    elif alt_action == "left":
                        alt_x -= self.speed * 2
                    elif alt_action == "right":
                        alt_x += self.speed * 2

                    if all(math.hypot(alt_x - obs.x, alt_y - obs.y) >= self.radius + obs.radius for obs in obstacles):
                        self.x, self.y = alt_x, alt_y
                        break  # successfully escaped

        # Q-table update (standard temporal-difference rule)
        new_state = self.discretize_state(target_x, target_y)
        for state in (current_state, new_state):
            if state not in self.q_table:
                self.q_table[state] = {"up": 0, "down": 0, "left": 0, "right": 0}

        max_next_q = max(self.q_table[new_state].values())
        self.q_table[current_state][action] = (
                (1 - self.learning_rate) * self.q_table[current_state][action]
                + self.learning_rate * (reward + self.discount_factor * max_next_q)
        )

        self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate * self.exploration_decay)
        self.movement_history.append((self.x, self.y))

    # 3.A* Search
    #   3.1.Generate paths
    def a_star_pathfinding(self, start_pos, goal_pos, obstacles):
        """Run A* on a 20px grid from start_pos to goal_pos (pixel coords).

        Returns a list of (x, y) pixel waypoints (cell centers) excluding the
        start cell, or [] when no path exists. Cells whose center lies within
        pet-radius + obstacle-radius of any obstacle are treated as blocked.
        """
        grid_size = 20
        start = (int(start_pos[0] // grid_size), int(start_pos[1] // grid_size))
        goal = (int(goal_pos[0] // grid_size), int(goal_pos[1] // grid_size))

        def neighbors(node):
            # 4-connected grid neighbors
            x, y = node
            return [(x + dx, y + dy) for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]]

        def heuristic(a, b):
            return abs(a[0] - b[0]) + abs(a[1] - b[1])  # Manhattan distance

        def is_blocked(x, y):
            # Test the cell center against every circular obstacle
            px = x * grid_size + grid_size // 2
            py = y * grid_size + grid_size // 2
            for obs in obstacles:
                if math.hypot(px - obs.x, py - obs.y) < self.radius + obs.radius:
                    return True
            return False

        open_set = []
        heapq.heappush(open_set, (0, start))
        came_from = {}
        g_score = {start: 0}
        f_score = {start: heuristic(start, goal)}

        while open_set:
            _, current = heapq.heappop(open_set)
            if current == goal:
                # Reconstruct the path by walking the parent links backwards
                path = []
                while current in came_from:
                    cx, cy = current
                    path.append((cx * grid_size + grid_size // 2, cy * grid_size + grid_size // 2))
                    current = came_from[current]
                path.reverse()
                return path

            for neighbor in neighbors(current):
                x, y = neighbor
                if 0 <= x < SCREEN_WIDTH // grid_size and 0 <= y < SCREEN_HEIGHT // grid_size:
                    if is_blocked(x, y):
                        continue
                    tentative_g = g_score[current] + 1
                    if neighbor not in g_score or tentative_g < g_score[neighbor]:
                        came_from[neighbor] = current
                        g_score[neighbor] = tentative_g
                        f_score[neighbor] = tentative_g + heuristic(neighbor, goal)
                        heapq.heappush(open_set, (f_score[neighbor], neighbor))

        return []  # No path

    #   3.2.Move
    def move_with_a_star_search(self, objects, obstacles):
        """Follow (and lazily rebuild) the cached A* waypoint path."""
        if self.current_activity not in self.known_targets:
            return  # mission not started yet

        target_x, target_y = self.known_targets[self.current_activity]

        # Build the A* path once per activity; start_activity clears the cache.
        # (astar_path is always set in __init__, so no hasattr check is needed.)
        if not self.astar_path:
            self.astar_path = self.a_star_pathfinding((self.x, self.y), (target_x, target_y), obstacles)
            self.astar_index = 0

        if not self.astar_path or self.astar_index >= len(self.astar_path):
            return

        # Get the current target waypoint
        next_x, next_y = self.astar_path[self.astar_index]
        dx = next_x - self.x
        dy = next_y - self.y
        distance = math.hypot(dx, dy)
        # Record the position before moving
        prev_x, prev_y = self.x, self.y

        if distance < self.speed:
            # Close enough: snap to the waypoint to prevent oscillation
            self.x, self.y = next_x, next_y
            self.astar_index += 1
        else:
            # Move one step toward the waypoint
            self.x += (dx / distance) * self.speed
            self.y += (dy / distance) * self.speed
        # Accumulate the displacement of this step
        distance_moved = math.hypot(self.x - prev_x, self.y - prev_y)
        self.total_distance += distance_moved
        self.movement_history.append((self.x, self.y))
        # Check whether the new position overlaps an obstacle
        for obs in obstacles:
            if math.hypot(self.x - obs.x, self.y - obs.y) < (self.radius + obs.radius):
                self.collision_count += 1
                break

    # 4.Random Exploration
    def move_with_random_exploration(self, obstacles):
        """Wander (or head roughly toward a known target) with obstacle dodging.

        Tries up to 8 headings fanning out from the base direction in 45-degree
        increments; each blocked attempt counts as a collision. Stays in place
        when every attempt is blocked.
        """
        if not self.current_activity or self.current_activity not in self.known_targets:
            base_direction = self.direction + random.uniform(-0.5, 0.5)
        else:
            target_x, target_y = self.known_targets[self.current_activity]
            dx = target_x - self.x
            dy = target_y - self.y
            base_direction = math.atan2(dy, dx)

        # Obstacle avoidance attempts
        max_attempts = 8
        step_angle = math.pi / 4  # rotate 45 degrees per attempt
        found = False

        for i in range(max_attempts):
            try_direction = base_direction + random.choice([-1, 1]) * step_angle * i
            new_x = self.x + math.cos(try_direction) * self.speed
            new_y = self.y + math.sin(try_direction) * self.speed

            # Check whether this step would hit a circular obstacle
            if not obstacles or all(
                    math.hypot(new_x - obs.x, new_y - obs.y) > self.radius + obs.radius for obs in obstacles
            ):
                self.direction = try_direction
                # Record the position before moving
                prev_x, prev_y = self.x, self.y
                self.x = new_x
                self.y = new_y
                # Accumulate the displacement of this step
                distance_moved = math.hypot(self.x - prev_x, self.y - prev_y)
                self.total_distance += distance_moved
                self.movement_history.append((self.x, self.y))
                found = True
                break
            else:
                # This direction is blocked; count it as a collision
                self.collision_count += 1

        if not found:
            # Every direction blocked: wait in place rather than clip through
            self.movement_history.append((self.x, self.y))

    def draw(self, screen):
        """Render the pet sprite, its movement trail, and the status HUD."""
        # Draw movement path
        if len(self.movement_history) > 1:
            pygame.draw.lines(screen, (200, 200, 255), False, self.movement_history, 1)

        # Sprite switches when the pet is carrying the toy
        if self.carrying_toy:
            img_to_draw = pet_withToy_img
        else:
            img_to_draw = pet_img
        img_rect = img_to_draw.get_rect(center=(int(self.x), int(self.y)))
        screen.blit(img_to_draw, img_rect)

        # Draw need bars (scaled x2 so 100% spans 200px)
        pygame.draw.rect(screen, RED, (10, 10, self.energy * 2, 20))
        pygame.draw.rect(screen, PURPLE, (10, 40, self.bladder * 2, 20))

        font = pygame.font.SysFont(None, 24)
        screen.blit(font.render(f"Energy: {int(self.energy)}", True, BLACK), (220, 10))
        screen.blit(font.render(f"Bladder: {int(self.bladder)}", True, BLACK), (220, 40))

        # Draw current algorithm
        algo_text = f"Algorithm: {self.algorithm}"
        screen.blit(font.render(algo_text, True, BLACK), (10, SCREEN_HEIGHT - 30))

        if self.current_activity:
            task_elapsed = time.time() - self.activity_start_time
            screen.blit(font.render(f"Activity: {self.current_activity} ({task_elapsed:.1f}s)", True, BLACK), (10, 80))

        # Always show the cumulative task time, active or not
        screen.blit(font.render(f"Total Time: {self.total_task_time:.1f}s", True, BLACK), (10, 100))


# Object class
class Object:
    """An interactable target (feed, toilet, toy, toy_box) at a fixed position."""

    def __init__(self, name, x, y):
        self.name = name
        self.x = x
        self.y = y
        self.radius = 25
        # Debug-draw palette kept from the circle-rendering days; rendering
        # now uses the icon sprites instead.
        self.colors = {
            "toilet": GREEN,
            "toy": YELLOW,
            "feed": RED,
            "toy_box": BLUE
        }

    # Make sure that the objects do not overlap
    @classmethod
    def create_non_overlapping_objects(cls, names, min_distance=80):
        """Randomly place one object per name, keeping every pair at least
        *min_distance* pixels apart."""
        placed = []
        for name in names:
            while True:
                cand_x = random.randint(50, SCREEN_WIDTH - 50)
                cand_y = random.randint(50, SCREEN_HEIGHT - 50)
                far_enough = all(
                    math.hypot(cand_x - other.x, cand_y - other.y) >= min_distance
                    for other in placed
                )
                if far_enough:
                    placed.append(cls(name, cand_x, cand_y))
                    break
        return placed

    def draw(self, screen, pet=None):
        """Blit this object's icon; the toy box shows its 'with toy' art once
        the pet has dropped the toy off."""
        if self.name == "toy_box" and pet and pet.toy_dropped:
            icon = toy_box_withToy_img
        else:
            icon = {
                "feed": feed_img,
                "toilet": toilet_img,
                "toy": toy_img,
                "toy_box": toy_box_img
            }.get(self.name)
        if icon:
            screen.blit(icon, icon.get_rect(center=(int(self.x), int(self.y))))


class Obstacle:
    """A static circular collider (sofa / table / television) drawn with a sprite."""

    def __init__(self, name, x, y, image=None):
        self.name = name
        self.x = x
        self.y = y
        self.image = image  # optional sprite surface; None falls back to a grey circle
        self.radius = 25  # collision radius used by all navigation algorithms

    def draw(self, screen):
        """Blit the sprite centered at (x, y), or a grey circle when no image is set."""
        if self.image:
            img_rect = self.image.get_rect(center=(int(self.x), int(self.y)))
            screen.blit(self.image, img_rect)
        else:
            pygame.draw.circle(screen, (100, 100, 100), (int(self.x), int(self.y)), self.radius)

    @classmethod
    def create_non_overlapping_obstacles(cls, count, objects, min_distance=80):
        """Randomly place up to *count* obstacles avoiding objects, each other,
        and the screen center (the pet's spawn point).

        NOTE(review): the placement loop retries forever; a crowded screen or a
        large min_distance could fail to terminate — confirm acceptable here.
        """
        placed_obstacles = []

        obstacle_specs = [
            ("sofa", sofa_img),
            ("table", table_img),
            ("television", television_img)
        ]

        # Keep the pet's spawn point (screen center) clear.
        # NOTE(review): pygame.Rect truncates the 37.5 floats to integers —
        # confirm the intended zone size.
        center_exclusion_zone = pygame.Rect(
            SCREEN_WIDTH // 2 - 37.5, SCREEN_HEIGHT // 2 - 37.5, 75, 75
        )

        for name, image in obstacle_specs[:count]:
            while True:
                x = random.randint(50, SCREEN_WIDTH - 50)
                y = random.randint(50, SCREEN_HEIGHT - 50)
                new_circle = pygame.Rect(x - 25, y - 25, 50, 50)  # bounding box

                # Must not overlap objects or existing obstacles, and must not
                # intrude into the center exclusion zone
                if (
                        not center_exclusion_zone.colliderect(new_circle)
                        and all(math.hypot(x - obj.x, y - obj.y) > min_distance for obj in objects)
                        and all(math.hypot(x - obs.x, y - obs.y) > min_distance for obs in placed_obstacles)
                ):
                    placed_obstacles.append(cls(name, x, y, image))
                    break

        return placed_obstacles


def show_algorithm_selection():
    """Blocking menu loop: returns the internal key of the chosen algorithm,
    or None when the window is closed."""
    screen.fill(WHITE)
    font_large = pygame.font.SysFont(None, 48)
    title = font_large.render("Select Search Algorithm", True, BLACK)
    screen.blit(title, (SCREEN_WIDTH // 2 - title.get_width() // 2, 50))

    # One button per algorithm, stacked vertically 70px apart from y=150
    buttons = [
        Button(SCREEN_WIDTH // 2 - 150, 150 + 70 * row, 300, 50, label)
        for row, label in enumerate(ALGORITHMS)
    ]

    while True:
        mouse_pos = pygame.mouse.get_pos()

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return None
            for btn in buttons:
                btn.check_hover(mouse_pos)
                if btn.is_clicked(mouse_pos, event):
                    return ALGORITHMS[btn.text]

        for btn in buttons:
            btn.draw(screen)

        pygame.display.flip()
        pygame.time.delay(30)


def evaluate_performance(total_time, total_distance, collision_count):
    """Score a run on a 0-100 scale from elapsed time, distance, and collisions.

    Each metric is penalized linearly and floored at 0, then the three scores
    are blended with weights 40% / 30% / 30%.
    """
    time_score = max(0, 100 - 2 * total_time)
    distance_score = max(0, 100 - 0.02 * total_distance)
    collision_score = max(0, 100 - 10 * collision_count)

    scores = (time_score, distance_score, collision_score)
    weights = (0.4, 0.3, 0.3)
    return sum(s * w for s, w in zip(scores, weights))


# Parameter sweep for tuning the potential-field gains
attraction_list = [0.5, 0.75, 1.0, 1.25, 1.5]
repulsion_list = [500, 1000, 2000, 3000, 4000, 5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000]
# Cartesian product: every (attraction_gain, repulsion_gain) pair to test
parameter_combinations = list(itertools.product(attraction_list, repulsion_list))
# Current testing progress
current_param_index = 0  # Index into parameter_combinations
current_round = 0  # Repetition counter for the current pair (0~4)


def _append_result_row(selected_algorithm, pet, total_time, total_distance,
                       total_collisions, score):
    """Append one result row to the potential-fields CSV.

    Writes the header only when the file does not exist yet.  Reads the
    module-level `current_round` counter for the 1-based Round column.
    """
    filename = 'potential_fields_parameter.csv'
    file_exists = os.path.isfile(filename)
    with open(filename, 'a', newline='') as csvfile:
        fieldnames = ['Algorithm', 'AttractionGain', 'RepulsionGain', 'Round', 'TotalTime',
                      'TotalDistance', 'Collisions', 'Score']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if not file_exists:
            writer.writeheader()
        writer.writerow({
            'Algorithm': selected_algorithm,
            'AttractionGain': pet.attraction_gain,
            'RepulsionGain': pet.repulsion_gain,
            'Round': current_round + 1,  # Note that the first round is 1, not 0
            'TotalTime': total_time,
            'TotalDistance': total_distance,
            'Collisions': total_collisions,
            'Score': score
        })


def _run_round(selected_algorithm, attraction_gain, repulsion_gain):
    """Run one complete simulation round and record its result.

    Returns True when the round finished (win or lose; CSV row written),
    or False when the user closed the window and the sweep should stop.
    """
    # 1. Create the instances for this round: pet, objects, obstacles, buttons.
    clock = pygame.time.Clock()
    pet = Pet(selected_algorithm)
    pet.attraction_gain = attraction_gain
    pet.repulsion_gain = repulsion_gain
    objects = [
        Object("feed", 70, 430),
        Object("toilet", 530, 430),
        Object("toy", 360, 400),
        Object("toy_box", 530, 90)
    ]
    # Fixed obstacle layout so every parameter pair is tested on the same map.
    obstacles = [
        Obstacle("sofa", 195, 350, sofa_img),
        Obstacle("television", 500, 280, television_img),
        Obstacle("table", 350, 180, table_img)
    ]
    control_buttons = [
        Button(10, 130, 150, 40, "Feed"),
        Button(10, 180, 150, 40, "Find Toy"),
        Button(10, 230, 150, 40, "Find Box"),
        Button(10, 280, 150, 40, "Toilet")
    ]
    # Button label -> internal activity name.
    activity_map = {
        "Feed": "feed",
        "Find Toy": "toy",
        "Find Box": "toy_box",
        "Toilet": "toilet"
    }
    # Fonts are created once per round (previously font_small was rebuilt
    # every frame inside the loop).
    font = pygame.font.SysFont(None, 36)
    font_small = pygame.font.SysFont("Arial Unicode MS", 24)
    game_over = False
    won = False
    lose_reason = ""  # filled in when a lose condition triggers
    task_sequence = ["feed", "toy", "toy_box", "toilet"]  # auto-mode task order
    attempts = {activity: 0 for activity in task_sequence}
    current_task_index = 0
    auto_mode = True  # let the round run unattended for the parameter sweep

    # 2. The main loop of the round.
    while True:
        #   2.1 Input handling.
        mouse_pos = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return False  # abort the whole sweep
            if not game_over:
                for btn in control_buttons:
                    if btn.is_clicked(mouse_pos, event):
                        activity = activity_map[btn.text]
                        # If already carrying the toy, ignore further toy clicks.
                        if activity == "toy" and pet.carrying_toy:
                            continue
                        pet.start_activity(activity, objects)
                        attempts[activity] += 1

        #   2.2 Simulation step.
        if not game_over:
            pet.update(objects, obstacles)
            # Auto-click: queue the next task as soon as the pet is idle.
            if auto_mode and not pet.current_activity and current_task_index < len(task_sequence):
                next_task = task_sequence[current_task_index]
                pet.start_activity(next_task, objects)
                attempts[next_task] += 1
                current_task_index += 1
            # Lose conditions.
            if pet.energy <= 0:
                game_over = True
                lose_reason = "Ran out of energy!"
            elif pet.bladder >= 100:
                game_over = True
                lose_reason = "Bladder overflow!"
            # Win condition: every object found and the toy no longer carried.
            if set(obj.name for obj in objects).issubset(set(pet.completed_targets.keys())) and not pet.carrying_toy:
                game_over = True
                won = True

        #   2.3 Drawing.
        screen.blit(background_img, (0, 0))
        for obj in objects:
            obj.draw(screen, pet=pet)
        for obs in obstacles:
            obs.draw(screen)
        pet.draw(screen)
        for btn in control_buttons:
            # Disable "Find Toy" once the toy is held or already delivered.
            if btn.text == "Find Toy":
                btn.disabled = pet.carrying_toy or pet.toy_dropped
            btn.check_hover(mouse_pos)
            btn.draw(screen)
        # Right-hand status column: found targets, then per-activity attempts.
        for i, name in enumerate(pet.completed_targets):
            screen.blit(font_small.render(f"Found {name}", True, GREEN), (SCREEN_WIDTH - 150, 10 + i * 25))
        for i, (activity, count) in enumerate(attempts.items()):
            if activity in pet.completed_targets:
                elapsed = pet.task_times.get(activity, 0)
                status = f"√ ({elapsed:.1f}s)"
                color = GREEN
            else:
                status = str(count)
                color = RED
            screen.blit(font_small.render(f"{activity}: {status}", True, color), (SCREEN_WIDTH - 150, 110 + i * 25))

        #   2.4 End-of-round handling: overlay, statistics, CSV, short pause.
        if game_over:
            overlay = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.SRCALPHA)
            overlay.fill((0, 0, 0, 128))
            screen.blit(overlay, (0, 0))
            if won:
                message = "You Win! All tasks completed!"
                color = GREEN
            else:
                message = f"You Lose! {lose_reason}"
                color = RED
            text = font.render(message, True, color)
            screen.blit(text, (SCREEN_WIDTH // 2 - text.get_width() // 2, SCREEN_HEIGHT // 2 - 30))
            # Summary statistics for this round.
            total_time = pet.total_task_time
            total_distance = pet.total_distance
            total_collisions = pet.collision_count
            score = evaluate_performance(total_time, total_distance, total_collisions)
            info_lines = [
                f"Total Time: {total_time:.1f} s",
                f"Total Distance: {total_distance:.1f} px",
                f"Collisions: {total_collisions}",
                f"Score: {score:.1f}",
            ]
            for i, line in enumerate(info_lines):
                surface = font_small.render(line, True, WHITE)
                screen.blit(surface, (SCREEN_WIDTH // 2 - surface.get_width() // 2, SCREEN_HEIGHT // 2 + 70 + i * 30))
            restart_text = font.render("Press R to restart", True, WHITE)
            screen.blit(restart_text, (SCREEN_WIDTH // 2 - restart_text.get_width() // 2, SCREEN_HEIGHT // 2 + 20))
            # Persist exactly one result row per finished round.
            if selected_algorithm == "potential_fields":
                _append_result_row(selected_algorithm, pet, total_time, total_distance,
                                   total_collisions, score)
            pygame.display.flip()
            pygame.time.delay(1000)  # Pause for 1 second, see the result
            return True

        pygame.display.flip()
        clock.tick(60)


def main():
    """Drive the potential-fields parameter sweep.

    Runs every (attraction, repulsion) pair in `parameter_combinations`
    for 5 rounds each, appending one CSV row per finished round.  Uses a
    flat loop instead of the previous recursive `return main()` restart,
    which grew the call stack by one frame per round and could exhaust
    Python's recursion limit over a long sweep.
    """
    global current_param_index, current_round

    while current_param_index < len(parameter_combinations):
        # Algorithm is forced to Potential Fields for this sweep; the
        # selection dialog is intentionally skipped.
        selected_algorithm = "potential_fields"
        attraction_gain, repulsion_gain = parameter_combinations[current_param_index]
        print(f"Testing param set {current_param_index + 1}/{len(parameter_combinations)} - Round {current_round + 1}/5")

        if not _run_round(selected_algorithm, attraction_gain, repulsion_gain):
            # Window closed mid-round: stop without finishing the sweep.
            pygame.quit()
            return

        # Advance to the next round / parameter pair.
        current_round += 1
        if current_round >= 5:  # completed 5 rounds for this pair
            current_param_index += 1
            current_round = 0

    print("All parameter combinations tested. Exiting.")
    pygame.quit()


# Entry point: run the full parameter sweep when executed as a script.
if __name__ == "__main__":
    main()
