from __future__ import annotations

from abc import ABC, abstractmethod
from collections import OrderedDict

from model import (
    MEM_BANDWIDTH,
    Settings,
    Request,
    Action,
    ReloadAction,
    VisitAction,
    OffloadAction,
    FinAction,
)
from utils.intervals import IntervalSet


class Algorithm(ABC):
    """Abstract base class for memory-scheduling algorithms.

    Provides shared helpers for grouping requests into batches, emitting
    timed eviction/reload actions, and locating evictable memory regions.
    """

    def __init__(self, settings: Settings):
        self.settings = settings

    @abstractmethod
    def fit(self, requests: list[Request]) -> list[Action]:
        """Produce the complete action schedule for the given requests."""
        raise NotImplementedError("The fit method must be implemented by the subclass.")

    @staticmethod
    def _group_requests_by_start_time(
        requests: list[Request],
    ) -> list[tuple[int, list[tuple[int, Request]]]]:
        """Group requests by their start time.

        Returns a list of (start_time, [(original_index, request), ...])
        tuples. Groups appear in first-seen order, not sorted order —
        callers presumably pass requests already ordered by start time.
        """
        # Plain dicts preserve insertion order (guaranteed since Python 3.7),
        # so OrderedDict is unnecessary; setdefault avoids the double lookup.
        grouped_requests: dict[int, list[tuple[int, Request]]] = {}
        for i, req in enumerate(requests):
            grouped_requests.setdefault(req.start, []).append((i, req))
        return list(grouped_requests.items())

    def _perform_eviction(
        self,
        actions: list[Action],
        current_time: int,
        evict_start: int,
        size_to_evict: int,
        loaded: IntervalSet,
    ) -> int:
        """Emit an OffloadAction, advance time by the transfer cost
        (MEM_BANDWIDTH per byte), and drop the range from `loaded`.

        Returns the updated current time.
        """
        actions.append(
            OffloadAction(self.settings, current_time, evict_start, size_to_evict)
        )
        current_time += MEM_BANDWIDTH * size_to_evict
        loaded.remove(evict_start, evict_start + size_to_evict)
        return current_time

    def _perform_reload(
        self,
        actions: list[Action],
        current_time: int,
        region_addr: int,
        region_size: int,
        loaded: IntervalSet,
    ) -> int:
        """Emit a ReloadAction, advance time by the transfer cost
        (MEM_BANDWIDTH per byte), and add the range to `loaded`.

        Returns the updated current time.
        """
        actions.append(
            ReloadAction(self.settings, current_time, region_addr, region_size)
        )
        current_time += MEM_BANDWIDTH * region_size
        loaded.add(region_addr, region_addr + region_size)
        return current_time

    @staticmethod
    def _find_unreferenced_regions(
        loaded: IntervalSet, referenced: IntervalSet
    ) -> list[tuple[int, int]]:
        """Return every loaded sub-range that is not currently referenced
        (and is therefore a legal eviction candidate)."""
        unreferenced_regions = []
        for start_addr, end_addr in loaded.intervals:
            unreferenced_parts = referenced.difference(start_addr, end_addr)
            unreferenced_regions.extend(unreferenced_parts)
        return unreferenced_regions

    @staticmethod
    def _release_references(
        reqs: list[tuple[int, Request]], referenced: IntervalSet
    ) -> None:
        """Release the byte references held by a batch of (index, request)
        pairs, making their ranges evictable again."""
        for _, req in reqs:
            referenced.remove(req.addr, req.addr + req.size)

    @classmethod
    def list_names(cls) -> list[str]:
        """All algorithm names accepted by :meth:`from_name`."""
        return [
            "simple",
            "predictor",
            "prophet",
        ]

    @classmethod
    def best_name(cls) -> str:
        """Name of the recommended (strongest) algorithm."""
        return "prophet"

    @classmethod
    def from_name(cls, name: str, settings: Settings) -> Algorithm:
        """Instantiate an algorithm by name.

        Raises:
            ValueError: If `name` is not one of :meth:`list_names`.
        """
        if name == "simple":
            return SimpleAlgorithm(settings)
        elif name == "predictor":
            return PredictorAlgorithm(settings)
        elif name == "prophet":
            return ProphetAlgorithm(settings)
        else:
            raise ValueError(f"Unknown algorithm name: {name}")


class SimpleAlgorithm(Algorithm):
    """Simple memory management algorithm with byte-level granularity tracking using IntervalSet"""

    def __init__(self, settings: Settings):
        super().__init__(settings)
        # Byte ranges currently resident in memory.
        self.loaded = IntervalSet()
        # Byte ranges pinned by in-flight requests (never evictable).
        self.referenced = IntervalSet()
        self.max_memory = settings.M

    def _mark_referenced(self, start_addr: int, end_addr: int):
        """Pin a byte range so it cannot be evicted."""
        self.referenced.add(start_addr, end_addr)

    def _unmark_referenced(self, start_addr: int, end_addr: int):
        """Unpin a byte range, making it evictable again."""
        self.referenced.remove(start_addr, end_addr)

    def _load_bytes(
        self, start_addr: int, size: int, actions: list[Action], current_time: int
    ) -> int:
        """Ensure [start_addr, start_addr + size) is resident; return the new time."""
        end_addr = start_addr + size

        # Pin first so subsequent evictions cannot touch these bytes.
        self._mark_referenced(start_addr, end_addr)

        # Bring in every gap that is not already resident, in address order.
        for gap_start, gap_end in self.loaded.difference(start_addr, end_addr):
            current_time = self._load_region(
                gap_start, gap_end - gap_start, actions, current_time
            )

        return current_time

    def _load_region(
        self, addr: int, size: int, actions: list[Action], current_time: int
    ) -> int:
        """Load one contiguous region, evicting beforehand if memory is full."""
        # Free up space until the region fits within the memory budget.
        while self.loaded.total() + size > self.max_memory:
            current_time = self._evict_bytes(actions, current_time)

        return self._perform_reload(actions, current_time, addr, size, self.loaded)

    def _evict_bytes(self, actions: list[Action], current_time: int) -> int:
        """Evict one unreferenced contiguous region; raise if none exists."""
        candidates = self._find_unreferenced_regions(self.loaded, self.referenced)
        if not candidates:
            raise RuntimeError(
                "No bytes available to evict! All loaded bytes are referenced."
            )

        # Take the first evictable region found (whole region at once, to
        # keep the number of offload operations low).
        victim_start, victim_end = candidates[0]
        return self._perform_eviction(
            actions, current_time, victim_start, victim_end - victim_start, self.loaded
        )

    def fit(self, requests: list[Request]) -> list[Action]:
        """Run the byte-granularity schedule and return the action list."""
        actions: list[Action] = []
        clock = 0

        for start, batch in self._group_requests_by_start_time(requests):
            # Stage every byte range the batch needs, one load after another.
            load_done = clock
            for _, req in batch:
                load_done = self._load_bytes(req.addr, req.size, actions, load_done)

            # Visits may not begin before `start`, nor before loading finishes.
            clock = max(load_done, start)

            # All visits in the batch run concurrently from the same instant;
            # the batch takes as long as its slowest request.
            for idx, req in batch:
                actions.append(VisitAction(self.settings, clock, idx, req))
            clock += max((req.time for _, req in batch), default=0)

            # Batch finished: its bytes become evictable again.
            self._release_references(batch, self.referenced)

        actions.append(FinAction(clock))
        return actions


class PredictorAlgorithm(Algorithm):
    """
    Predictor Algorithm: Online algorithm that predicts future memory access patterns.

    Key features:
    - Online processing: Only uses past requests, cannot look into the future
    - LRU-based eviction: Evict least recently used memory when space is needed
    - Access frequency tracking: Keep frequently accessed memory loaded
    - Pattern-based prefetching: Predict and prefetch likely future accesses
    - IO/Compute overlap: Load next batch during current batch computation
    """

    def __init__(self, settings: Settings):
        super().__init__(settings)
        # Byte ranges currently resident in memory.
        self.loaded = IntervalSet()
        # Byte ranges pinned by the batch currently being processed.
        self.referenced = IntervalSet()
        self.max_memory = settings.M

        # Last access time per loaded interval, keyed by the exact
        # (start, end) bounds of an interval in `self.loaded.intervals`.
        # Keys are rewritten whenever intervals merge (see _load_bytes).
        self.interval_access_time: dict[tuple[int, int], int] = {}

    def _record_region_access(self, start_addr: int, end_addr: int, access_time: int):
        """Record the most recent access time for a specific memory region."""
        # Store the actual access time for accurate LRU
        key = (start_addr, end_addr)
        self.interval_access_time[key] = access_time

    def _find_lru_eviction_candidate(self, current_time: int) -> tuple[int, int] | None:
        """
        Find the least recently used unreferenced memory region to evict.

        `current_time` is currently unused; kept for interface stability.

        Returns: (start, end) of the LRU region, or None if no evictable region exists
        """
        best_region = None
        earliest_access = float("inf")

        # Find unreferenced loaded regions
        unreferenced_regions = self._find_unreferenced_regions(
            self.loaded, self.referenced
        )

        # Match each unreferenced region to its parent loaded interval, whose
        # bounds are the key into the access-time table.
        for region_start, region_end in unreferenced_regions:
            # Find the parent loaded interval
            for start_addr, end_addr in self.loaded.intervals:
                if start_addr <= region_start and region_end <= end_addr:
                    key = (start_addr, end_addr)
                    # Missing history defaults to 0, i.e. "oldest possible".
                    last_access = self.interval_access_time.get(key, 0)

                    # Choose region with earliest last access (LRU)
                    if last_access < earliest_access:
                        earliest_access = last_access
                        best_region = (region_start, region_end)
                    break

        return best_region

    def _evict_bytes_lru(
        self,
        actions: list[Action],
        current_time: int,
        size_needed: int | None = None,
    ) -> int:
        """
        Evict bytes using the LRU policy.

        Args:
            actions: Action list to append the OffloadAction to.
            current_time: Time at which the eviction starts.
            size_needed: If specified, only evict this many bytes from the
                LRU region (partial eviction of a larger region is allowed).

        Returns: Updated current time after eviction

        Raises:
            RuntimeError: If every loaded byte is currently referenced.
        """
        best_region = self._find_lru_eviction_candidate(current_time)

        if best_region is None:
            raise RuntimeError(
                "No bytes available to evict! All loaded bytes are referenced."
            )

        evict_start, evict_end = best_region
        region_size = evict_end - evict_start

        # If size_needed is specified, only evict what we need
        if size_needed is not None:
            size_to_evict = min(size_needed, region_size)
        else:
            size_to_evict = region_size

        current_time = self._perform_eviction(
            actions, current_time, evict_start, size_to_evict, self.loaded
        )

        # Drop history entries lying entirely inside the evicted range.
        # NOTE(review): after a partial eviction the surviving piece of a
        # split interval loses its history entry (its old key no longer
        # matches any loaded interval), so it falls back to access time 0
        # on later LRU scans — confirm this bias is acceptable.
        evicted_end = evict_start + size_to_evict
        keys_to_remove = []
        for key in list(self.interval_access_time.keys()):
            hist_start, hist_end = key
            # If history entry is completely within evicted region, remove it
            if hist_start >= evict_start and hist_end <= evicted_end:
                keys_to_remove.append(key)

        for key in keys_to_remove:
            del self.interval_access_time[key]

        return current_time

    def _load_bytes(
        self,
        start_addr: int,
        size: int,
        actions: list[Action],
        current_time: int,
        access_time: int,
    ) -> int:
        """
        Load memory bytes, evicting with LRU if necessary.

        Args:
            start_addr: First byte of the range to make resident.
            size: Number of bytes to make resident.
            actions: Action list to append reload/offload actions to.
            current_time: Time at which loading may begin.
            access_time: The time when this memory will be accessed (for history tracking)

        Returns: Updated current time after loading
        """
        end_addr = start_addr + size

        # Mark as referenced
        self.referenced.add(start_addr, end_addr)

        # Find unloaded regions
        unloaded_regions = self.loaded.difference(start_addr, end_addr)

        # Load unloaded regions
        for region_addr, region_end in unloaded_regions:
            region_size = region_end - region_addr

            # Evict if necessary using LRU strategy
            while self.loaded.total() + region_size > self.max_memory:
                space_needed = self.loaded.total() + region_size - self.max_memory
                current_time = self._evict_bytes_lru(
                    actions, current_time, space_needed
                )

            # Before adding to loaded, save old intervals that may merge with
            # the new region (overlapping or adjacent), so their access
            # history can be migrated to the merged interval.
            affected_intervals = []
            for interval_start, interval_end in self.loaded.intervals:
                if interval_end >= region_addr and interval_start <= region_end:
                    affected_intervals.append((interval_start, interval_end))

            # Load the region (this will add to loaded set)
            current_time = self._perform_reload(
                actions, current_time, region_addr, region_size, self.loaded
            )

            # Merged interval inherits the newest access time among its parts.
            old_access_time = access_time
            for old_start, old_end in affected_intervals:
                old_key = (old_start, old_end)
                if old_key in self.interval_access_time:
                    old_access_time = max(
                        old_access_time, self.interval_access_time[old_key]
                    )
                    del self.interval_access_time[old_key]

            # Find the new merged interval and set its access time
            for interval_start, interval_end in self.loaded.intervals:
                if interval_start <= region_addr and interval_end >= region_end:
                    key = (interval_start, interval_end)
                    self.interval_access_time[key] = old_access_time
                    break

        return current_time

    def _try_prefetch_similar_patterns(
        self,
        current_addr: int,
        current_size: int,
        actions: list[Action],
        start_time: int,
        deadline: int,
    ) -> int:
        """
        Try to prefetch memory regions with similar access patterns.
        This is a simple heuristic: prefetch the region adjacent to the most
        recent request (spatial locality), but only if it fits in free memory
        and its transfer completes before `deadline`.

        Returns: Time when prefetching completes (or start_time if no prefetching done)
        """
        load_time = start_time

        # Heuristic: prefetch the next contiguous region after the current
        # request, capped at a tenth of total memory.
        adjacent_start = current_addr + current_size
        prefetch_size = min(current_size, self.max_memory // 10)  # Limit prefetch size

        # Clamp so we don't prefetch past the end of virtual memory
        # (a non-positive result disables prefetching below).
        max_prefetch_size = self.settings.L - adjacent_start
        prefetch_size = min(prefetch_size, max_prefetch_size)

        if prefetch_size > 0:
            adjacent_end = adjacent_start + prefetch_size

            # Check if we can prefetch during available time
            unloaded = self.loaded.difference(adjacent_start, adjacent_end)
            load_cost = sum((end - start) * MEM_BANDWIDTH for start, end in unloaded)

            if load_time + load_cost <= deadline:
                free_memory = self.max_memory - self.loaded.total()
                needed_space = sum(end - start for start, end in unloaded)

                # Only prefetch into free memory — never evict for a guess.
                if needed_space > 0 and needed_space <= free_memory:
                    for region_start, region_end in unloaded:
                        region_size = region_end - region_start
                        actions.append(
                            ReloadAction(
                                self.settings, load_time, region_start, region_size
                            )
                        )
                        load_time += MEM_BANDWIDTH * region_size
                        self.loaded.add(region_start, region_end)

                        # Record access time for prefetched region
                        for interval_start, interval_end in self.loaded.intervals:
                            if (
                                interval_start <= region_start
                                and interval_end >= region_end
                            ):
                                key = (interval_start, interval_end)
                                # Use deadline as access time for prefetched data
                                self.interval_access_time[key] = max(
                                    self.interval_access_time.get(key, 0), deadline
                                )
                                break

        return load_time

    def fit(self, requests: list[Request]) -> list[Action]:
        """
        Execute the predictor algorithm with LRU eviction and pattern-based prefetching.

        Algorithm (Online):
        1. Process requests as they arrive (grouped by start time)
        2. Use LRU for eviction decisions (based on past access history)
        3. Prefetch adjacent/similar regions during compute time
        4. Leverage IO/Compute overlap when possible
        """
        actions: list[Action] = []
        io_end_time = 0  # Track when IO operations complete
        compute_end_time = 0  # Track when compute operations complete

        # Group requests by start time
        request_groups = self._group_requests_by_start_time(requests)

        for start, reqs in request_groups:
            # Load current batch (wait for previous IO and compute to complete)
            load_start_time = max(io_end_time, compute_end_time)
            load_time = load_start_time

            for _, req in reqs:
                load_time = self._load_bytes(
                    req.addr, req.size, actions, load_time, start
                )

            io_end_time = load_time

            # Execute Visit operations (all start together)
            visit_start_time = max(io_end_time, compute_end_time, start)
            # Requests in a batch are expected to share one duration, but use
            # the max so mixed durations are still timed correctly
            # (consistent with SimpleAlgorithm.fit).
            batch_duration = max(req.time for _, req in reqs)
            for i, req in reqs:
                actions.append(VisitAction(self.settings, visit_start_time, i, req))

            compute_end_time = visit_start_time + batch_duration

            # Use idle IO time during compute for speculative prefetching
            available_time = compute_end_time - io_end_time

            if available_time > 0:
                # Try to prefetch based on the last request in this batch
                last_req = reqs[-1][1]
                new_io_time = self._try_prefetch_similar_patterns(
                    last_req.addr, last_req.size, actions, io_end_time, compute_end_time
                )
                io_end_time = new_io_time

            # Release references for current batch
            self._release_references(reqs, self.referenced)

        # Final time is when both IO and compute are done
        final_time = max(io_end_time, compute_end_time)
        actions.append(FinAction(final_time))

        # Sort actions by time to ensure chronological order
        actions.sort(key=lambda a: a.time)

        return actions


class ProphetAlgorithm(Algorithm):
    """
    Prophet Algorithm: Enhances Predictor with future knowledge for optimal decisions.

    Based on Predictor's successful architecture, but uses:
    - Belady's optimal eviction instead of LRU (knows future accesses)
    - Smarter prefetching based on actual future requests
    - Same batch processing and IO/Compute overlap strategy

    Optimizations:
    - Incremental cache updates as requests are processed
    """

    def __init__(self, settings: Settings):
        super().__init__(settings)
        # Byte ranges currently resident in memory.
        self.loaded = IntervalSet()
        # Byte ranges pinned by the batch currently being processed.
        self.referenced = IntervalSet()
        self.max_memory = settings.M
        # All requests, kept for looking up future uses (Belady).
        self.all_requests: list[Request] = []
        # Index into all_requests of the batch being processed (despite the
        # name, this is a request index, not a group index — see fit()).
        self.current_group_idx = 0
        # Cache: (start_addr, end_addr, after_idx) -> next_use_index
        # (float("inf") means "never used again").
        self.next_use_cache: dict[tuple[int, int, int], float] = {}

    def _build_request_index(self, requests: list[Request]):
        """Build index of all requests for future lookups."""
        self.all_requests = requests
        # Clear cache when rebuilding index
        self.next_use_cache.clear()

    def _get_next_use_index(
        self, start_addr: int, end_addr: int, after_group_idx: int
    ) -> float:
        """
        Find when this memory region will next be used (request index into
        all_requests, scanning from after_group_idx onward).
        Returns inf if never used again.
        Uses caching to avoid repeated linear scans.
        """
        cache_key = (start_addr, end_addr, after_group_idx)

        # Check cache first
        if cache_key in self.next_use_cache:
            return self.next_use_cache[cache_key]

        # Look through future requests to find next use
        for idx in range(after_group_idx, len(self.all_requests)):
            req = self.all_requests[idx]
            # Any byte overlap with [start_addr, end_addr) counts as a use
            if req.addr < end_addr and req.addr + req.size > start_addr:
                self.next_use_cache[cache_key] = idx
                return idx

        # Not used again
        self.next_use_cache[cache_key] = float("inf")
        return float("inf")

    def _invalidate_cache_for_index(self, processed_idx: int):
        """
        Invalidate cache entries that are no longer valid after processing request at processed_idx.
        Only keeps cache entries with after_idx > processed_idx.
        """
        keys_to_remove = [
            key
            for key in self.next_use_cache.keys()
            if key[2] <= processed_idx  # key[2] is after_group_idx
        ]
        for key in keys_to_remove:
            del self.next_use_cache[key]

    def _will_be_used_again(self, start_addr: int, end_addr: int) -> bool:
        """Check if a memory region will be used again in the future."""
        next_use = self._get_next_use_index(
            start_addr, end_addr, self.current_group_idx
        )
        return next_use != float("inf")

    def _find_optimal_eviction_candidate(self) -> tuple[int, int] | None:
        """
        Find memory region to evict using Belady's optimal algorithm.
        Priority: 1) Never used again (inf), 2) Used furthest in the future.

        Returns None if every loaded byte is referenced.
        """
        best_region = None
        latest_use = -1
        never_used_again = None

        # Find unreferenced loaded regions
        unreferenced_regions = self._find_unreferenced_regions(
            self.loaded, self.referenced
        )

        for region_start, region_end in unreferenced_regions:
            # Find next use for this region (cached lookup)
            next_use = self._get_next_use_index(
                region_start, region_end, self.current_group_idx
            )

            # Prioritize regions that will never be used again
            if next_use == float("inf"):
                never_used_again = (region_start, region_end)
                break  # Found best candidate, no need to continue

            # Choose region with latest next use
            if next_use > latest_use:
                latest_use = next_use
                best_region = (region_start, region_end)

        return never_used_again if never_used_again else best_region

    def _evict_bytes_optimal(
        self, actions: list[Action], current_time: int, size_needed: int | None = None
    ) -> int:
        """Evict bytes using Belady's optimal algorithm.

        Args:
            size_needed: If given, evict at most this many bytes from the
                chosen region (partial eviction).

        Raises:
            RuntimeError: If every loaded byte is currently referenced.
        """
        best_region = self._find_optimal_eviction_candidate()

        if best_region is None:
            raise RuntimeError(
                "No bytes available to evict! All loaded bytes are referenced."
            )

        evict_start, evict_end = best_region
        region_size = evict_end - evict_start

        if size_needed is not None:
            size_to_evict = min(size_needed, region_size)
        else:
            size_to_evict = region_size

        return self._perform_eviction(
            actions, current_time, evict_start, size_to_evict, self.loaded
        )

    def _load_bytes(
        self, start_addr: int, size: int, actions: list[Action], current_time: int
    ) -> int:
        """Load memory bytes, evicting optimally if necessary.

        Returns the updated current time after all transfers.
        """
        end_addr = start_addr + size

        # Mark as referenced
        self.referenced.add(start_addr, end_addr)

        # Find unloaded regions
        unloaded_regions = self.loaded.difference(start_addr, end_addr)

        # Load unloaded regions
        for region_addr, region_end in unloaded_regions:
            region_size = region_end - region_addr

            # Evict if necessary using optimal strategy
            while self.loaded.total() + region_size > self.max_memory:
                space_needed = self.loaded.total() + region_size - self.max_memory
                current_time = self._evict_bytes_optimal(
                    actions, current_time, space_needed
                )

            # Load the region
            current_time = self._perform_reload(
                actions, current_time, region_addr, region_size, self.loaded
            )

        return current_time

    def _try_prefetch_next_batch(
        self,
        next_group_reqs: list[tuple[int, Request]],
        actions: list[Action],
        start_time: int,
        deadline: int,
    ) -> int:
        """
        Maximize IO utilization during idle time by:
        1. Collecting all regions to prefetch (sorted by priority)
        2. Interleaving eviction and prefetch operations
        3. Evicting never-used-again memory to make space
        4. Prefetching high-priority regions
        5. Continuing until deadline is reached

        Returns the time at which the issued IO finishes (>= start_time).
        """
        load_time = start_time

        # Step 1: collect every not-yet-resident gap needed by the next
        # batch, together with its next-use index for prioritisation.
        regions_to_load = []
        for idx, next_req in next_group_reqs:
            unloaded = self.loaded.difference(
                next_req.addr, next_req.addr + next_req.size
            )
            for region_addr, region_end in unloaded:
                # Loading a region that is never used again is wasted IO.
                if self._will_be_used_again(region_addr, region_end):
                    next_use = self._get_next_use_index(
                        region_addr, region_end, self.current_group_idx + 1
                    )
                    regions_to_load.append(
                        (
                            region_addr,
                            region_end,
                            next_use,
                            idx,  # request index for tie-breaking
                        )
                    )

        # Step 2: Sort by next use time (earlier use = higher priority)
        regions_to_load.sort(key=lambda x: (x[2], x[3]))

        # Step 3: Interleave eviction and prefetch to maximize idle time usage
        region_idx = 0
        while load_time < deadline and region_idx < len(regions_to_load):
            # Prefer freeing memory that will never be needed again.
            evictable = self._find_optimal_eviction_candidate()
            if evictable is not None:
                evict_start, evict_end = evictable
                evict_size = evict_end - evict_start
                next_evict_use = self._get_next_use_index(
                    evict_start, evict_end, self.current_group_idx
                )

                # Evict if it won't be used again and we have time
                if next_evict_use == float("inf"):
                    evict_time_needed = evict_size * MEM_BANDWIDTH
                    if load_time + evict_time_needed <= deadline:
                        load_time = self._perform_eviction(
                            actions, load_time, evict_start, evict_size, self.loaded
                        )
                        continue  # Re-evaluate with the freed memory

            region_addr, region_end, next_use, _ = regions_to_load[region_idx]

            # Parts of this region still missing from memory.
            missing_parts = self.loaded.difference(region_addr, region_end)
            if not missing_parts:  # Region is fully resident already
                region_idx += 1
                continue

            # Load as much of the missing parts as time and memory allow.
            for unloaded_start, unloaded_end in missing_parts:
                unloaded_size = unloaded_end - unloaded_start
                time_remaining = deadline - load_time
                if time_remaining <= 0:
                    break

                # Largest transfer that still completes before the deadline.
                max_size_by_time = int(time_remaining / MEM_BANDWIDTH)
                free_memory = self.max_memory - self.loaded.total()

                # Evict (optimally) to make space, while time allows
                while (
                    free_memory < unloaded_size and free_memory < max_size_by_time
                ):
                    evictable = self._find_optimal_eviction_candidate()
                    if evictable is None:
                        break
                    evict_start, evict_end = evictable
                    evict_size = evict_end - evict_start

                    evict_time_needed = evict_size * MEM_BANDWIDTH
                    if load_time + evict_time_needed > deadline:
                        break

                    load_time = self._perform_eviction(
                        actions, load_time, evict_start, evict_size, self.loaded
                    )
                    free_memory = self.max_memory - self.loaded.total()

                max_loadable_size = min(
                    unloaded_size, max_size_by_time, free_memory
                )

                if max_loadable_size > 0:
                    load_time = self._perform_reload(
                        actions,
                        load_time,
                        unloaded_start,
                        max_loadable_size,
                        self.loaded,
                    )
                    break  # Move to next region after loading something

            # Advance unconditionally: either a piece was loaded or no
            # progress is possible on this region right now.
            region_idx += 1

        # Step 4: Continue evicting useless memory if time remains
        while load_time < deadline:
            evictable = self._find_optimal_eviction_candidate()
            if evictable is None:
                break

            evict_start, evict_end = evictable
            evict_size = evict_end - evict_start
            next_evict_use = self._get_next_use_index(
                evict_start, evict_end, self.current_group_idx
            )

            if next_evict_use == float("inf"):
                evict_time_needed = evict_size * MEM_BANDWIDTH
                if load_time + evict_time_needed <= deadline:
                    load_time = self._perform_eviction(
                        actions, load_time, evict_start, evict_size, self.loaded
                    )
                else:
                    break
            else:
                break

        return load_time

    def fit(self, requests: list[Request]) -> list[Action]:
        """
        Execute Prophet using Predictor's architecture + Belady's optimal eviction.

        Follows Predictor's proven batch processing strategy:
        1. Group requests by start time
        2. Load batch, execute visits concurrently
        3. Prefetch next batch during compute time
        4. Use optimal eviction instead of LRU
        """
        actions: list[Action] = []
        io_end_time = 0
        compute_end_time = 0

        # Build request index for Belady's algorithm
        self._build_request_index(requests)

        # Group requests by start time (same as Predictor)
        request_groups = self._group_requests_by_start_time(requests)

        for group_idx, (start, reqs) in enumerate(request_groups):
            # Track current position for Belady: the index of the batch's
            # first request within all_requests (a request index, which is
            # what _get_next_use_index scans from).
            self.current_group_idx = reqs[0][0]

            # Load current batch
            load_start_time = max(io_end_time, compute_end_time)
            load_time = load_start_time

            for _, req in reqs:
                load_time = self._load_bytes(req.addr, req.size, actions, load_time)

            io_end_time = load_time

            # Execute Visit operations (all start together)
            visit_start_time = max(io_end_time, compute_end_time, start)
            # Use the max duration so batches with mixed durations are still
            # timed correctly (consistent with SimpleAlgorithm.fit).
            batch_duration = max(req.time for _, req in reqs)
            for i, req in reqs:
                actions.append(VisitAction(self.settings, visit_start_time, i, req))

            compute_end_time = visit_start_time + batch_duration

            # Prefetch next batch during compute time
            has_next_batch = group_idx + 1 < len(request_groups)
            available_time = compute_end_time - io_end_time

            if available_time > 0 and has_next_batch:
                _, next_reqs = request_groups[group_idx + 1]
                new_io_time = self._try_prefetch_next_batch(
                    next_reqs, actions, io_end_time, compute_end_time
                )
                io_end_time = new_io_time

            # Release references for current batch
            self._release_references(reqs, self.referenced)

            # Invalidate cache entries anchored at already-processed request
            # indices; keeps the cache bounded and lookups fast.
            if reqs:
                last_processed_idx = reqs[-1][0]
                self._invalidate_cache_for_index(last_processed_idx)

        # Final time
        final_time = max(io_end_time, compute_end_time)
        actions.append(FinAction(final_time))

        # Sort actions by time
        actions.sort(key=lambda a: a.time)

        return actions
