from __future__ import annotations

import random
from abc import ABC, abstractmethod
from typing import List, Optional

from model import MAX_TIME, Request, Settings, MEM_BANDWIDTH

from utils.intervals import IntervalSet

import numpy as np


class RequestBuilder:
    """Builds a list of synthetic memory Requests using a TickGenerator policy."""

    def __init__(self, settings: Settings):
        self.settings = settings
        # End time (start + duration) of the most recently accepted request.
        self.current_time = 0

    def build(
        self, tick_gen: TickGenerator, max_time_gap: int, seed: int
    ) -> List[Request]:
        """
        Build requests with time incremental restriction.

        Args:
            tick_gen: policy used to draw addr/size/start-gap/duration.
            max_time_gap: upper bound passed to the generator for gaps/durations.
            seed: seed for the ``random`` module, for reproducible traces.

        Returns:
            Exactly ``settings.N`` accepted requests with non-decreasing
            start times.
        """
        # Seed BEFORE any random draw so the whole trace, including the
        # synthetic seed request below, is reproducible.  Previously the
        # seed was applied only after the first draw.
        random.seed(seed)

        start = 0
        time_gap = max_time_gap
        requests: List[Request] = []
        # Synthetic "previous request" that bootstraps the generator chain.
        pre_req = Request(
            settings=self.settings,
            addr=0,
            size=1,
            start=0,
            time=max(1, int(round(random.random() ** 4 * time_gap))),
        )

        while len(requests) < self.settings.N:
            addr = tick_gen.gen_new_addr(pre_req=pre_req)
            size = tick_gen.gen_new_size(pre_req=pre_req)

            # Reject visits that would exceed the virtual memory boundary.
            if addr + size > self.settings.L:
                continue

            start_time_gap = tick_gen.gen_new_start_gap(
                pre_req, time_gap=max_time_gap, size=size
            )

            start = min(start + start_time_gap, MAX_TIME)

            # Requests that share a start time also share a duration, so
            # they overlap exactly (relied upon by the OOM check below).
            if start == pre_req.start:
                duration = pre_req.time
            else:
                duration = tick_gen.gen_new_duration(
                    pre_req, time_gap=max_time_gap, size=size, start=start
                )

            if start + duration > MAX_TIME:
                continue

            this_request = Request(
                settings=self.settings,
                addr=addr,
                size=size,
                start=start,
                time=duration,
            )

            # Reject requests that would exceed the global time budget.
            if not self._time_bound_check(this_request):
                continue

            self.current_time = this_request.start + this_request.time

            # Shrink (or drop) the request if it would cause HBM OOM.
            new_size = self._hbm_oom_check(this_request, requests)

            if new_size == 0:
                continue
            this_request.size = new_size
            requests.append(this_request)
            # The just-appended request becomes the Markov predecessor.
            pre_req = requests[-1]

        return requests

    def _time_bound_check(self, request: Request) -> bool:
        """Return False if accepting *request* would exceed MAX_TIME."""
        this_visit_time = request.start + request.time
        # NOTE(review): summing this request's end time with the running
        # current_time reads as a cumulative budget check — confirm intent.
        if this_visit_time + self.current_time > MAX_TIME:
            return False
        return True

    def _hbm_oom_check(
        self,
        this_request: Request,
        requests: List[Request],
    ) -> int:
        """Return the size *this_request* may use without exceeding HBM.

        Only requests starting at exactly the same tick are considered:
        their address intervals are unioned and compared against
        ``settings.M``.  Returns 0 when no memory remains (caller drops
        the request).
        """
        start = this_request.start
        size = this_request.size
        # ``requests`` is ordered by start time, so scan backwards and stop
        # at the first request from an earlier tick.
        same_time_requests = []
        for request in reversed(requests):
            if request.start == start:
                same_time_requests.append(request)
            elif request.start < start:
                break
        if same_time_requests:
            intervals = IntervalSet()
            for request in same_time_requests:
                intervals.add(request.addr, request.addr + request.size)
            used_mem_sum = intervals.total()
            if used_mem_sum + size > self.settings.M:
                # Clamp at 0 so a caller never receives a negative size.
                return max(0, self.settings.M - used_mem_sum)
        return size


class TickGenerator(ABC):
    """Abstract interface for request-parameter generators.

    Concrete subclasses draw the address, size, duration and start gap of
    each synthetic memory request, optionally conditioned on the previous
    request passed as ``pre_req``.
    """

    def __init__(self, settings: Settings):
        self.settings = settings

    @abstractmethod
    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw the start address of the next request."""

    @abstractmethod
    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw the size of the next request."""

    @abstractmethod
    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw the duration of the next request."""

    @abstractmethod
    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw the gap between the previous start time and the next one."""

    @classmethod
    def list_modes(cls) -> list[str]:
        """Mode names accepted by :meth:`from_mode`."""
        return ["naive", "markov", "markov+", "kvcache", "highload", "mixed-precision"]

    @classmethod
    def from_mode(cls, mode: str, settings: Settings, **kwargs) -> TickGenerator:
        """Factory: instantiate the generator registered under *mode*.

        Raises:
            ValueError: if *mode* is not one of :meth:`list_modes`.
        """
        if mode == "naive":
            return TickNaiveGenerator(settings, **kwargs)
        if mode == "markov":
            return TickBinMarkovGenerator(settings, **kwargs)
        if mode == "markov+":
            return TickBinMarkovPlusGenerator(settings, **kwargs)
        if mode == "kvcache":
            return TickKVCacheAccessGenerator(settings, **kwargs)
        if mode == "highload":
            return TickHighLoadGenerator(settings, **kwargs)
        if mode == "mixed-precision":
            return TickMixedPrecisionTrainingGenerator(settings, **kwargs)
        raise ValueError(f"Unknown TickGenerator name: {mode}")


class TickNaiveGenerator(TickGenerator):
    """
    Uniform-random tick generator: every parameter is drawn independently,
    with no temporal correlation between consecutive requests.
    """

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        # Uniform over the whole virtual address space [0, L - 1].
        return random.randint(0, self.settings.L - 1)

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        # Uniform over [1, M] (physical memory size).
        return random.randint(1, self.settings.M)

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        # Prefer the caller-supplied time_gap; otherwise fall back to the
        # previous request's duration (or 100 when there is no history).
        fallback = pre_req.time if pre_req else 100
        window = kwargs.get("time_gap", fallback)
        # Cubing the coefficient biases the upper bound towards small values.
        upper = max(int(random.random() ** 3 * window), 1)
        return random.randint(1, min(upper, MAX_TIME))

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        fallback = pre_req.time if pre_req else 100
        window = kwargs.get("time_gap", fallback)
        # The fourth power biases gaps even more strongly towards zero.
        return int(random.random() ** 4 * window)


class TickBinMarkovGenerator(TickGenerator):
    """
    Markov-chain tick generator: durations are bucketed into randomly chosen
    bins, and a random row-stochastic transition matrix drives the
    bin-to-bin evolution, producing temporally correlated requests.
    """

    def __init__(self, settings: Settings):
        super().__init__(settings)
        self.bins = self._gen_random_bins()
        self.P = self._gen_random_trans_matrix(self.bins)

    @staticmethod
    def _gen_random_bins() -> list:
        """Build a random, increasing list of duration bin edges."""
        # Between 3 and 7 edges; the first edge is the minimum duration, 1.
        edge_count = random.randint(3, 7)
        edges = [1]
        for _ in range(edge_count - 1):
            prev = edges[-1]
            # Grow by a random factor in [2, 8), but always by at least 5.
            candidate = max(int(prev * random.uniform(2, 8)), prev + 5)
            # Snap to a multiple of 5 (small values) or 10 (large values)
            # so edges look like 10, 50, 100, ...
            snap = 5 if candidate < 50 else 10
            edges.append(round(candidate / snap) * snap)
        return edges

    @staticmethod
    def _gen_random_trans_matrix(bins: list) -> np.ndarray:
        """Build a random row-stochastic transition matrix over the bins."""
        k = len(bins) - 1
        P = np.zeros((k, k))
        for i in range(k):
            # exp(-(j - i)) favours transitions towards smaller bins.
            row = np.array([np.exp(-1.0 * (j - i)) for j in range(k)])
            row = row * np.random.uniform(0.5, 2, size=k)
            P[i] = row / row.sum()
        return P

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a duration from the Markov chain conditioned on pre_req."""
        if pre_req is None:
            # No history: sample uniformly from a randomly chosen bin.
            idx = random.randint(0, len(self.bins) - 2)
            return random.randint(self.bins[idx], self.bins[idx + 1])

        # Map the previous duration onto its bin row, clamped into range.
        row = np.digitize([pre_req.time], self.bins)[0] - 1
        row = np.clip(row, 0, self.P.shape[0] - 1)
        weights = self.P[row]
        weights = weights / weights.sum()  # re-normalize against drift
        chosen = np.random.choice(len(weights), p=weights)

        lo = self.bins[chosen]
        # The open-ended last bin extends to twice its lower edge.
        hi = self.bins[chosen + 1] if chosen + 1 < len(self.bins) else lo * 2
        sample = np.random.uniform(lo, hi)
        # Round, then clamp into [1, MAX_TIME].
        return min(max(1, int(round(sample))), MAX_TIME)

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a start gap ~ Normal, scaled by the previous duration."""
        if pre_req is None:
            idx = random.randint(0, len(self.bins) - 2)
            prior = random.randint(self.bins[idx], self.bins[idx + 1])
        else:
            prior = pre_req.time
        mu = prior * np.random.uniform(0.1, 1.0)
        sigma = mu * np.random.uniform(0.1, 1.0)
        # Negative normal samples are clamped to a zero gap.
        return max(0, int(round(np.random.normal(loc=mu, scale=sigma))))

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        # NOTE(review): np.random.randint's upper bound is exclusive, so this
        # never returns L - 1 (unlike the naive generator) — confirm intent.
        return np.random.randint(0, self.settings.L - 1)

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        return np.random.randint(1, self.settings.M)


class TickKVCacheAccessGenerator(TickGenerator):
    """
    KV-cache access pattern generator, simulates KV-cache access behavior
    during large language model inference.

    Features:
    1. Sequential memory region access
    2. Relatively fixed access size, usually a specific block size
    3. High access frequency, short intervals
    4. Usually short duration
    """

    def __init__(
        self,
        settings: Settings,
        block_size: int = 64,
        cache_region_start: int = 0,
        cache_region_end: Optional[int] = None,
    ):
        """
        Args:
            settings: global simulation settings (uses ``settings.L``).
            block_size: size of one KV-cache block (default 64).
            cache_region_start: first address of the cache region.
            cache_region_end: one-past-last address of the cache region;
                defaults to ``settings.L`` when None.
        """
        super().__init__(settings)
        self.block_size = block_size
        self.cache_region_start = cache_region_start
        self.cache_region_end = (
            cache_region_end if cache_region_end is not None else self.settings.L
        )
        self.current_block = 0
        # Number of whole blocks in the region.  Clamp to >= 1 so the
        # modulo in gen_new_addr can never divide by zero when the region
        # is smaller than a single block.
        self.num_blocks = max(
            1,
            (self.cache_region_end - self.cache_region_start) // self.block_size,
        )

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Generate KV-cache access address, mainly sequential access pattern."""
        # 80% probability of sequential access, 20% probability of random
        # access (simulating non-contiguous access in attention mechanism).
        if random.random() < 0.8 and pre_req is not None:
            # Sequential access pattern, small fluctuations near current block.
            offset = random.randint(-2, 3)  # Allow small jumps
            self.current_block = (self.current_block + offset) % self.num_blocks
        else:
            jump_prob = random.random()
            if jump_prob < 0.3:
                # 30% probability of randomizing near current position.
                self.current_block = max(
                    0,
                    min(
                        self.num_blocks - 1, self.current_block + random.randint(-5, 5)
                    ),
                )
            else:
                # 70% probability of true random jump.
                self.current_block = random.randint(0, self.num_blocks - 1)

        # Calculate actual memory address and keep it within bounds.
        addr = self.cache_region_start + self.current_block * self.block_size
        return min(addr, self.settings.L - self.block_size)

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Generate KV-cache access size: one block, occasionally 2-4 blocks."""
        if random.random() < 0.9:
            return self.block_size
        # 10% probability: 2-4 consecutive blocks.
        return self.block_size * random.randint(2, 4)

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Generate KV-cache access duration, usually short."""
        base_duration = 1
        # Small random fluctuation; max() keeps the duration at least 1.
        duration = max(1, int(base_duration * random.uniform(0.8, 1.5)))
        return duration

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Generate KV-cache access interval (high-frequency, short gaps)."""
        base_gap = 5
        # Use the caller-supplied time_gap when available.
        time_gap = kwargs.get("time_gap", base_gap)
        return max(0, int(time_gap * random.uniform(0.1, 0.3)))


class TickHighLoadGenerator(TickGenerator):
    """
    High-load memory access pattern generator: simulates compute-intensive
    workloads that touch large blocks frequently with short intervals,
    partial locality, and relatively long durations.
    """

    def __init__(
        self,
        settings: Settings,
        min_block_size: int = 128,
        max_block_size: int = 512,
        locality_factor: float = 0.4,
    ):
        super().__init__(settings)
        # Block size range of a single access.
        self.min_block_size = min_block_size
        self.max_block_size = max_block_size
        # Probability of staying within the current region.
        self.locality_factor = locality_factor
        self.current_region = 0
        # Size of one locality region.
        self.region_size = min(1024, self.settings.L // 4)

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Pick an address: stay near the current region with probability
        ``locality_factor``, otherwise jump to a random region."""
        stay_local = random.random() < self.locality_factor and pre_req is not None
        if stay_local:
            quarter = self.region_size // 4
            addr = self.current_region + random.randint(-quarter, quarter)
        else:
            region_count = max(1, self.settings.L // self.region_size)
            self.current_region = random.randint(0, region_count - 1) * self.region_size
            addr = self.current_region

        # Extra jitter within half a region to increase randomness.
        addr += random.randint(0, self.region_size // 2)

        # Clamp so a maximum-size block still fits in virtual memory.
        return max(0, min(addr, self.settings.L - self.max_block_size))

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a large access size, occasionally multiplied for batches."""
        size = random.randint(self.min_block_size, self.max_block_size)
        if random.random() < 0.1:
            # Occasional batch-style access of 2-4x the block.
            size *= random.randint(2, 4)
        # Never exceed physical or virtual memory limits.
        return min(size, self.settings.M, self.settings.L)

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a relatively long duration, scaled by the time_gap kwarg."""
        duration = kwargs.get("time_gap", 100) * random.uniform(0.5, 1.5)
        # A large predecessor suggests more work, hence more time.
        if pre_req is not None and pre_req.size > self.max_block_size:
            duration *= random.uniform(1.2, 2.0)
        # Clamp into [5, MAX_TIME].
        return min(max(5, int(duration)), MAX_TIME)

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a short inter-request gap, occasionally near-zero bursts."""
        gap = kwargs.get("time_gap", 100) * random.uniform(0.1, 0.4)
        if random.random() < 0.2:
            gap = 1  # burst: back-to-back issue under true high load
        return max(0, int(gap))


class TickMixedPrecisionTrainingGenerator(TickGenerator):
    """
    Mixed-precision training access generator: simulates the memory traffic
    of a training loop that alternates between precision regions (FP16/FP32),
    switches among weights, gradients and optimizer states, and cycles
    through forward, backward and parameter-update phases.
    """

    def __init__(
        self,
        settings: Settings,
        layer_size: int = 256,
        num_layers: int = 10,
        model_region_start: int = 0,
    ):
        super().__init__(settings)
        # Per-layer footprint and network depth.
        self.layer_size = layer_size
        self.num_layers = num_layers
        self.region_start = model_region_start

        self.current_layer = 0
        # Phase: 0 = forward, 1 = backward, 2 = parameter update.
        self.current_phase = 0
        # Access type: 0 = weights, 1 = gradients, 2 = optimizer states.
        self.current_access_type = 0

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Compute the next address from the current phase/type/layer state."""
        # 10% chance (or no history) to advance the training cycle.
        # random.random() is drawn FIRST to keep the RNG stream identical
        # regardless of pre_req.
        if random.random() < 0.1 or pre_req is None:
            self.current_phase = (self.current_phase + 1) % 3
            if self.current_phase == 2:
                # Parameter update touches weights, gradients or optimizer
                # states with equal probability.
                self.current_access_type = random.randint(0, 2)
            else:
                # Forward mainly reads weights (0);
                # backward mainly reads gradients (1).
                self.current_access_type = self.current_phase
            self.current_layer = random.randint(0, self.num_layers - 1)

        # Address = region base + access-type region + layer slot + jitter.
        addr = self.region_start
        addr += self.current_access_type * self.layer_size * self.num_layers
        addr += self.current_layer * self.layer_size
        # Random offset simulates touching different parts within the layer.
        addr += random.randint(0, self.layer_size // 2)

        # Keep at least 64 units of headroom for the memory block.
        return max(0, min(addr, self.settings.L - 64))

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw an access size reflecting the precision mix of the phase."""
        # Probability of a half-precision (FP16-sized, 32) access per phase;
        # otherwise full precision (FP32-sized, 64).
        half_precision_prob = {0: 0.6, 1: 0.5, 2: 0.2}[self.current_phase]
        base_size = 32 if random.random() < half_precision_prob else 64

        if self.current_access_type == 2:
            # Optimizer state entries are roughly twice as large.
            base_size *= 2

        # Random variation, then round up to a power of two (alignment),
        # never below 16.
        size = int(base_size * random.uniform(0.8, 1.5))
        size = 2 ** int(np.ceil(np.log2(max(16, size))))
        return min(size, self.settings.M, self.settings.L)

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a duration scaled by the computational weight of the phase."""
        duration = kwargs.get("time_gap", 100) * random.uniform(0.3, 0.8)

        if self.current_phase == 0:    # forward pass
            duration *= random.uniform(0.8, 1.2)
        elif self.current_phase == 1:  # backward pass is heavier
            duration *= random.uniform(1.2, 2.0)
        else:                          # parameter update
            duration *= random.uniform(0.5, 1.0)

        # Clamp into [3, MAX_TIME].
        return min(max(3, int(duration)), MAX_TIME)

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw an inter-request gap depending on the training phase."""
        window = kwargs.get("time_gap", 100)

        if self.current_phase == 0:    # forward pass
            gap = window * random.uniform(0.1, 0.3)
        elif self.current_phase == 1:  # backward pass
            gap = window * random.uniform(0.15, 0.4)
        else:                          # parameter update
            gap = window * random.uniform(0.2, 0.5)

        if random.random() < 0.1:
            gap = 1  # occasional burst of back-to-back accesses

        return max(0, int(gap))


class TickBinMarkovPlusGenerator(TickGenerator):
    """
    Extended Markov-chain tick generator.  Like TickBinMarkovGenerator it
    drives durations through a random bin-transition matrix, but it also
    mixes in an IO-time model (size * MEM_BANDWIDTH) and shrinks gaps and
    durations as the remaining time budget (MAX_TIME - start) runs out.
    """

    def __init__(self, settings: Settings):
        super().__init__(settings)
        self.bins = self._gen_random_bins()
        self.P = self._gen_random_trans_matrix(self.bins)

    @staticmethod
    def _gen_random_bins() -> list:
        """Randomly generate increasing duration bin edges."""
        # Number of bins, range from 3 to 7.
        num_bins = random.randint(3, 7)
        # Start from 1 (minimum duration is 1 as well).
        current_value = 1
        bins = [current_value]
        for _ in range(num_bins - 1):
            # Random growth factor in [2, 8); always grow by at least 5.
            growth_factor = random.uniform(2, 8)
            next_value = max(int(current_value * growth_factor), current_value + 5)
            # Round to a multiple of 5 or 10 so edges look like 10, 50, 100...
            if next_value < 50:
                next_value = round(next_value / 5) * 5
            else:
                next_value = round(next_value / 10) * 10
            bins.append(next_value)
            current_value = next_value

        return bins

    @staticmethod
    def _gen_random_trans_matrix(bins: list) -> np.ndarray:
        """Generate a random row-stochastic state transition matrix."""
        k = len(bins) - 1
        P = np.zeros((k, k))

        for i in range(k):
            base_probs = np.zeros(k)
            for j in range(k):
                # Tend to transfer to smaller bins.
                base_probs[j] = np.exp(-1.0 * (j - i))

            noise = np.random.uniform(0.5, 2, size=k)
            noisy_probs = base_probs * noise
            P[i] = noisy_probs / noisy_probs.sum()

        return P

    def gen_new_duration(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a duration: Markov-chain path, or an IO-time-based path
        whose likelihood grows as the time budget shrinks."""
        cur_start = kwargs.get("start", 0)
        cur_req_size = kwargs.get("size", 1)
        if pre_req is None:
            random_bin_idx = random.randint(0, len(self.bins) - 2)
            return random.randint(
                self.bins[random_bin_idx], self.bins[random_bin_idx + 1]
            )

        # Scale the branch probability by the remaining time budget.
        prob_for_large_duration = np.random.random()
        remain_time_budget_ratio = (MAX_TIME - pre_req.start) / MAX_TIME
        prob_for_large_duration *= remain_time_budget_ratio

        if prob_for_large_duration < 1 / 2:
            # Markov path: transition from the previous duration's bin.
            pre_dur = pre_req.time
            bin_idx = np.digitize([pre_dur], self.bins)[0] - 1
            bin_idx = np.clip(bin_idx, 0, self.P.shape[0] - 1)
            probs = self.P[bin_idx]
            probs = probs / probs.sum()  # normalize probabilities
            next_bin = np.random.choice(len(probs), p=probs)

            # Generate a value within the selected bin; the open-ended last
            # bin extends to twice its lower edge.
            low = self.bins[next_bin]
            high = self.bins[next_bin + 1] if next_bin + 1 < len(self.bins) else low * 2
            val = np.random.uniform(low, high)
            new_dur = max(1, int(round(val)))
        else:
            # IO path: duration proportional to the transfer time.
            # NOTE(review): the modulo by settings.M looks like a capping
            # heuristic — confirm intent.
            io_time = (cur_req_size * MEM_BANDWIDTH) % self.settings.M
            multiplier = float(np.random.uniform(0.5, 3.0))
            required_min = max(1, int(io_time * multiplier))
            if cur_start + required_min > MAX_TIME:
                # Not enough budget left: fall back to a small fraction of
                # the current start time.  Cast to int — previously this
                # leaked a float out of an ``-> int`` function.
                required_min = max(1, int(cur_start * np.random.uniform(0.001, 0.05)))
            new_dur = required_min

        # Wrap into the horizon; clamp so the duration is never below 1.
        return max(1, min(new_dur % MAX_TIME, MAX_TIME))

    def gen_new_start_gap(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        """Draw a start gap: normal noise around the previous duration, or
        an IO/compute interpolation, shrinking as the budget runs out."""
        if pre_req is None:
            random_bin_idx = random.randint(0, len(self.bins) - 2)
            pre_dur = random.randint(
                self.bins[random_bin_idx], self.bins[random_bin_idx + 1]
            )
            # No history: assume the full time budget remains.  Previously
            # this path crashed with AttributeError on pre_req.start below.
            pre_start = 0
        else:
            pre_dur = pre_req.time
            pre_start = pre_req.start

        randomly_prob = np.random.random()
        remain_time_budget_ratio = (MAX_TIME - pre_start) / MAX_TIME
        randomly_prob *= remain_time_budget_ratio

        if randomly_prob < 1 / 2:
            mean_val = pre_dur * np.random.uniform(0.1, 0.5)
            sigma_val = mean_val * np.random.uniform(1, 2)
            gap = np.random.normal(loc=mean_val, scale=sigma_val)
            new_start_gap = max(0, int(round(gap)))  # gap is non-negative
        elif randomly_prob < 1 and pre_req is not None:
            # Interpolate between the previous request's IO time and its
            # compute time.
            pre_io_time = pre_req.size * MEM_BANDWIDTH
            pre_comp_time = pre_req.time
            alpha = np.random.random()
            new_start_gap = int(alpha * pre_io_time + (1 - alpha) * pre_comp_time)
        else:
            new_start_gap = 0

        # Never schedule past the simulation horizon.
        if MAX_TIME - pre_start < new_start_gap:
            new_start_gap = 0
        return new_start_gap

    def gen_new_addr(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        # np.random.randint's upper bound is exclusive: range is [0, L - 2].
        return np.random.randint(0, self.settings.L - 1)

    def gen_new_size(self, pre_req: Optional[Request] = None, **kwargs) -> int:
        return np.random.randint(1, self.settings.M)