
import torch
import torch.nn.functional as F

def process_observation_to_sequence(obs: dict) -> torch.Tensor:
    """
    [V3 - Entity Tokenization & Normalization]
    Converts the environment observation into a tokenized sequence tensor,
    where each process and resource is an "entity" token. This structure is
    optimal for the Transformer's self-attention mechanism.

    This version is self-contained, infers all required dimensions from the
    'obs' dictionary, and builds all tokens with vectorized tensor ops
    (no per-entity Python loops). It also handles the zero-process case,
    which previously crashed on an empty-list index.

    Args:
        obs: The observation dictionary from the environment, which must
             include 'total_resources'. Expected keys (NumPy arrays unless
             noted): 'total_resources' [R], 'allocation' [P, R],
             'need' [P, R], 'available' [R], 'processes_status' [P]
             (ints in {0, 1, 2}), and 'process_properties' (list of P
             dicts, each with an 'is_realtime' entry).

    Returns:
        A torch.Tensor of shape [num_entities, feature_dim], where
        num_entities = num_processes + num_resources and
        feature_dim = 2 * num_resources + 6.

    Raises:
        ValueError: If 'total_resources' is missing from obs.
    """
    # --- 1. Infer Dimensions & Extract Data ---
    if 'total_resources' not in obs:
        raise ValueError("Observation dictionary must contain 'total_resources' for normalization.")

    num_processes, num_resources = obs['allocation'].shape

    # Convert all numpy arrays to float tensors for processing
    total_resources = torch.from_numpy(obs['total_resources']).float()
    allocation = torch.from_numpy(obs['allocation']).float()
    need = torch.from_numpy(obs['need']).float()
    available = torch.from_numpy(obs['available']).float()
    processes_status = torch.from_numpy(obs['processes_status']).long()  # Keep as long for one-hot
    process_properties = torch.tensor(
        [p['is_realtime'] for p in obs['process_properties']], dtype=torch.float
    )

    # --- 2. Normalize Numerical Features ---
    # Normalization is crucial for stable training. We scale resource counts
    # by the total available, preventing large numbers from dominating the model.
    epsilon = 1e-8  # Add a small epsilon to prevent division by zero
    denom = total_resources + epsilon   # [R]; hoisted, shared by all three divisions
    norm_allocation = allocation / denom.unsqueeze(0)  # [P, R]
    norm_need = need / denom.unsqueeze(0)              # [P, R]
    norm_available = available / denom                 # [R]

    # --- 3. Create Process Entity Tokens (vectorized) ---
    # Each process token: [alloc (R), need (R), status one-hot (3),
    # is_realtime (1), entity type (2)] -> 2R + 6 features per process.
    status_one_hot = F.one_hot(processes_status, num_classes=3).float()  # [P, 3]
    process_tokens = torch.cat([
        norm_allocation,                                   # held amounts (normalized)
        norm_need,                                         # needed amounts (normalized)
        status_one_hot,                                    # Running/Blocked/Terminated
        process_properties.unsqueeze(1),                   # [P, 1] real-time flag
        torch.tensor([1.0, 0.0]).expand(num_processes, 2), # [is_process, is_resource]
    ], dim=1)

    # --- 4. Create Resource Entity Tokens (vectorized) ---
    # Each resource token carries [available (1), total allocated (1)],
    # zero-padding to the process feature width, then the entity-type pair.
    norm_total_allocated = allocation.sum(dim=0) / denom  # [R]

    # The feature width is known analytically (2R + 6), so this also works
    # when there are zero processes (the old code indexed process_tokens[0]).
    feature_dim = 2 * num_resources + 6
    padding_size = feature_dim - 4  # 4 = available + total_allocated + 2 type dims

    resource_tokens = torch.cat([
        norm_available.unsqueeze(1),                       # [R, 1]
        norm_total_allocated.unsqueeze(1),                 # [R, 1]
        torch.zeros(num_resources, padding_size),          # pad to match process width
        torch.tensor([0.0, 1.0]).expand(num_resources, 2), # entity type identifier
    ], dim=1)

    # --- 5. Combine All Tokens ---
    # Processes first, then resources, matching the original token ordering.
    return torch.cat([process_tokens, resource_tokens], dim=0)

