Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- benchmarks/ADRS/cloudcast/README.md +50 -0
- benchmarks/ADRS/cloudcast/config.yaml +30 -0
- benchmarks/ADRS/cloudcast/evaluator/Dockerfile +13 -0
- benchmarks/ADRS/cloudcast/evaluator/broadcast.py +44 -0
- benchmarks/ADRS/cloudcast/evaluator/download_dataset.sh +37 -0
- benchmarks/ADRS/cloudcast/evaluator/evaluate.py +224 -0
- benchmarks/ADRS/cloudcast/evaluator/evaluate.sh +7 -0
- benchmarks/ADRS/cloudcast/evaluator/evaluator.py +297 -0
- benchmarks/ADRS/cloudcast/evaluator/requirements.txt +2 -0
- benchmarks/ADRS/cloudcast/evaluator/simulator.py +196 -0
- benchmarks/ADRS/cloudcast/evaluator/utils.py +109 -0
- benchmarks/ADRS/cloudcast/evaluator/wrapper.py +98 -0
- benchmarks/ADRS/cloudcast/initial_program.py +118 -0
- benchmarks/ADRS/eplb/config.yaml +37 -0
- benchmarks/ADRS/eplb/evaluator/Dockerfile +13 -0
- benchmarks/ADRS/eplb/evaluator/evaluate.sh +7 -0
- benchmarks/ADRS/eplb/evaluator/evaluate_best_program.py +66 -0
- benchmarks/ADRS/eplb/evaluator/evaluator.py +244 -0
- benchmarks/ADRS/eplb/evaluator/wrapper.py +98 -0
- benchmarks/ADRS/llm_sql/README.md +56 -0
- benchmarks/ADRS/llm_sql/config.yaml +81 -0
- benchmarks/ADRS/llm_sql/evaluator/Dockerfile +13 -0
- benchmarks/ADRS/llm_sql/evaluator/evaluate.sh +7 -0
- benchmarks/ADRS/llm_sql/evaluator/evaluator.py +227 -0
- benchmarks/ADRS/llm_sql/evaluator/solver.py +161 -0
- benchmarks/ADRS/llm_sql/evaluator/wrapper.py +98 -0
- benchmarks/ADRS/prism/evaluator/evaluate.sh +7 -0
- benchmarks/ADRS/prism/initial_program.py +75 -0
- benchmarks/ADRS/prism/initial_program_naive.py +30 -0
- benchmarks/ADRS/txn_scheduling/config.yaml +43 -0
- benchmarks/ADRS/txn_scheduling/evaluator/Dockerfile +13 -0
- benchmarks/ADRS/txn_scheduling/evaluator/evaluate.sh +7 -0
- benchmarks/ADRS/txn_scheduling/evaluator/evaluator.py +258 -0
- benchmarks/ADRS/txn_scheduling/evaluator/requirements.txt +1 -0
- benchmarks/ADRS/txn_scheduling/evaluator/txn_simulator.py +229 -0
- benchmarks/ADRS/txn_scheduling/evaluator/workloads.py +12 -0
- benchmarks/ADRS/txn_scheduling/evaluator/wrapper.py +98 -0
- benchmarks/ADRS/txn_scheduling/initial_program.py +106 -0
- benchmarks/ale_bench/README.md +84 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc008/best_program.cpp +612 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc008/evaluator.py +65 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc015/evaluator.py +65 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc039/best_program.cpp +1003 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc039/initial_program.cpp +925 -0
- benchmarks/ale_bench/ale-bench-lite-problems/ahc046/best_program.cpp +1111 -0
- benchmarks/ale_bench/ale_agent_best/ahc008.cpp +508 -0
- benchmarks/ale_bench/ale_agent_best/ahc011.cpp +607 -0
- benchmarks/ale_bench/ale_agent_best/ahc015.cpp +491 -0
- benchmarks/ale_bench/ale_agent_best/ahc024.cpp +481 -0
- benchmarks/ale_bench/ale_agent_best/ahc025.cpp +628 -0
benchmarks/ADRS/cloudcast/README.md
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Cloudcast — Multi-Cloud Data Transfer Optimization
|
| 2 |
+
|
| 3 |
+
Broadcast a dataset from a source cloud region to multiple destinations at minimum total cost. The evolved `search_algorithm` constructs routing topologies (relay trees, Steiner-like structures) that exploit shared intermediate hops across cloud providers.
|
| 4 |
+
|
| 5 |
+
Based on the Skyplane/Cloudcast system (NSDI'24).
|
| 6 |
+
|
| 7 |
+
## Setup
|
| 8 |
+
|
| 9 |
+
1. **Download the dataset** (network profiles and evaluation configs):
|
| 10 |
+
|
| 11 |
+
```bash
|
| 12 |
+
cd benchmarks/ADRS/cloudcast
|
| 13 |
+
bash evaluator/download_dataset.sh
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
This downloads:
|
| 17 |
+
- `profiles/cost.csv` — egress cost ($/GB) per region pair
|
| 18 |
+
- `profiles/throughput.csv` — measured throughput (bps) per region pair
|
| 19 |
+
- `examples/config/*.json` — 5 network configurations used for evaluation (intra-AWS, intra-Azure, intra-GCP, inter-cloud)
|
| 20 |
+
|
| 21 |
+
2. **Set your API key:**
|
| 22 |
+
|
| 23 |
+
```bash
|
| 24 |
+
export OPENAI_API_KEY=...
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
## Run
|
| 28 |
+
|
| 29 |
+
From the repo root:
|
| 30 |
+
|
| 31 |
+
```bash
|
| 32 |
+
uv run skydiscover-run \
|
| 33 |
+
benchmarks/ADRS/cloudcast/initial_program.py \
|
| 34 |
+
benchmarks/ADRS/cloudcast/evaluator/evaluator.py \
|
| 35 |
+
-c benchmarks/ADRS/cloudcast/config.yaml \
|
| 36 |
+
-s [your_algorithm] \
|
| 37 |
+
-i 100
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
## Files
|
| 41 |
+
|
| 42 |
+
| File | Description |
|
| 43 |
+
|------|-------------|
|
| 44 |
+
| `initial_program.py` | Baseline `search_algorithm` function to evolve |
|
| 45 |
+
| `evaluator.py` | Scores programs on total transfer cost across 5 network configs |
|
| 46 |
+
| `config.yaml` | Task-specific config (LLM, evaluator timeout, system prompt) |
|
| 47 |
+
| `simulator.py` | Broadcast cost simulator |
|
| 48 |
+
| `broadcast.py` | `BroadCastTopology` data structure |
|
| 49 |
+
| `utils.py` | Graph construction from profile CSVs |
|
| 50 |
+
| `download_dataset.sh` | Script to download required data files |
|
benchmarks/ADRS/cloudcast/config.yaml
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CloudCast — Cloud Broadcast Optimization (NSDI'24)
|
| 2 |
+
# Usage: skydiscover-run initial_program.py evaluator.py -c config.yaml -s <strategy>
|
| 3 |
+
language: python
|
| 4 |
+
diff_based_generation: true
|
| 5 |
+
max_iterations: 100
|
| 6 |
+
checkpoint_interval: 5
|
| 7 |
+
max_solution_length: 60000
|
| 8 |
+
|
| 9 |
+
llm:
|
| 10 |
+
api_base: https://api.openai.com/v1
|
| 11 |
+
models:
|
| 12 |
+
- name: "gpt-5"
|
| 13 |
+
weight: 1.0
|
| 14 |
+
max_tokens: 32000
|
| 15 |
+
timeout: 600
|
| 16 |
+
|
| 17 |
+
prompt:
|
| 18 |
+
system_message: |-
|
| 19 |
+
You are an expert in cloud infrastructure optimization. Your task is to evolve the
|
| 20 |
+
search_algorithm(src, dsts, G, num_partitions) function to minimize overall
|
| 21 |
+
data transfer cost across multiple clouds.
|
| 22 |
+
Focus on efficiently broadcasting input data to multiple destination nodes by leveraging
|
| 23 |
+
parallel paths and overlapping transfers across networks. Use the BroadCastTopology
|
| 24 |
+
class and make_nx_graph function to identify low-cost routes.
|
| 25 |
+
Prioritize strategies that reduce redundant transfers, balance load across networks,
|
| 26 |
+
and exploit multi-network topologies to minimize cost.
|
| 27 |
+
|
| 28 |
+
evaluator:
|
| 29 |
+
timeout: 600
|
| 30 |
+
|
benchmarks/ADRS/cloudcast/evaluator/Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
|
| 2 |
+
WORKDIR /benchmark
|
| 3 |
+
|
| 4 |
+
COPY requirements.txt .
|
| 5 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 6 |
+
|
| 7 |
+
# wrapper.py provides backwards compatibility for old Python-based evaluators
|
| 8 |
+
# that define evaluate(program_path) -> dict, bridging them to the container
|
| 9 |
+
# JSON protocol. Source of truth: skydiscover/evaluation/wrapper.py
|
| 10 |
+
COPY . .
|
| 11 |
+
RUN chmod +x evaluate.sh
|
| 12 |
+
|
| 13 |
+
ENTRYPOINT ["./evaluate.sh"]
|
benchmarks/ADRS/cloudcast/evaluator/broadcast.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, List
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class SingleDstPath(Dict):
|
| 5 |
+
partition: int
|
| 6 |
+
edges: List[List] # [[src, dst, edge data]]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BroadCastTopology:
|
| 10 |
+
def __init__(self, src: str, dsts: List[str], num_partitions: int = 4, paths: Dict[str, SingleDstPath] = None):
|
| 11 |
+
self.src = src # single str
|
| 12 |
+
self.dsts = dsts # list of strs
|
| 13 |
+
self.num_partitions = num_partitions
|
| 14 |
+
|
| 15 |
+
# dict(dst) --> dict(partition) --> list(nx.edges)
|
| 16 |
+
# example: {dst1: {partition1: [src->node1, node1->dst1], partition 2: [src->dst1]}}
|
| 17 |
+
if paths is not None:
|
| 18 |
+
self.paths = paths
|
| 19 |
+
self.set_graph()
|
| 20 |
+
else:
|
| 21 |
+
self.paths = {dst: SingleDstPath().fromkeys(range(num_partitions)) for dst in dsts}
|
| 22 |
+
|
| 23 |
+
def get_paths(self):
|
| 24 |
+
print(f"now the set path is: {self.paths}")
|
| 25 |
+
return self.paths
|
| 26 |
+
|
| 27 |
+
def set_num_partitions(self, num_partitions: int):
|
| 28 |
+
self.num_partitions = num_partitions
|
| 29 |
+
|
| 30 |
+
def set_dst_partition_paths(self, dst: str, partition: int, paths: List[List]):
|
| 31 |
+
"""
|
| 32 |
+
Set paths for partition = partition to reach dst
|
| 33 |
+
"""
|
| 34 |
+
partition = str(partition)
|
| 35 |
+
self.paths[dst][partition] = paths
|
| 36 |
+
|
| 37 |
+
def append_dst_partition_path(self, dst: str, partition: int, path: List):
|
| 38 |
+
"""
|
| 39 |
+
Append path for partition = partition to reach dst
|
| 40 |
+
"""
|
| 41 |
+
partition = str(partition)
|
| 42 |
+
if self.paths[dst][partition] is None:
|
| 43 |
+
self.paths[dst][partition] = []
|
| 44 |
+
self.paths[dst][partition].append(path)
|
benchmarks/ADRS/cloudcast/evaluator/download_dataset.sh
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
# Download dataset and config files for the Cloudcast benchmark.
|
| 3 |
+
#
|
| 4 |
+
# Required files:
|
| 5 |
+
# profiles/cost.csv - Cloud egress cost per region pair ($/GB)
|
| 6 |
+
# profiles/throughput.csv - Measured throughput per region pair (bps)
|
| 7 |
+
# examples/config/*.json - Network configurations for evaluation
|
| 8 |
+
#
|
| 9 |
+
# Usage:
|
| 10 |
+
# cd benchmarks/ADRS/cloudcast
|
| 11 |
+
# bash download_dataset.sh
|
| 12 |
+
|
| 13 |
+
set -euo pipefail
|
| 14 |
+
cd "$(dirname "$0")"
|
| 15 |
+
|
| 16 |
+
BASE_URL="https://huggingface.co/datasets/f20180301/adrs-data/resolve/main/cloudcast"
|
| 17 |
+
|
| 18 |
+
echo "Downloading Cloudcast benchmark data..."
|
| 19 |
+
|
| 20 |
+
# Download profiles
|
| 21 |
+
mkdir -p profiles
|
| 22 |
+
echo " Downloading profiles/cost.csv..."
|
| 23 |
+
wget -q -O profiles/cost.csv "${BASE_URL}/profiles/cost.csv"
|
| 24 |
+
echo " Downloading profiles/throughput.csv..."
|
| 25 |
+
wget -q -O profiles/throughput.csv "${BASE_URL}/profiles/throughput.csv"
|
| 26 |
+
|
| 27 |
+
# Download example configs
|
| 28 |
+
mkdir -p examples/config
|
| 29 |
+
for config in intra_aws.json intra_azure.json intra_gcp.json inter_agz.json inter_gaz2.json; do
|
| 30 |
+
echo " Downloading examples/config/${config}..."
|
| 31 |
+
wget -q -O "examples/config/${config}" "${BASE_URL}/examples/config/${config}"
|
| 32 |
+
done
|
| 33 |
+
|
| 34 |
+
echo ""
|
| 35 |
+
echo "Done. Downloaded files:"
|
| 36 |
+
ls -lh profiles/*.csv
|
| 37 |
+
ls -lh examples/config/*.json
|
benchmarks/ADRS/cloudcast/evaluator/evaluate.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from utils import *
|
| 2 |
+
from simulator import *
|
| 3 |
+
from broadcast import BroadCastTopology
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
import networkx as nx
|
| 6 |
+
import subprocess
|
| 7 |
+
import argparse
|
| 8 |
+
import json
|
| 9 |
+
import sys
|
| 10 |
+
import os
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def N_dijkstra(src, dsts, G, num_partitions):
|
| 14 |
+
h = G.copy()
|
| 15 |
+
h.remove_edges_from(list(h.in_edges(source_node)) + list(nx.selfloop_edges(h)))
|
| 16 |
+
bc_topology = BroadCastTopology(src, dsts, num_partitions)
|
| 17 |
+
|
| 18 |
+
for dst in dsts:
|
| 19 |
+
path = nx.dijkstra_path(h, src, dst, weight="cost")
|
| 20 |
+
for i in range(0, len(path) - 1):
|
| 21 |
+
s, t = path[i], path[i + 1]
|
| 22 |
+
for j in range(bc_topology.num_partitions):
|
| 23 |
+
bc_topology.append_dst_partition_path(dst, j, [s, t, G[s][t]])
|
| 24 |
+
|
| 25 |
+
return bc_topology
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def N_direct(src, dsts, G, num_partitions):
|
| 29 |
+
bc_topology = BroadCastTopology(src, dsts, num_partitions)
|
| 30 |
+
|
| 31 |
+
for dst in dsts:
|
| 32 |
+
edge = G[src][dst]
|
| 33 |
+
for j in range(bc_topology.num_partitions):
|
| 34 |
+
bc_topology.set_dst_partition_paths(dst, j, [[src, dst, edge]])
|
| 35 |
+
|
| 36 |
+
return bc_topology
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def MULTI_MDST(src, dsts, G, num_partitions):
|
| 40 |
+
# Construct MDST path based on original graph
|
| 41 |
+
h = G.copy()
|
| 42 |
+
MDST_graphs = []
|
| 43 |
+
while len(list(h.edges())) > 0:
|
| 44 |
+
_, MDST_graph = MDST(src, dsts, h, 1)
|
| 45 |
+
print("MDST graph: ", MDST_graph.edges.data())
|
| 46 |
+
MDST_graphs.append(MDST_graph)
|
| 47 |
+
h.remove_edges_from(list(MDST_graph.edges()))
|
| 48 |
+
|
| 49 |
+
print("Number of MDSTs: ", len(MDST_graphs))
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def Min_Steiner_Tree(src, dsts, G, num_partitions, hop_limit=3000):
|
| 53 |
+
source_v, dest_v = src, dsts
|
| 54 |
+
|
| 55 |
+
h = G.copy()
|
| 56 |
+
h.remove_edges_from(list(h.in_edges(source_v)) + list(nx.selfloop_edges(h)))
|
| 57 |
+
|
| 58 |
+
nodes, edges = list(h.nodes), list(h.edges)
|
| 59 |
+
num_nodes, num_edges = len(nodes), len(edges)
|
| 60 |
+
id_to_name = {nodes.index(n) + 1: n for n in nodes}
|
| 61 |
+
|
| 62 |
+
config_loc = "write.set"
|
| 63 |
+
write_loc = "test.stplog"
|
| 64 |
+
param_loc = "test.stp"
|
| 65 |
+
|
| 66 |
+
with open(config_loc, "w") as f:
|
| 67 |
+
f.write('stp/logfile = "use_probname"')
|
| 68 |
+
f.close()
|
| 69 |
+
|
| 70 |
+
scipstp_bin = os.environ.get("SCIPSTP_BIN", "scipstp")
|
| 71 |
+
command = f" {scipstp_bin}"
|
| 72 |
+
command += f" -f {param_loc} -s {config_loc} -l {write_loc}"
|
| 73 |
+
|
| 74 |
+
def construct_stp():
|
| 75 |
+
section_begin = '33D32945 STP File, STP Format Version 1.0\n\nSECTION Comment\nName "Relay: cloud regions"\nCreator "SkyDiscover"\n'
|
| 76 |
+
section_begin += f'Remark "Cloud region problem adapted from relay"\nEND\n\nSECTION Graph\n'
|
| 77 |
+
section_begin += f"Nodes {num_nodes}\nEdges {num_edges}\nHopLimit {hop_limit}\n"
|
| 78 |
+
|
| 79 |
+
Edge_info = []
|
| 80 |
+
cnt = 0
|
| 81 |
+
for edge in edges:
|
| 82 |
+
s, d = nodes.index(edge[0]) + 1, nodes.index(edge[1]) + 1
|
| 83 |
+
cost = h[edge[0]][edge[1]]["cost"]
|
| 84 |
+
cnt += 1
|
| 85 |
+
Edge_info.append(f"A {s} {d} {cost}\n")
|
| 86 |
+
if cnt == num_edges:
|
| 87 |
+
Edge_info.append("END\n")
|
| 88 |
+
|
| 89 |
+
s = nodes.index(source_v) + 1
|
| 90 |
+
v = [nodes.index(i) + 1 for i in dest_v]
|
| 91 |
+
terminal_info = [f"T {i}\n" for i in v]
|
| 92 |
+
terminal_info.append("END\n\nEOF")
|
| 93 |
+
section_terminal = f"""\nSECTION Terminals\nRoot {s}\nTerminals {len(dest_v)}\n"""
|
| 94 |
+
|
| 95 |
+
with open(param_loc, "w") as f:
|
| 96 |
+
f.write(section_begin)
|
| 97 |
+
for edge in Edge_info:
|
| 98 |
+
f.write(edge.lstrip())
|
| 99 |
+
f.write(section_terminal)
|
| 100 |
+
for t in terminal_info:
|
| 101 |
+
f.write(t)
|
| 102 |
+
f.close()
|
| 103 |
+
return
|
| 104 |
+
|
| 105 |
+
def read_result(loc):
|
| 106 |
+
di_stree_graph = nx.DiGraph()
|
| 107 |
+
with open(loc, "r") as f:
|
| 108 |
+
lines = f.readlines()
|
| 109 |
+
for line in lines:
|
| 110 |
+
if line.startswith("E") and len(line.split()) == 3:
|
| 111 |
+
l = line.split()
|
| 112 |
+
src_r, dst_r = id_to_name[int(l[1])], id_to_name[int(l[2])]
|
| 113 |
+
di_stree_graph.add_edge(src_r, dst_r, **G[src_r][dst_r])
|
| 114 |
+
|
| 115 |
+
# overlays = [node for node in di_stree_graph.nodes if node not in [source_v]+dest_v]
|
| 116 |
+
return di_stree_graph
|
| 117 |
+
|
| 118 |
+
construct_stp() # construct problem to a file
|
| 119 |
+
process = subprocess.Popen(command, shell=True) # run the steiner tree solver
|
| 120 |
+
process.wait()
|
| 121 |
+
solution_graph = read_result(loc=write_loc)
|
| 122 |
+
|
| 123 |
+
print(
|
| 124 |
+
f"Number of overlays added: {len(solution_graph.nodes) - (1 + len(dsts))}, {[node for node in solution_graph.nodes if node not in [src]+dsts]}"
|
| 125 |
+
)
|
| 126 |
+
bc_topology = BroadCastTopology(src, dsts, num_partitions)
|
| 127 |
+
|
| 128 |
+
os.remove(config_loc)
|
| 129 |
+
os.remove(write_loc)
|
| 130 |
+
os.remove(param_loc)
|
| 131 |
+
|
| 132 |
+
return append_src_dst_paths(src, dsts, solution_graph, bc_topology)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
if __name__ == "__main__":
|
| 136 |
+
parser = argparse.ArgumentParser()
|
| 137 |
+
parser.add_argument("jsonfile", help="input json file")
|
| 138 |
+
parser.add_argument("-a", "--algo", type=str, nargs="?", const="")
|
| 139 |
+
parser.add_argument("-n", "--num-vms", type=int, nargs="?", const="")
|
| 140 |
+
args = vars(parser.parse_args())
|
| 141 |
+
print("Args:", args)
|
| 142 |
+
|
| 143 |
+
print(f"\n==============> Baseline generation")
|
| 144 |
+
with open(args["jsonfile"], "r") as f:
|
| 145 |
+
config_name = args["jsonfile"].split("/")[1].split(".")[0]
|
| 146 |
+
config = json.loads(f.read())
|
| 147 |
+
|
| 148 |
+
# generate default graph with node and edge info
|
| 149 |
+
# G = make_nx_graph(throughput_path="profiles/aws_throughput_11_8.csv")
|
| 150 |
+
G = make_nx_graph(num_vms=int(args["num_vms"]))
|
| 151 |
+
|
| 152 |
+
# src, dst
|
| 153 |
+
source_node = config["source_node"]
|
| 154 |
+
terminal_nodes = config["dest_nodes"]
|
| 155 |
+
|
| 156 |
+
print(f"source_v = '{source_node}'")
|
| 157 |
+
print(f"dest_v = {terminal_nodes}")
|
| 158 |
+
# baseline path generations
|
| 159 |
+
if args["algo"] is None:
|
| 160 |
+
algorithms = [
|
| 161 |
+
"Ndirect",
|
| 162 |
+
"MDST",
|
| 163 |
+
# "HST",
|
| 164 |
+
]
|
| 165 |
+
else:
|
| 166 |
+
algorithms = [args["algo"]]
|
| 167 |
+
print(f"Algorithms: {algorithms}\n")
|
| 168 |
+
|
| 169 |
+
directory = f"paths/{config_name}"
|
| 170 |
+
if not os.path.exists(directory):
|
| 171 |
+
Path(directory).mkdir(parents=True, exist_ok=True)
|
| 172 |
+
|
| 173 |
+
num_partitions = config["num_partitions"]
|
| 174 |
+
for algo in algorithms:
|
| 175 |
+
outf = f"{directory}/{algo}.json"
|
| 176 |
+
print(f"Generate {algo} paths into {outf}")
|
| 177 |
+
if algo == "Ndirect":
|
| 178 |
+
bc_t = N_direct(source_node, terminal_nodes, G, num_partitions)
|
| 179 |
+
elif algo == "MDST":
|
| 180 |
+
bc_t, mdgraph = MDST(source_node, terminal_nodes, G, num_partitions)
|
| 181 |
+
elif algo == "MULTI-MDST":
|
| 182 |
+
bc_t = MULTI_MDST(source_node, terminal_nodes, G, num_partitions)
|
| 183 |
+
elif algo == "HST":
|
| 184 |
+
bc_t = Min_Steiner_Tree(source_node, terminal_nodes, G, num_partitions)
|
| 185 |
+
elif algo == "Ndijkstra":
|
| 186 |
+
bc_t = N_dijkstra(source_node, terminal_nodes, G, num_partitions)
|
| 187 |
+
else:
|
| 188 |
+
raise NotImplementedError(algo)
|
| 189 |
+
|
| 190 |
+
bc_t.set_num_partitions(config["num_partitions"]) # simple baseline, don't care about partitions, simply set it
|
| 191 |
+
|
| 192 |
+
with open(outf, "w") as outfile:
|
| 193 |
+
outfile.write(
|
| 194 |
+
json.dumps(
|
| 195 |
+
{
|
| 196 |
+
"algo": algo,
|
| 197 |
+
"source_node": bc_t.src,
|
| 198 |
+
"terminal_nodes": bc_t.dsts,
|
| 199 |
+
"num_partitions": bc_t.num_partitions,
|
| 200 |
+
"generated_path": bc_t.paths,
|
| 201 |
+
}
|
| 202 |
+
)
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
# put the evaluate logic here
|
| 206 |
+
input_dir = "paths" # input paths
|
| 207 |
+
output_dir = "evals" # eval results
|
| 208 |
+
with open(sys.argv[1], "r") as f:
|
| 209 |
+
config_name = sys.argv[1].split("/")[1].split(".")[0]
|
| 210 |
+
config = json.loads(f.read())
|
| 211 |
+
|
| 212 |
+
input_dir += f"/{config_name}"
|
| 213 |
+
output_dir += f"/{config_name}"
|
| 214 |
+
if not os.path.exists(output_dir):
|
| 215 |
+
Path(output_dir).mkdir(parents=True, exist_ok=True)
|
| 216 |
+
|
| 217 |
+
simulator = BCSimulator(int(args["num_vms"]), output_dir)
|
| 218 |
+
for algo in algorithms:
|
| 219 |
+
path = f"{input_dir}/{algo}.json"
|
| 220 |
+
simulator.evaluate_path(path, config) # path of algorithm output, basic config to evaluate
|
| 221 |
+
|
| 222 |
+
# nx.draw(mdgraph, with_labels=True)
|
| 223 |
+
# plt.show()
|
| 224 |
+
# h.render(filename="Ndirect")
|
benchmarks/ADRS/cloudcast/evaluator/evaluate.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
set -euo pipefail
|
| 3 |
+
|
| 4 |
+
PROGRAM="$1"
|
| 5 |
+
# MODE ($2) accepted but ignored — override this file to use train/test splits.
|
| 6 |
+
|
| 7 |
+
python /benchmark/evaluator.py "$PROGRAM"
|
benchmarks/ADRS/cloudcast/evaluator/evaluator.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.util
|
| 2 |
+
import traceback
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
# Add parent directory to Python path
|
| 9 |
+
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 10 |
+
sys.path.insert(0, parent_dir)
|
| 11 |
+
from utils import *
|
| 12 |
+
from simulator import *
|
| 13 |
+
from broadcast import *
|
| 14 |
+
import networkx as nx
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def validate_broadcast_topology(bc_t, source_node, terminal_nodes, num_partitions, G):
    """
    Validate that the broadcast topology is complete and correct.

    Checks, in order: destination set, source, presence/non-emptiness of
    every partition, edge validity and continuity of every path, and that
    every (dst, partition) pair is actually routed.

    Returns:
        (is_valid, error_message) tuple
    """
    wanted = set(terminal_nodes)
    declared = set(bc_t.dsts)
    # Check 1: the topology must declare exactly the requested destinations.
    if declared != wanted:
        missing_dsts = wanted - declared
        extra_dsts = declared - wanted
        return False, f"Destination mismatch: missing={missing_dsts}, extra={extra_dsts}"

    # Check 2: the topology must be rooted at the requested source.
    if bc_t.src != source_node:
        return False, f"Source mismatch: expected={source_node}, got={bc_t.src}"

    missing_partitions, empty_partitions, invalid_paths = [], [], []

    def first_path_error(edge_list):
        """Walk edge_list from the source; return (error or None, last node reached)."""
        cursor = source_node
        for hop in edge_list:
            if len(hop) < 3:
                return "edge format invalid", cursor
            hop_src, hop_dst = hop[0], hop[1]
            # The hop must exist in the network graph.
            if not G.has_edge(hop_src, hop_dst):
                return f"edge {hop_src}->{hop_dst} not in graph", cursor
            # Consecutive hops must chain: this hop starts where the last ended.
            if cursor != hop_src:
                return f"path discontinuity: expected {cursor}, got {hop_src}", cursor
            cursor = hop_dst
        return None, cursor

    # Checks 3 & 4: every partition present, non-empty, and a valid route.
    for dst in terminal_nodes:
        if dst not in bc_t.paths:
            return False, f"Missing destination '{dst}' in paths"

        per_dst = bc_t.paths[dst]
        for pid in range(num_partitions):
            key = str(pid)

            if key not in per_dst:
                missing_partitions.append((dst, pid))
                continue

            edge_list = per_dst[key]
            if edge_list is None or len(edge_list) == 0:
                empty_partitions.append((dst, pid))
                continue

            err, end_node = first_path_error(edge_list)
            if err is not None:
                invalid_paths.append((dst, pid, err))
            elif end_node != dst:
                invalid_paths.append((dst, pid, f"path does not reach destination: ends at {end_node}, expected {dst}"))

    # Compile validation errors
    errors = []
    if missing_partitions:
        errors.append(f"Missing partitions: {missing_partitions}")
    if empty_partitions:
        errors.append(f"Empty partitions: {empty_partitions}")
    if invalid_paths:
        errors.append(f"Invalid paths: {invalid_paths}")

    if errors:
        return False, "Validation failed: " + "; ".join(errors)

    # Check 5: every (dst, partition) pair carries data — no silent loss.
    expected_total_partitions = len(terminal_nodes) * num_partitions
    actual_partitions = sum(
        1
        for dst in terminal_nodes
        for pid in range(num_partitions)
        if bc_t.paths[dst].get(str(pid))
    )

    if actual_partitions != expected_total_partitions:
        return False, f"Data loss detected: expected {expected_total_partitions} partitions, got {actual_partitions}"

    return True, None
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def evaluate(program_path):
    """
    Evaluate the evolved broadcast optimization program across multiple configurations.

    Loads the module at *program_path*, runs its ``search_algorithm`` on every
    bundled example configuration, validates the returned broadcast topology,
    simulates it with ``BCSimulator``, and aggregates the per-config costs.

    Args:
        program_path: Path to the evolved program file

    Returns:
        Dictionary with evaluation metrics including required 'combined_score'
        (higher is better; 0.0 on any failure).
    """
    try:
        # Load the evolved program as a throwaway module.
        spec = importlib.util.spec_from_file_location("program", program_path)
        program = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(program)

        # The evolved program must expose the entry point called below.
        if not hasattr(program, "search_algorithm"):
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": "Missing search_algorithm function"
            }

        # Configuration - individual JSON file paths (relative to evaluator location)
        evaluator_dir = os.path.dirname(os.path.abspath(__file__))
        config_files = [
            os.path.join(evaluator_dir, "examples/config/intra_aws.json"),
            os.path.join(evaluator_dir, "examples/config/intra_azure.json"),
            os.path.join(evaluator_dir, "examples/config/intra_gcp.json"),
            os.path.join(evaluator_dir, "examples/config/inter_agz.json"),
            os.path.join(evaluator_dir, "examples/config/inter_gaz2.json")
        ]

        # Filter to only include files that exist
        existing_configs = [f for f in config_files if os.path.exists(f)]

        if not existing_configs:
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": f"No configuration files found. Checked: {config_files}"
            }

        num_vms = 2
        total_cost = 0.0
        successful_configs = 0
        failed_configs = 0

        # Process each configuration file
        for jsonfile in existing_configs:
            try:
                print(f"Processing config: {os.path.basename(jsonfile)}")

                # Load configuration
                with open(jsonfile, "r") as f:
                    config_name = os.path.basename(jsonfile).split(".")[0]
                    config = json.loads(f.read())

                # Create graph of regions/links the algorithm searches over.
                G = make_nx_graph(num_vms=int(num_vms))

                # Source and destination nodes
                source_node = config["source_node"]
                terminal_nodes = config["dest_nodes"]

                # Create output directory for the generated paths.
                directory = f"paths/{config_name}"
                if not os.path.exists(directory):
                    Path(directory).mkdir(parents=True, exist_ok=True)

                # Run the evolved algorithm
                num_partitions = config["num_partitions"]
                bc_t = program.search_algorithm(source_node, terminal_nodes, G, num_partitions)

                # Re-assert the configured partition count in case the evolved
                # code altered it.
                bc_t.set_num_partitions(config["num_partitions"])

                # Validate the broadcast topology before evaluation
                is_valid, validation_error = validate_broadcast_topology(
                    bc_t, source_node, terminal_nodes, num_partitions, G
                )

                if not is_valid:
                    print(f"Validation failed for {config_name}: {validation_error}")
                    # An invalid topology fails the whole evaluation immediately.
                    return {
                        "combined_score": 0.0,
                        "runs_successfully": 0.0,
                        "error": f"Invalid broadcast topology: {validation_error}"
                    }

                # Save the generated paths for inspection / later simulation.
                outf = f"{directory}/search_algorithm.json"
                with open(outf, "w") as outfile:
                    outfile.write(
                        json.dumps(
                            {
                                "algo": "search_algorithm",
                                "source_node": bc_t.src,
                                "terminal_nodes": bc_t.dsts,
                                "num_partitions": bc_t.num_partitions,
                                "generated_path": bc_t.paths,
                            }
                        )
                    )

                # Evaluate the generated paths
                output_dir = f"evals/{config_name}"
                if not os.path.exists(output_dir):
                    Path(output_dir).mkdir(parents=True, exist_ok=True)

                # Run simulation
                simulator = BCSimulator(int(num_vms), output_dir)
                _, cost = simulator.evaluate_path(outf, config)

                # Accumulate results
                total_cost += cost
                successful_configs += 1

                print(f"Config {config_name}: cost={cost:.2f}")

            except Exception as e:
                print(f"Failed to process {os.path.basename(jsonfile)}: {str(e)}")
                failed_configs += 1
                # One bad config aborts the remaining ones; the check below
                # converts this into an overall failure result.
                break

        # Any failed config fails the whole evaluation.
        if failed_configs != 0:
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": "1 or more configuration files failed to process"
            }

        # Calculate aggregate metrics. successful_configs > 0 is guaranteed
        # here: existing_configs was non-empty and no config failed.
        avg_cost = total_cost / successful_configs
        success_rate = successful_configs / (successful_configs + failed_configs)

        print(f"Summary: {successful_configs} successful, {failed_configs} failed")
        print(f"Total cost: {total_cost:.2f}")

        # Calculate metrics for SkyDiscover.
        # Normalize scores (higher is better): lower total cost = higher score.
        cost_score = 1.0 / (1.0 + total_cost)

        # Combined score considering total cost, and success rate
        combined_score = cost_score

        return {
            "combined_score": combined_score,  # Required by SkyDiscover
            "runs_successfully": success_rate,
            "total_cost": total_cost,
            "avg_cost": avg_cost,
            "successful_configs": successful_configs,
            "failed_configs": failed_configs,
            "cost_score": cost_score,
            "success_rate": success_rate
        }

    except Exception as e:
        print(f"Evaluation failed: {str(e)}")
        print(traceback.format_exc())
        return {
            "combined_score": 0.0,  # Required by SkyDiscover
            "runs_successfully": 0.0,
            "error": str(e)
        }
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
if __name__ == "__main__":
    # Backwards-compat shim: wrapper.run() bridges the old
    # evaluate() -> dict interface to the container JSON protocol.
    # wrapper.py is auto-injected at build time from
    # skydiscover/evaluation/wrapper.py.
    import wrapper

    wrapper.run(evaluate)
|
benchmarks/ADRS/cloudcast/evaluator/requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
networkx>=3.2,<3.4
|
| 2 |
+
pandas
|
benchmarks/ADRS/cloudcast/evaluator/simulator.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
import networkx as nx
|
| 3 |
+
import json
|
| 4 |
+
from broadcast import *
|
| 5 |
+
from utils import *
|
| 6 |
+
|
| 7 |
+
class BCSimulator:
    """Simulates a broadcast transfer over a generated topology.

    Given the per-destination, per-partition edge lists produced by a search
    algorithm, reconstructs the flow graph, applies per-provider
    ingress/egress caps, and reports transfer time and total
    (egress + instance) cost.
    """

    # Default variables
    data_vol: float = 4.0  # size of data to be sent to multiple dsts (GB)
    num_partitions: int = 1
    partition_data_vol: int = data_vol / num_partitions
    default_vms_per_region: int = 1
    cost_per_instance_hr: float = 0.54  # based on m5.8xlarge spot
    src: str
    dsts: List[str]
    algo: str
    # NOTE(review): this binds the DiGraph *class* (not an instance) as a
    # placeholder; evaluate_path() replaces it with a real graph.
    g = nx.DiGraph

    def __init__(self, num_vms, output_dir=None):
        # Directory for optional JSON result output.
        self.output_dir = output_dir
        self.default_vms_per_region = num_vms

    def initialization(self, path, config):
        """Load a topology (JSON file path or topology object) and config knobs."""
        # `path` may be a JSON file path or an in-memory topology object.
        if isinstance(path, str):
            # Read from json
            with open(path, "r") as f:
                data = json.loads(f.read())
        else:
            data = {
                "algo": "none",
                "source_node": path.src,
                "terminal_nodes": path.dsts,
                "num_partitions": path.num_partitions,
                "generated_path": path.paths,
            }

        self.src = data["source_node"]
        self.dsts = data["terminal_nodes"]
        self.algo = data["algo"]
        self.paths = data["generated_path"]

        self.num_partitions = config["num_partitions"]
        self.data_vol = config["data_vol"]
        self.partition_data_vol = self.data_vol / self.num_partitions

        # Default in/egress limit if not set
        providers = ["aws", "gcp", "azure"]
        provider_ingress = [10, 16, 16]
        provider_egress = [5, 7, 16]
        self.ingress_limits = {providers[i]: provider_ingress[i] for i in range(len(providers))}
        self.egress_limits = {providers[i]: provider_egress[i] for i in range(len(providers))}

        # Config-provided limits are scaled by VM count; the defaults above
        # are not — preserved as-is (TODO confirm this asymmetry is intended).
        if "ingress_limit" in config:
            for p, limit in config["ingress_limit"].items():
                self.ingress_limits[p] = self.default_vms_per_region * limit

        if "egress_limit" in config:
            for p, limit in config["egress_limit"].items():
                self.egress_limits[p] = self.default_vms_per_region * limit
        # print("Data vol (Gbit): ", self.data_vol * 8)
        print("Ingress limits: ", self.ingress_limits)
        print("Egress limits: ", self.egress_limits)

    def evaluate_path(self, path, config, write_to_file=False):
        """Evaluate one topology; returns (max transfer time, total cost)."""
        print(f"\n==============> Evaluation")
        self.initialization(path, config)

        # construct graph
        print(f"\n--------- Algo: {self.algo}")
        self.g = self.__construct_g()
        print("\n=> Data path to dests")
        # NOTE(review): the loop variable intentionally reuses the name `path`
        # (shadowing the parameter) so the write_to_file payload below keeps
        # its historical meaning: the last simple path printed.
        for path in self.__get_path():
            print("--")
            print(path)
            for i in range(len(path) - 1):
                print(f"Flow: {self.g[path[i]][path[i+1]]['flow']}")
                print(f"Actual throughput: {round(self.g[path[i]][path[i+1]]['throughput'], 4)}")
                print(f"Cost: {self.g[path[i]][path[i+1]]['cost']}\n")

        # evaluate transfer time and total cost
        max_t, avg_t, last_dst = self.__transfer_time()
        self.cost = self.__total_cost()

        # output to json file
        if write_to_file:
            open(f"{self.output_dir}/{self.algo}_eval.json", "w").write(
                json.dumps(
                    {
                        "path": path,
                        "max_transfer_time": max_t,
                        "avg_transfer_time": avg_t,
                        "last_dst": last_dst,
                        "tot_cost": self.cost,
                    }
                )
            )
        return max_t, self.cost

    def __construct_g(self):
        """Build the flow graph from the per-partition edge lists, then cap flows."""
        # construct a graph based on the given topology
        g = nx.DiGraph()
        for dst in self.dsts:
            for partition_id in range(self.num_partitions):
                print(self.paths)
                print("Num of partitions: ", self.num_partitions)
                for edge in self.paths[dst][str(partition_id)]:
                    # Fix: use dedicated names for the edge endpoints instead of
                    # unpacking into `src`/`dst`, which clobbered the outer
                    # `dst` loop variable (it only happened to work because
                    # every valid path's last edge ends at `dst`).
                    u, v, edge_data = edge[0], edge[1], edge[2]
                    if not g.has_edge(u, v):
                        throughput = edge_data["throughput"]  # * self.default_vms_per_region
                        g.add_edge(u, v, throughput=throughput, cost=edge_data["cost"], flow=throughput)
                        g[u][v]["partitions"] = set()
                    g[u][v]["partitions"].add(partition_id)

        print(f"Default vms: {self.default_vms_per_region}")
        # Proportionally share if exceed in/egress limit of any node
        for node in g.nodes:
            # Region names look like "provider:region"; limits are per provider.
            provider = node.split(":")[0]

            in_edges, out_edges = g.in_edges(node), g.out_edges(node)
            in_flow_sum = sum(g[u][v]["flow"] for u, v in in_edges)
            out_flow_sum = sum(g[u][v]["flow"] for u, v in out_edges)

            if in_flow_sum > self.ingress_limits[provider]:
                # Split the ingress cap evenly across incoming flows
                # (alternative: proportional to each flow's throughput).
                for u, v in in_edges:
                    flow_proportion = 1 / len(list(in_edges))
                    g[u][v]["flow"] = min(g[u][v]["flow"], self.ingress_limits[provider] * flow_proportion)

            if out_flow_sum > self.egress_limits[provider]:
                # Split the egress cap evenly across outgoing flows.
                for u, v in out_edges:
                    flow_proportion = 1 / len(list(out_edges))
                    print(f"src: {u}, dst: {v}, flow proportion: {flow_proportion}")
                    g[u][v]["flow"] = min(g[u][v]["flow"], self.egress_limits[provider] * flow_proportion)

        return g

    def __get_path(self):
        """All simple src->dst paths present in the constructed graph."""
        all_paths = [path for node in self.dsts for path in nx.all_simple_paths(self.g, self.src, node)]
        return all_paths

    def __slowest_capacity_link(self):
        """Minimum raw link throughput in the graph (currently unused helper)."""
        min_tput = min([edge[-1]["throughput"] for edge in self.g.edges().data()])
        return min_tput

    def __transfer_time(self, log=True):
        """Per-destination completion time; returns (max, avg, slowest dsts)."""
        # time for each (src, dst) pair; a destination is done when its
        # slowest partition arrives.
        t_dict = dict()
        for dst in self.dsts:
            partition_time = float("-inf")
            for i in range(self.num_partitions):
                path_edges = self.paths[dst][str(i)]
                bottleneck = min(self.g[e[0]][e[1]]['flow'] for e in path_edges)
                t = self.partition_data_vol / bottleneck if bottleneck > 0 else float('inf')
                partition_time = max(partition_time, t)
            t_dict[dst] = partition_time

        max_t = max(t_dict.values())
        last_dst = [k for k, v in t_dict.items() if v == max_t]  # last dst receiving obj
        avg_t = sum(t_dict.values()) / len(t_dict.values())
        return max_t, avg_t, last_dst

    def __total_cost(self):
        """Egress cost per edge (weighted by partition count) plus VM-time cost."""
        sum_egress_cost = 0
        for edge in self.g.edges.data():
            edge_data = edge[-1]
            sum_egress_cost += (
                len(edge_data["partitions"]) * self.partition_data_vol * edge_data["cost"]
            )

        runtime_s, _, _ = self.__transfer_time(log=False)
        runtime_s = round(runtime_s, 2)
        sum_instance_cost = 0
        for node in self.g.nodes():
            # Every region runs default_vms_per_region instances for runtime_s.
            sum_instance_cost += self.default_vms_per_region * (self.cost_per_instance_hr / 3600) * runtime_s

        sum_cost = sum_egress_cost + sum_instance_cost
        return sum_cost
|
benchmarks/ADRS/cloudcast/evaluator/utils.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import networkx as nx
|
| 2 |
+
from broadcast import *
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import time
|
| 5 |
+
import functools
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
GBIT_PER_GBYTE = 8
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Timer:
    """Context-manager stopwatch measuring elapsed wall-clock seconds."""

    def __init__(self, print_desc=None):
        # Optional label; stored but not printed by this class itself.
        self.print_desc = print_desc
        self.start = time.time()
        self.end = None

    def __enter__(self):
        return self

    def __exit__(self, exc_typ, exc_val, exc_tb):
        self.end = time.time()

    @property
    def elapsed(self):
        """Seconds since start; a live reading while the timer is still open."""
        stop = time.time() if self.end is None else self.end
        return stop - self.start
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@functools.lru_cache(maxsize=None)
def get_path_cost(src, dst, src_tier="PREMIUM", dst_tier="PREMIUM"):
    """Memoized $/GB transfer cost between two regions via skyplane.

    Only the PREMIUM network tier is supported on either end.
    """
    from skyplane import compute

    assert dst_tier == "PREMIUM" and src_tier == "PREMIUM"
    return compute.CloudProvider.get_transfer_cost(src, dst)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def make_nx_graph(cost_path=None, throughput_path=None, num_vms=1):
    """
    Default graph with capacity constraints and cost info.
    nodes: regions, edges: links
    per edge:
        throughput: max tput achievable (gbps)
        cost: $/GB
        flow: actual flow (gbps), must be < throughput, default = 0

    Falls back to the bundled profiles/ CSVs next to this file when no
    explicit cost/throughput paths are given.
    """
    profile_dir = os.path.dirname(os.path.abspath(__file__))
    cost = pd.read_csv(cost_path if cost_path is not None else os.path.join(profile_dir, "profiles/cost.csv"))
    throughput = pd.read_csv(
        throughput_path if throughput_path is not None else os.path.join(profile_dir, "profiles/throughput.csv")
    )

    G = nx.DiGraph()
    for _, row in throughput.iterrows():
        if row["src_region"] == row["dst_region"]:
            continue  # skip self-loops
        G.add_edge(row["src_region"], row["dst_region"], cost=None, throughput=num_vms * row["throughput_sent"] / 1e9)

    for _, row in cost.iterrows():
        if row["src"] in G and row["dest"] in G[row["src"]]:
            G[row["src"]][row["dest"]]["cost"] = row["cost"]

    # some pairs not in the cost grid
    no_cost_pairs = [(u, v) for u, v, data in G.edges.data() if data["cost"] is None]
    print("Unable to get costs for: ", no_cost_pairs)

    return G
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def push_flow_helper(src, g, ingress_limit=10 * 5, egress_limit=10 * 5):
    """
    Push positive flows in the constructed paths (g) under constraints.

    Each subtree hanging off a direct child of `src` gets the flow of its
    slowest link, clamped by the given ingress/egress limits.
    """
    for child in list(g.successors(src)):
        reachable = list(nx.dfs_edges(g, source=child))
        subtree_min = min((g[a][b]["throughput"] for a, b in reachable), default=float("inf"))
        min_flow = min(subtree_min, g[src][child]["throughput"], ingress_limit, egress_limit)

        # assign the bottleneck flow to the whole subtree
        g[src][child]["flow"] = min_flow
        for a, b in reachable:
            g[a][b]["flow"] = min_flow
    return g
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def append_src_dst_paths(src, dsts, G, bc_topology):
    """Record every simple src->dst path on the topology.

    All partitions of a destination follow the same set of paths.
    """
    for dst in dsts:
        for route in list(nx.all_simple_paths(G, src, dst)):
            for s, t in zip(route, route[1:]):
                for j in range(bc_topology.num_partitions):
                    bc_topology.append_dst_partition_path(dst, j, [s, t, G[s][t]])
    return bc_topology
|
benchmarks/ADRS/cloudcast/evaluator/wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Backwards-compat wrapper for old Python-based evaluators.
|
| 2 |
+
|
| 3 |
+
Old-style evaluators define ``evaluate(program_path) -> dict``. This module
|
| 4 |
+
bridges that interface to the container JSON protocol expected by
|
| 5 |
+
ContainerizedEvaluator.
|
| 6 |
+
|
| 7 |
+
Usage — add this to the bottom of your evaluator.py::
|
| 8 |
+
|
| 9 |
+
if __name__ == "__main__":
|
| 10 |
+
from wrapper import run
|
| 11 |
+
run(evaluate)
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import json
|
| 15 |
+
import sys
|
| 16 |
+
import traceback
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def run(evaluate_fn):
    """Call *evaluate_fn*, format the result as container-protocol JSON on stdout.

    * Reads ``sys.argv[1]`` as the program path.
    * Redirects stdout -> stderr while *evaluate_fn* runs so that debug prints
      don't contaminate the JSON output.
    * Separates numeric metrics from non-numeric artifacts.
    * Guarantees ``combined_score`` is always present in metrics.
    """
    if len(sys.argv) < 2:
        print("Usage: evaluator.py <program_path>", file=sys.stderr)
        sys.exit(1)

    program_path = sys.argv[1]

    def emit(payload):
        # Single point that writes protocol JSON to the (current) stdout.
        print(json.dumps(payload))

    # Keep the real stdout aside and send evaluator prints to stderr so only
    # the final JSON document lands on stdout.
    saved_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        result = evaluate_fn(program_path)
    except Exception as e:
        sys.stdout = saved_stdout
        emit(
            {
                "status": "error",
                "combined_score": 0.0,
                "metrics": {"combined_score": 0.0},
                "artifacts": {
                    "error": str(e),
                    "traceback": traceback.format_exc(),
                },
            }
        )
        return
    sys.stdout = saved_stdout

    if not isinstance(result, dict):
        emit(
            {
                "status": "error",
                "combined_score": 0.0,
                "metrics": {"combined_score": 0.0},
                "artifacts": {
                    "error": f"evaluate() returned {type(result).__name__}, expected dict"
                },
            }
        )
        return

    # Numeric values become metrics; strings and JSON-able containers become
    # artifacts; anything else is dropped.
    metrics = {}
    artifacts = {}
    for key, value in result.items():
        if isinstance(value, (bool, int, float)):
            metrics[key] = float(value)
        elif isinstance(value, str):
            artifacts[key] = value
        elif isinstance(value, (list, dict)):
            artifacts[key] = json.dumps(value)

    metrics.setdefault("combined_score", 0.0)

    status = "error" if "error" in artifacts else "success"
    output = {
        "status": status,
        "combined_score": metrics["combined_score"],
        "metrics": metrics,
    }
    if artifacts:
        output["artifacts"] = artifacts

    emit(output)
|
benchmarks/ADRS/cloudcast/initial_program.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
import networkx as nx
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import pandas as pd
|
| 6 |
+
from typing import Dict, List
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def search_algorithm(src, dsts, G, num_partitions):
    """Cheapest-path broadcast: one Dijkstra route (by cost) per destination,
    shared by every partition."""
    pruned = G.copy()
    # The source never receives data; drop its in-edges and any self-loops.
    pruned.remove_edges_from(list(pruned.in_edges(src)) + list(nx.selfloop_edges(pruned)))
    bc_topology = BroadCastTopology(src, dsts, num_partitions)

    for dst in dsts:
        route = nx.dijkstra_path(pruned, src, dst, weight="cost")
        for s, t in zip(route, route[1:]):
            for j in range(bc_topology.num_partitions):
                bc_topology.append_dst_partition_path(dst, j, [s, t, G[s][t]])

    return bc_topology
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class SingleDstPath(Dict):
    """Dict-shaped record describing one destination's path."""

    partition: int
    edges: List[List]  # [[src, dst, edge data]]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class BroadCastTopology:
    """Per-destination, per-partition edge lists describing a broadcast plan.

    ``paths`` maps dst -> {partition (str) -> list of [src, dst, edge data]},
    e.g. {dst1: {partition1: [src->node1, node1->dst1], partition 2: [src->dst1]}}.
    """

    def __init__(self, src: str, dsts: List[str], num_partitions: int = 4, paths: Dict[str, SingleDstPath] = None):
        self.src = src  # single str
        self.dsts = dsts  # list of strs
        self.num_partitions = num_partitions

        if paths is None:
            # Start with an empty slot for every (dst, partition) pair.
            self.paths = {dst: {str(i): None for i in range(num_partitions)} for dst in dsts}
        else:
            self.paths = paths
            # NOTE(review): set_graph() is not defined on this class — this
            # branch would raise AttributeError if ever taken; verify upstream.
            self.set_graph()

    def get_paths(self):
        """Return the full dst -> partition -> edges mapping (with debug print)."""
        print(f"now the set path is: {self.paths}")
        return self.paths

    def set_num_partitions(self, num_partitions: int):
        """Override the partition count."""
        self.num_partitions = num_partitions

    def set_dst_partition_paths(self, dst: str, partition: int, paths: List[List]):
        """
        Set paths for partition = partition to reach dst
        """
        self.paths[dst][str(partition)] = paths

    def append_dst_partition_path(self, dst: str, partition: int, path: List):
        """
        Append path for partition = partition to reach dst
        """
        key = str(partition)
        if self.paths[dst][key] is None:
            self.paths[dst][key] = []
        self.paths[dst][key].append(path)
|
| 65 |
+
|
| 66 |
+
def make_nx_graph(cost_path=None, throughput_path=None, num_vms=1):
    """
    Default graph with capacity constraints and cost info.
    nodes: regions, edges: links
    per edge:
        throughput: max tput achievable (gbps)
        cost: $/GB
        flow: actual flow (gbps), must be < throughput, default = 0
    """
    # Use relative path from this file's location
    base_dir = os.path.dirname(os.path.abspath(__file__))

    cost = pd.read_csv(cost_path if cost_path is not None else os.path.join(base_dir, "profiles/cost.csv"))
    throughput = pd.read_csv(
        throughput_path if throughput_path is not None else os.path.join(base_dir, "profiles/throughput.csv")
    )

    G = nx.DiGraph()
    for _, row in throughput.iterrows():
        if row["src_region"] == row["dst_region"]:
            continue  # no self-loops
        G.add_edge(row["src_region"], row["dst_region"], cost=None, throughput=num_vms * row["throughput_sent"] / 1e9)

    for _, row in cost.iterrows():
        if row["src"] in G and row["dest"] in G[row["src"]]:
            G[row["src"]][row["dest"]]["cost"] = row["cost"]

    # some pairs not in the cost grid
    no_cost_pairs = [(u, v) for u, v, data in G.edges.data() if data["cost"] is None]
    print("Unable to get costs for: ", no_cost_pairs)

    return G
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# EVOLVE-BLOCK-END
|
| 110 |
+
|
| 111 |
+
# Helper functions that won't be evolved
|
| 112 |
+
def create_broadcast_topology(src: str, dsts: List[str], num_partitions: int = 4):
    """Convenience factory for a BroadCastTopology instance."""
    return BroadCastTopology(src, dsts, num_partitions)
|
| 115 |
+
|
| 116 |
+
def run_search_algorithm(src: str, dsts: List[str], G, num_partitions: int):
    """Thin wrapper that invokes the evolvable search_algorithm."""
    return search_algorithm(src, dsts, G, num_partitions)
|
benchmarks/ADRS/eplb/config.yaml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Expert Parallelism Load Balancer (EPLB) — MoE Expert Rearrangement
|
| 2 |
+
# Usage: skydiscover-run initial_program.py evaluator.py -c config.yaml -s <strategy>
|
| 3 |
+
# NOTE: Requires expert-load.json — see README.md for download instructions.
|
| 4 |
+
language: python
|
| 5 |
+
diff_based_generation: true
|
| 6 |
+
max_iterations: 100
|
| 7 |
+
checkpoint_interval: 5
|
| 8 |
+
max_solution_length: 60000
|
| 9 |
+
|
| 10 |
+
llm:
|
| 11 |
+
api_base: https://api.openai.com/v1
|
| 12 |
+
models:
|
| 13 |
+
- name: "gpt-5"
|
| 14 |
+
weight: 1.0
|
| 15 |
+
max_tokens: 32000
|
| 16 |
+
timeout: 600
|
| 17 |
+
|
| 18 |
+
prompt:
|
| 19 |
+
system_message: |-
|
| 20 |
+
You are an expert programmer specializing in optimization algorithms. Your task
|
| 21 |
+
is to improve the Mixture-of-Expert models Expert Parallelism Load Balancer
|
| 22 |
+
(MoE EPLB) expert rearrangement algorithm.
|
| 23 |
+
|
| 24 |
+
This algorithm will take the load metrics recorded by the vLLM server, and
|
| 25 |
+
rearrange the experts to balance the load. It can make replicas of some experts
|
| 26 |
+
to achieve better load balancing.
|
| 27 |
+
|
| 28 |
+
Your goal will be two-fold:
|
| 29 |
+
1. Improve the algorithm to achieve better load balancing; while
|
| 30 |
+
2. Improve the algorithm to be more efficient, i.e. reduce the execution time
|
| 31 |
+
of the algorithm itself, since perfect load balancing is NP-hard.
|
| 32 |
+
|
| 33 |
+
The current algorithm is implemented in the `rebalance_experts` function.
|
| 34 |
+
|
| 35 |
+
evaluator:
|
| 36 |
+
timeout: 360
|
| 37 |
+
|
benchmarks/ADRS/eplb/evaluator/Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
WORKDIR /benchmark

# Install dependencies in their own layer (before COPY . .) so code-only
# changes do not invalidate the pip-install layer cache.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# wrapper.py provides backwards compatibility for old Python-based evaluators
# that define evaluate(program_path) -> dict, bridging them to the container
# JSON protocol. Source of truth: skydiscover/evaluation/wrapper.py
COPY . .
RUN chmod +x evaluate.sh

# evaluate.sh receives the candidate program path as its first argument.
ENTRYPOINT ["./evaluate.sh"]
|
benchmarks/ADRS/eplb/evaluator/evaluate.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Container entry point: forwards the candidate program ($1) to the Python
# evaluator, which prints a single JSON result object on stdout.
set -euo pipefail

PROGRAM="$1"
# MODE ($2) accepted but ignored — override this file to use train/test splits.

python /benchmark/evaluator.py "$PROGRAM"
|
benchmarks/ADRS/eplb/evaluator/evaluate_best_program.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Evaluate a best_program.py file using the eplb evaluator.
|
| 4 |
+
Runs multiple times and averages the results.
|
| 5 |
+
"""
|
| 6 |
+
import sys
|
| 7 |
+
import json
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from evaluator import evaluate
|
| 10 |
+
|
| 11 |
+
def main():
    """Evaluate a best_program.py several times with the eplb evaluator and
    print per-run scores followed by metrics averaged over all runs.

    CLI: evaluate_best_program.py <path_to_best_program.py> [num_runs]
    Exits with status 1 on bad arguments, a missing file, or any failed run.
    """
    if len(sys.argv) < 2:
        print("Usage: evaluate_best_program.py <path_to_best_program.py> [num_runs]")
        sys.exit(1)

    program_path = Path(sys.argv[1])
    if not program_path.exists():
        print(f"Error: File not found: {program_path}")
        sys.exit(1)

    num_runs = int(sys.argv[2]) if len(sys.argv) > 2 else 3

    print(f"Evaluating: {program_path}")
    print(f"Running {num_runs} times and averaging results...")
    print("=" * 60)

    results = []
    for run in range(1, num_runs + 1):
        print(f"\n--- Run {run}/{num_runs} ---")
        result = evaluate(str(program_path))

        # Abort on the first failed run; a partial average would be misleading.
        if "error" in result:
            print(f"❌ Error in run {run}: {result['error']}")
            sys.exit(1)

        results.append(result)
        print(f"Run {run} - Combined Score: {result.get('combined_score', 0.0):.6f}")

    # Average every metric over the runs; tuple order fixes the JSON key order.
    metric_keys = (
        "balancedness_score_gpu",
        "balancedness_score_expert",
        "times_algorithm",
        "times_inference",
        "speed_score",
        "combined_score",
    )
    avg_result = {
        key: sum(r.get(key, 0.0) for r in results) / len(results)
        for key in metric_keys
    }

    print("\n" + "=" * 60)
    print("AVERAGED RESULTS (over {} runs):".format(num_runs))
    print("=" * 60)
    print(json.dumps(avg_result, indent=2))

    print("\n" + "-" * 60)
    print("Summary:")
    print(f"✅ Combined Score: {avg_result['combined_score']:.6f}")
    print(f" Balancedness (GPU): {avg_result['balancedness_score_gpu']:.6f}")
    print(f" Balancedness (Expert): {avg_result['balancedness_score_expert']:.6f}")
    print(f" Speed Score: {avg_result['speed_score']:.6f}")
    print(f" Avg Algorithm Time: {avg_result['times_algorithm']:.6f}s")
    print(f" Avg Inference Time: {avg_result['times_inference']:.6f}s")
    print("-" * 60)


if __name__ == "__main__":
    main()
|
| 66 |
+
|
benchmarks/ADRS/eplb/evaluator/evaluator.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import importlib.util
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
import traceback
|
| 6 |
+
from typing import TypedDict
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
# Get the directory of this file and construct workload path
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKLOAD_PATH = os.path.join(_CURRENT_DIR, "expert-load.json")
REBALANCE_INTERVAL = 100

NUM_REPLICAS = 288
NUM_GROUPS = 8
NUM_GPUS = 32
NUM_NODES = 4


@functools.cache
def load_workloads(path: str) -> list[torch.Tensor]:
    """Load the recorded expert-load trace and aggregate it per rebalance window.

    The JSON file holds ``load_history``: a list of per-step records, each with
    a ``logical_expert_load`` vector. Steps are grouped into consecutive chunks
    of REBALANCE_INTERVAL (the final chunk may be shorter) and each chunk is
    summed element-wise into one tensor.

    Cached by path, so repeated calls during an evaluation read the file once.

    Args:
        path: Path to the expert-load JSON trace.

    Returns:
        One summed load tensor per rebalance interval, in trace order.
    """
    with open(path, "r") as f:
        trace = json.load(f)

    history = trace['load_history']
    workloads: list[torch.Tensor] = []
    for chunk_start in range(0, len(history), REBALANCE_INTERVAL):
        chunk = history[chunk_start:chunk_start + REBALANCE_INTERVAL]
        per_step = torch.tensor([step['logical_expert_load'] for step in chunk])
        workloads.append(per_step.sum(dim=0))

    return workloads
|
| 36 |
+
|
| 37 |
+
class EvaluationResult(TypedDict, total=False):
    """Metrics dict returned by evaluate().

    total=False: all keys are optional at the type level; in practice
    evaluate() always fills the numeric keys and adds ``error`` only on
    failure paths.
    """
    balancedness_score_gpu: float      # avg GPU load / max GPU load, averaged over intervals
    balancedness_score_expert: float   # avg expert load / max expert load, averaged over intervals
    times_algorithm: float             # mean wall-clock seconds of rebalance_experts() alone
    times_inference: float             # mean wall-clock seconds including the simulated inference
    speed_score: float                 # 0.002 / times_inference (higher is faster)
    combined_score: float              # (balancedness_score_expert + speed_score) / 2
    error: str                         # present only when evaluation failed
|
| 45 |
+
|
| 46 |
+
def simulate_inference(
    log2phy: torch.Tensor,
    logcnt: torch.Tensor,
    workload: torch.Tensor,
) -> tuple[float, float]:
    '''
    Simulate a MoE inference with the given expert mapping, and return the balancedness factor.

    Args:
        log2phy: (num_layers, num_logical_experts, max_replicas) map from each
            logical expert to the physical slots holding its replicas; only the
            first ``logcnt`` entries per expert are read.
        logcnt: (num_layers, num_logical_experts) replica count per logical expert.
        workload: (num_layers, num_logical_experts) load per logical expert.

    Returns:
        (balancedness_gpu, balancedness_expert), each avg/max in (0, 1]
        summed over layers; (0.0, 0.0) when the total load is zero.
    '''
    # workload shape: (num_layers, num_logical_experts) — per-layer load of each logical expert
    num_layers, num_logical_experts = workload.shape

    # Accumulator for load landing on each physical expert slot
    num_physical_experts = NUM_REPLICAS
    total_physical_load = torch.zeros(num_layers, num_physical_experts, dtype=torch.float, device=workload.device)

    # For each logical expert, spread its load evenly over its physical replicas
    for layer_id in range(num_layers):
        for logical_id in range(num_logical_experts):
            # Load of this logical expert
            logical_load = workload[layer_id][logical_id].item()

            # Skip experts with no load
            if logical_load <= 0:
                continue

            num_replicas = int(logcnt[layer_id][logical_id].item())

            # Skip experts with no replicas
            if num_replicas <= 0:
                continue

            # Physical slot ids of this expert's replicas
            physical_ids = log2phy[layer_id][logical_id][:num_replicas]

            # Per-replica share of the load (uniform split across replicas)
            replica_load = logical_load / num_replicas

            # Accumulate onto the physical slots.
            # NOTE(review): tensor-index += counts duplicate indices only once;
            # assumes physical_ids are distinct per expert — confirm upstream.
            total_physical_load[layer_id, physical_ids] += replica_load

    # Compute balancedness
    total_load = total_physical_load.sum()
    if total_load == 0:
        return 0.0, 0.0

    # Compute expert load: per-layer mean and max, summed over layers
    expert_layer_avg = total_physical_load.mean(dim=1).sum().item()
    expert_layer_max = total_physical_load.max(dim=1).values.sum().item()
    balancedness_expert = expert_layer_avg / expert_layer_max if expert_layer_max > 0 else 0.0

    # Compute per-GPU load: fold the NUM_REPLICAS slots into NUM_GPUS groups
    # (288 / 32 = 9 consecutive slots per GPU).
    gpu_load = total_physical_load.view(num_layers, NUM_GPUS, -1).sum(dim=2)

    # Per-layer mean and max GPU load, then summed over layers
    layer_avg = gpu_load.mean(dim=1)  # (num_layers,)
    layer_max = gpu_load.max(dim=1).values  # (num_layers,)

    avg_load = layer_avg.sum().item()
    max_load = layer_max.sum().item()

    # Balancedness: avg_load / max_load (1.0 means perfectly even)
    balancedness_gpu = avg_load / max_load if max_load > 0 else 0.0

    # print(f'balancedness per GPU: {balancedness}, balancedness per expert: {balancedness_expert}')

    return balancedness_gpu, balancedness_expert
|
| 112 |
+
|
| 113 |
+
def _failure(error: str) -> EvaluationResult:
    """Zeroed-out EvaluationResult used for every failure path in evaluate()."""
    return {
        "balancedness_score_gpu": 0.0,
        "balancedness_score_expert": 0.0,
        "times_algorithm": 0.0,
        "times_inference": 0.0,
        "speed_score": 0.0,
        "combined_score": 0.0,
        "error": error,
    }


def evaluate(program_path: str) -> EvaluationResult:
    """Load a candidate program and score its ``rebalance_experts`` function.

    For each rebalance interval i, the candidate rebalances on workloads[i]
    and is scored by simulating inference on workloads[i + 1]. Outputs are
    validated (shape, replica counts, coverage of loaded experts) to prevent
    reward hacking; any violation yields a zeroed result with an ``error``.

    Args:
        program_path: Path to a Python file defining ``rebalance_experts``.

    Returns:
        An EvaluationResult with averaged balancedness/timing metrics, or a
        zeroed result carrying ``error`` on any failure.
    """
    workloads = load_workloads(WORKLOAD_PATH)

    try:
        # Dynamically import the candidate program from its file path.
        spec = importlib.util.spec_from_file_location("program", program_path)
        assert spec is not None
        program = importlib.util.module_from_spec(spec)
        assert spec.loader is not None
        spec.loader.exec_module(program)

        if not hasattr(program, "rebalance_experts"):
            print('Error: program does not have `rebalance_experts` function')
            return _failure("Missing `rebalance_experts` function")

        balancedness_scores_gpu = []
        balancedness_scores_expert = []
        times_algorithm = []
        times_inference = []
        for i in range(len(workloads) - 1):
            start_time = time.perf_counter()
            phy2log, log2phy, logcnt = program.rebalance_experts(
                workloads[i],
                NUM_REPLICAS,
                NUM_GROUPS,
                NUM_NODES,
                NUM_GPUS,
            )
            end_time_algorithm = time.perf_counter()

            # Validate outputs to prevent reward hacking
            if phy2log.shape[1] != NUM_REPLICAS:
                return _failure(f"phy2log shape wrong: {tuple(phy2log.shape)}")

            # Every layer must distribute exactly NUM_REPLICAS replicas.
            if not torch.all(logcnt.sum(dim=1) == NUM_REPLICAS):
                sums = logcnt.sum(dim=1)
                return _failure(f"logcnt sums != {NUM_REPLICAS}: {sums[:5].tolist()}...")

            if (logcnt < 0).any():
                return _failure("logcnt contains negative values")

            # Every expert that receives load in the NEXT interval must have
            # at least one replica, otherwise its requests cannot be served.
            next_workload = workloads[i + 1]
            has_load = next_workload > 0
            has_no_replicas = logcnt == 0
            unhandled = has_load & has_no_replicas
            if unhandled.any():
                unhandled_count = int(unhandled.sum().item())
                return _failure(f"Unhandled load: {unhandled_count} experts have load but 0 replicas")

            balancedness_score_gpu, balancedness_score_expert = simulate_inference(log2phy, logcnt, workloads[i + 1])
            end_time = time.perf_counter()
            balancedness_scores_gpu.append(balancedness_score_gpu)
            balancedness_scores_expert.append(balancedness_score_expert)
            print(f'time_algorithm: {end_time_algorithm - start_time}, time_inference: {end_time - start_time}')
            times_algorithm.append(end_time_algorithm - start_time)
            # NOTE(review): this "inference" time is measured from start_time,
            # so it includes the algorithm time as well — confirm intended.
            times_inference.append(end_time - start_time)

        avg_balancedness_score_gpu = sum(balancedness_scores_gpu) / len(balancedness_scores_gpu)
        avg_balancedness_score_expert = sum(balancedness_scores_expert) / len(balancedness_scores_expert)
        avg_time_algorithm = sum(times_algorithm) / len(times_algorithm)
        avg_time_inference = sum(times_inference) / len(times_inference)
        # 0.002 s is the reference time: hitting it (or better) scores >= 1.0.
        speed_score = 0.002 / avg_time_inference
        print(f'avg_time_algorithm: {avg_time_algorithm}, avg_time_inference: {avg_time_inference}, speed_score: {speed_score}')
        combined_score = (avg_balancedness_score_expert + speed_score) / 2
        return {
            "balancedness_score_gpu": float(avg_balancedness_score_gpu),
            "balancedness_score_expert": float(avg_balancedness_score_expert),
            "times_algorithm": float(avg_time_algorithm),
            "times_inference": float(avg_time_inference),
            "speed_score": float(speed_score),
            "combined_score": float(combined_score),
        }
    except Exception as e:
        traceback.print_exc()
        print(f'Error during evaluation: {str(e)}')
        return _failure(str(e))
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
if __name__ == "__main__":
    # Backwards-compat: bridges old evaluate() -> dict to the container JSON
    # protocol. wrapper.py is auto-injected at build time from
    # skydiscover/evaluation/wrapper.py (import kept local so the module can
    # be imported without wrapper.py present).
    from wrapper import run

    run(evaluate)
|
benchmarks/ADRS/eplb/evaluator/wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Backwards-compat wrapper for old Python-based evaluators.
|
| 2 |
+
|
| 3 |
+
Old-style evaluators define ``evaluate(program_path) -> dict``. This module
|
| 4 |
+
bridges that interface to the container JSON protocol expected by
|
| 5 |
+
ContainerizedEvaluator.
|
| 6 |
+
|
| 7 |
+
Usage — add this to the bottom of your evaluator.py::
|
| 8 |
+
|
| 9 |
+
if __name__ == "__main__":
|
| 10 |
+
from wrapper import run
|
| 11 |
+
run(evaluate)
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import json
|
| 15 |
+
import sys
|
| 16 |
+
import traceback
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _error_output(artifacts):
    """Container-protocol payload for a failed evaluation (all scores zeroed).

    Args:
        artifacts: Non-numeric diagnostic strings (error message, traceback).
    """
    return {
        "status": "error",
        "combined_score": 0.0,
        "metrics": {"combined_score": 0.0},
        "artifacts": artifacts,
    }


def run(evaluate_fn):
    """Call *evaluate_fn*, format the result as container-protocol JSON on stdout.

    * Reads ``sys.argv[1]`` as the program path.
    * Redirects stdout → stderr while *evaluate_fn* runs so that debug prints
      don't contaminate the JSON output.
    * Separates numeric metrics from non-numeric artifacts.
    * Guarantees ``combined_score`` is always present in metrics.

    Args:
        evaluate_fn: Callable taking the program path, returning a dict of
            metrics/artifacts. Any exception it raises becomes an error payload.
    """
    if len(sys.argv) < 2:
        print("Usage: evaluator.py <program_path>", file=sys.stderr)
        sys.exit(1)

    program_path = sys.argv[1]

    # Redirect stdout → stderr during evaluation so debug prints from
    # the evaluator don't contaminate the JSON output on stdout.
    real_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        result = evaluate_fn(program_path)
    except Exception as e:
        sys.stdout = real_stdout
        print(json.dumps(_error_output({
            "error": str(e),
            "traceback": traceback.format_exc(),
        })))
        return
    sys.stdout = real_stdout

    if not isinstance(result, dict):
        print(json.dumps(_error_output({
            "error": f"evaluate() returned {type(result).__name__}, expected dict"
        })))
        return

    # Separate numeric metrics from non-numeric artifacts. bool is a subclass
    # of int, so one numeric branch covers bool/int/float identically.
    # Values of any other type are silently dropped (not protocol-safe).
    metrics = {}
    artifacts = {}
    for k, v in result.items():
        if isinstance(v, (bool, int, float)):
            metrics[k] = float(v)
        elif isinstance(v, str):
            artifacts[k] = v
        elif isinstance(v, (list, dict)):
            artifacts[k] = json.dumps(v)

    if "combined_score" not in metrics:
        metrics["combined_score"] = 0.0

    # An "error" artifact anywhere in the result marks the whole run failed.
    status = "error" if "error" in artifacts else "success"
    output = {
        "status": status,
        "combined_score": metrics["combined_score"],
        "metrics": metrics,
    }
    if artifacts:
        output["artifacts"] = artifacts

    print(json.dumps(output))
|
benchmarks/ADRS/llm_sql/README.md
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM-SQL — Column Reordering for Prefix Caching
|
| 2 |
+
|
| 3 |
+
When rows of a table are serialized into LLM prompts sequentially, consecutive rows that share leading column values can reuse cached prefixes. This task evolves a column-reordering strategy that maximizes prefix-cache hit rates across multiple real-world datasets without altering the underlying data.
|
| 4 |
+
|
| 5 |
+
## Setup
|
| 6 |
+
|
| 7 |
+
1. **Download the datasets** (~69 MB total):
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
cd benchmarks/ADRS/llm_sql
|
| 11 |
+
bash download_dataset.sh
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
This downloads 5 CSV datasets into `datasets/`:
|
| 15 |
+
- `movies.csv` — Rotten Tomatoes movie reviews (~9 MB)
|
| 16 |
+
- `beer.csv` — Beer review dataset (~2.5 MB)
|
| 17 |
+
- `BIRD.csv` — BIRD text-to-SQL dataset (~34 MB)
|
| 18 |
+
- `PDMX.csv` — PDMX metadata dataset (~7.4 MB)
|
| 19 |
+
- `products.csv` — Amazon product catalog (~16 MB)
|
| 20 |
+
|
| 21 |
+
2. **Set your API key:**
|
| 22 |
+
|
| 23 |
+
```bash
|
| 24 |
+
export OPENAI_API_KEY=...
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
## Run
|
| 28 |
+
|
| 29 |
+
From the repo root:
|
| 30 |
+
|
| 31 |
+
```bash
|
| 32 |
+
uv run skydiscover-run \
|
| 33 |
+
benchmarks/ADRS/llm_sql/initial_program.py \
|
| 34 |
+
benchmarks/ADRS/llm_sql/evaluator.py \
|
| 35 |
+
-c benchmarks/ADRS/llm_sql/config.yaml \
|
| 36 |
+
-s [your_algorithm] \
|
| 37 |
+
-i 100
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
## Scoring
|
| 41 |
+
|
| 42 |
+
Combined score: `0.95 * average_hit_rate + 0.05 * (12 - min(12, avg_runtime)) / 12`
|
| 43 |
+
|
| 44 |
+
- **Hit rate** (95% weight): prefix-cache hit count normalized across 5 datasets
|
| 45 |
+
- **Runtime** (5% weight): wall-clock seconds for the reordering algorithm
|
| 46 |
+
|
| 47 |
+
## Files
|
| 48 |
+
|
| 49 |
+
| File | Description |
|
| 50 |
+
|------|-------------|
|
| 51 |
+
| `initial_program.py` | Baseline `Evolved` class with `reorder()` method to evolve |
|
| 52 |
+
| `evaluator.py` | Scores programs on prefix hit rate and runtime across 5 datasets |
|
| 53 |
+
| `config.yaml` | Task-specific config (LLM, evaluator timeout, system prompt) |
|
| 54 |
+
| `solver.py` | Base `Algorithm` class and greedy baseline |
|
| 55 |
+
| `utils.py` | Prefix hit count evaluation utilities |
|
| 56 |
+
| `download_dataset.sh` | Script to download required CSV datasets |
|
benchmarks/ADRS/llm_sql/config.yaml
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM SQL — Prompt Caching Column Reordering Optimization
|
| 2 |
+
# Usage: skydiscover-run initial_program.py evaluator.py -c config.yaml -s <strategy>
|
| 3 |
+
language: python
|
| 4 |
+
diff_based_generation: true
|
| 5 |
+
max_iterations: 100
|
| 6 |
+
checkpoint_interval: 5
|
| 7 |
+
max_solution_length: 60000
|
| 8 |
+
|
| 9 |
+
llm:
|
| 10 |
+
api_base: https://api.openai.com/v1
|
| 11 |
+
models:
|
| 12 |
+
- name: "gpt-5"
|
| 13 |
+
weight: 1.0
|
| 14 |
+
max_tokens: 32000
|
| 15 |
+
timeout: 600
|
| 16 |
+
|
| 17 |
+
prompt:
|
| 18 |
+
system_message: |-
|
| 19 |
+
You are an expert in data optimization and LLM prompt caching. Your task is to evolve the existing Evolved class to maximize prefix hit count (PHC) for efficient LLM prompt caching.
|
| 20 |
+
|
| 21 |
+
Problem Context:
|
| 22 |
+
- You are given a pandas DataFrame `df` with text data in rows and columns
|
| 23 |
+
- The goal is to reorder columns to maximize prefix reuse when processing rows sequentially
|
| 24 |
+
- Prefix reuse occurs when consecutive rows have matching values in the same column positions
|
| 25 |
+
- This reduces LLM computation costs by reusing cached prefixes
|
| 26 |
+
|
| 27 |
+
Objective:
|
| 28 |
+
- Dual objective: (1) maximize prefix reuse across consecutive rows and (2) minimize end-to-end runtime of the algorithm.
|
| 29 |
+
- Your goal is to evolve the Evolved class such that when the LLM processes each row sequentially, it reuses as much of the prefix from the previous row as possible, while keeping the algorithm computationally efficient.
|
| 30 |
+
- Prefix reuse is defined as consecutive field values (starting from the first column) that are **exact matches** with the corresponding fields of the previous row.
|
| 31 |
+
- The **hit score** of a row is defined as the **sum of squares of the string lengths** of the matching prefix fields.
|
| 32 |
+
- The algorithm will be evaluated on a combined metric that balances accuracy (prefix reuse) and speed (runtime).
|
| 33 |
+
|
| 34 |
+
Formally:
|
| 35 |
+
- For a given column ordering `C`, PHC(C) = sum over all rows `r` of `hit(C, r)`
|
| 36 |
+
- `hit(C, r)` = sum of `len(df[r][C[f]])^2` for all f in prefix where `df[r][C[f]] == df[r-1][C[f]]`; zero if mismatch starts at the first field.
|
| 37 |
+
- Runtime is measured as wall-clock seconds to compute the reordered DataFrame from the input DataFrame.
|
| 38 |
+
- Combined score used for selection: `combined_score = 0.95 * average_hit_rate + 0.05 * (12 - min(12, average_runtime)) / 12`.
|
| 39 |
+
|
| 40 |
+
Required API (DO NOT CHANGE):
|
| 41 |
+
- You must keep the existing Evolved class structure and the reorder method signature:
|
| 42 |
+
```python
|
| 43 |
+
class Evolved(Algorithm):
|
| 44 |
+
def reorder(
|
| 45 |
+
self,
|
| 46 |
+
df: pd.DataFrame,
|
| 47 |
+
early_stop: int = 0,
|
| 48 |
+
row_stop: int = None,
|
| 49 |
+
col_stop: int = None,
|
| 50 |
+
col_merge: List[List[str]] = [],
|
| 51 |
+
one_way_dep: List[Tuple[str, str]] = [],
|
| 52 |
+
distinct_value_threshold: float = 0.8,
|
| 53 |
+
parallel: bool = True,
|
| 54 |
+
) -> Tuple[pd.DataFrame, List[List[str]]]:
|
| 55 |
+
```
|
| 56 |
+
- You can modify the internal implementation of methods but must preserve the class structure and method signatures
|
| 57 |
+
- The reorder method must return a tuple of (reordered_dataframe, column_orderings)
|
| 58 |
+
|
| 59 |
+
Algorithm Design Guidelines:
|
| 60 |
+
- For each row, determine the optimal column order based on matches with the previous row
|
| 61 |
+
- Consider column statistics (unique values, string lengths) for ordering
|
| 62 |
+
- Implement greedy or heuristic approaches for scalability
|
| 63 |
+
- Focus on columns with high value frequency and long strings
|
| 64 |
+
- Handle missing values and mixed data types appropriately
|
| 65 |
+
- Optimize the existing recursive approach or replace it with more efficient vectorized methods
|
| 66 |
+
- Consider prefix-aware greedy approaches that condition on the current matched prefix
|
| 67 |
+
|
| 68 |
+
Constraints:
|
| 69 |
+
- Do not add/remove rows or columns
|
| 70 |
+
- You must have different column orderings for different rows to maximize prefix hit rate
|
| 71 |
+
- Return a DataFrame with the same shape as input
|
| 72 |
+
- Use exact string matching for prefix calculations
|
| 73 |
+
- Keep memory usage reasonable for large datasets
|
| 74 |
+
- Preserve all existing method signatures and class structure
|
| 75 |
+
- The algorithm will be called with the same parameters as the original Evolved
|
| 76 |
+
|
| 77 |
+
Simply return the optimized Evolved class, do not provide explanations.
|
| 78 |
+
|
| 79 |
+
evaluator:
|
| 80 |
+
timeout: 360
|
| 81 |
+
|
benchmarks/ADRS/llm_sql/evaluator/Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
WORKDIR /benchmark

# Install dependencies in their own layer (before COPY . .) so code-only
# changes do not invalidate the pip-install layer cache.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# wrapper.py provides backwards compatibility for old Python-based evaluators
# that define evaluate(program_path) -> dict, bridging them to the container
# JSON protocol. Source of truth: skydiscover/evaluation/wrapper.py
COPY . .
RUN chmod +x evaluate.sh

# evaluate.sh receives the candidate program path as its first argument.
ENTRYPOINT ["./evaluate.sh"]
|
benchmarks/ADRS/llm_sql/evaluator/evaluate.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Container entry point: forwards the candidate program ($1) to the Python
# evaluator, which prints a single JSON result object on stdout.
set -euo pipefail

PROGRAM="$1"
# MODE ($2) accepted but ignored — override this file to use train/test splits.

python /benchmark/evaluator.py "$PROGRAM"
|
benchmarks/ADRS/llm_sql/evaluator/evaluator.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import traceback
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
|
| 8 |
+
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 9 |
+
sys.path.insert(0, parent_dir)
|
| 10 |
+
import importlib.util
|
| 11 |
+
|
| 12 |
+
from utils import evaluate_df_prefix_hit_cnt
|
| 13 |
+
from initial_program import Evolved
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def run_quick(
    master_df,
    col_merge,
):
    # Baseline path: time QuickGreedy's reorder and score the result with the
    # prefix-hit-count metric. Returns (hit results, elapsed seconds).
    # NOTE(review): QuickGreedy is not imported in this module's visible
    # imports (only Evolved is) — presumably it lives in solver.py; confirm a
    # `from solver import QuickGreedy` exists, else this raises NameError.
    st = time.time()
    quick, _ = QuickGreedy().reorder(
        master_df,
        early_stop=100000,
        distinct_value_threshold=0.7,
        row_stop=4,
        col_stop=2,
        col_merge=col_merge,
    )
    end = time.time() - st

    results = evaluate_df_prefix_hit_cnt(quick)
    # results = evaluate_cell_hit_cnt(quick)
    return results, end
|
| 34 |
+
|
| 35 |
+
def run_evolved(
    master_df,
    col_merge,
):
    """Time Evolved().reorder on master_df and score the reordered frame.

    Returns (prefix-hit-count results, elapsed wall-clock seconds of the
    reorder call only — scoring is excluded from the timing).
    """
    started = time.time()
    reordered_df, _orderings = Evolved().reorder(
        master_df,
        early_stop=100000,
        distinct_value_threshold=0.7,
        row_stop=4,
        col_stop=2,
        col_merge=col_merge,
    )
    elapsed = time.time() - started

    hit_results = evaluate_df_prefix_hit_cnt(reordered_df)
    # results = evaluate_cell_hit_cnt(reordered)
    return hit_results, elapsed
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def run(filename, alg="", col_merge=None):
    """Load a CSV workload and run the requested reordering algorithm.

    Args:
        filename: Path to the CSV dataset.
        alg: ``"QuickGreedy"`` selects the baseline; any other value
            (including the default) runs the Evolved algorithm.
        col_merge: Column groups to merge before reordering; defaults to
            no merging. (Was a mutable ``[]`` default -- replaced with a
            ``None`` sentinel; behavior for callers is unchanged.)

    Returns:
        tuple: (score statistics, elapsed seconds) from the selected runner.
    """
    if col_merge is None:
        col_merge = []

    master_df = pd.read_csv(filename)

    print(f"Evaluate master df shape: {master_df.shape}")
    print(f"Nunique: {master_df.nunique().sort_values()}")

    if alg == "QuickGreedy":
        return run_quick(master_df, col_merge)

    return run_evolved(master_df, col_merge)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def evaluate(program_path):
    """Evaluate an evolved reordering program against the benchmark datasets.

    Loads the candidate module from *program_path*, runs its ``Evolved``
    reorderer over each dataset (with dataset-specific column merges),
    validates that no rows or characters were lost, and combines the average
    prefix-cache hit rate (95%) with a runtime bonus (5%).

    Args:
        program_path: Filesystem path to the candidate Python program.

    Returns:
        dict: Always contains ``combined_score`` and ``runs_successfully``;
        on success also ``hit_rates`` and ``total_runtime``, otherwise an
        ``error`` message.
    """
    try:
        # Add the llm_sql directory to sys.path so solver/utils can be imported.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        if current_dir not in sys.path:
            sys.path.insert(0, current_dir)

        # Import the candidate program as an anonymous module.
        spec = importlib.util.spec_from_file_location("program", program_path)
        program = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(program)

        # The candidate must expose an `Evolved` reorderer class.
        if not hasattr(program, "Evolved"):
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": "Missing algorithm function",
            }

        datasets_dir = os.path.join(current_dir, "datasets")

        # Test datasets and, positionally aligned with them, the column
        # groups that must be merged before reordering each dataset.
        test_files = [
            os.path.join(datasets_dir, "movies.csv"),
            os.path.join(datasets_dir, "beer.csv"),
            os.path.join(datasets_dir, "BIRD.csv"),
            os.path.join(datasets_dir, "PDMX.csv"),
            os.path.join(datasets_dir, "products.csv"),
        ]

        col_merges = [
            [['movieinfo', 'movietitle', 'rottentomatoeslink']],
            [['beer/beerId', 'beer/name']],
            [['PostId', 'Body']],
            [['path', 'metadata'], ['hasmetadata', 'isofficial', 'isuserpublisher', 'isdraft', 'hasannotations', 'subsetall']],
            [['product_title', 'parent_asin']],
        ]

        failed_files = 0
        hit_rates = []
        total_runtime = 0.0
        successful_files = 0

        for filename, col_merge in zip(test_files, col_merges):
            try:
                if not os.path.exists(filename):
                    # BUG FIX: these messages previously printed a literal
                    # "(unknown)" placeholder; include the actual file name.
                    print(f"Dataset not found: {os.path.basename(filename)}, skipping...")
                    failed_files += 1
                    continue

                print(f"Processing dataset: {os.path.basename(filename)}")
                master_df = pd.read_csv(filename)

                # Snapshot size metrics so data loss can be detected after
                # reordering (merging may add characters, never remove them).
                total_chars_before = master_df.astype(str).apply(lambda x: x.str.len().sum(), axis=1).sum()
                original_row_count = len(master_df)

                st = time.time()
                reordered, _ = program.Evolved().reorder(
                    master_df,
                    early_stop=100000,
                    distinct_value_threshold=0.7,
                    row_stop=4,
                    col_stop=2,
                    col_merge=col_merge,
                )
                runtime = time.time() - st

                # Row-count invariant: reordering must neither drop nor
                # duplicate rows.
                reordered_row_count = len(reordered)
                if reordered_row_count != original_row_count:
                    diff = reordered_row_count - original_row_count
                    if diff < 0:
                        error_msg = f"Evaluation failed: row count decreases by {abs(diff)} rows. Data were lost - you might have dropped some rows or failed to preserve all data during reordering."
                    else:
                        error_msg = f"Evaluation failed: row count increases by {diff} rows. Data were duplicated - you might have duplicated some rows during reordering."
                    return {
                        "combined_score": 0.0,
                        "runs_successfully": 0.0,
                        "error": error_msg,
                    }

                total_chars_after = reordered.astype(str).apply(lambda x: x.str.len().sum(), axis=1).sum()

                # Character-count invariant: the reordered frame may grow
                # (merge delimiters) but must never shrink.
                if total_chars_after < total_chars_before:
                    char_diff = total_chars_before - total_chars_after
                    char_diff_pct = (char_diff / total_chars_before * 100) if total_chars_before > 0 else 0

                    message = f"Evaluation failed: character decreases by {char_diff_pct:.2f}%. Data were lost - you might have dropped some data or failed to preserve all data during reordering."

                    return {
                        "combined_score": 0.0,
                        "runs_successfully": 0.0,
                        "error": message,
                    }

                results = evaluate_df_prefix_hit_cnt(reordered)
                print(f"Results: {results}, Runtime: {runtime}")

                # results[1] is a hit percentage; normalize to [0, 1].
                hit_rate = results[1] / 100

                hit_rates.append(hit_rate)
                total_runtime += runtime
                successful_files += 1

            except Exception as e:
                print(f"Failed to process {os.path.basename(filename)}: {str(e)}")
                print(traceback.format_exc())
                failed_files += 1
                # Any failure is fatal for the whole evaluation, so stop early.
                break

        if successful_files == 0:
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": "No files processed successfully",
            }

        if failed_files > 0:
            return {
                "combined_score": 0.0,
                "runs_successfully": 0.0,
                "error": "1 or more files failed to run",
            }

        average_hit_rate = sum(hit_rates) / successful_files
        average_runtime = total_runtime / successful_files

        # Score: 95% quality (hit rate) + 5% speed bonus; the bonus decays
        # linearly to zero at an average runtime of 12 seconds.
        score = 0.95 * average_hit_rate + 0.05 * (12 - min(12, average_runtime)) / 12

        return {
            "combined_score": score,
            "runs_successfully": 1.0,
            "hit_rates": hit_rates,
            "total_runtime": total_runtime,
        }

    except Exception as e:
        print(f"Evaluation failed: {str(e)}")
        print(traceback.format_exc())
        return {"combined_score": 0.0, "runs_successfully": 0.0, "error": str(e)}
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
if __name__ == "__main__":
    # Backwards-compat: bridges old evaluate() -> dict to the container JSON
    # protocol. wrapper.py is auto-injected at build time from
    # skydiscover/evaluation/wrapper.py.
    from wrapper import run as run_wrapper

    # The wrapper parses argv, calls evaluate(), and emits protocol JSON.
    run_wrapper(evaluate)
|
benchmarks/ADRS/llm_sql/evaluator/solver.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 4 |
+
from utils import Trie
|
| 5 |
+
import time
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Algorithm:
    """Base class for DataFrame reordering algorithms.

    Subclasses implement :meth:`reorder`. The static helpers below are shared
    scoring utilities (prefix/cell hit counting) and DataFrame manipulation
    helpers (column merging, dropping, per-column statistics).
    """

    def __init__(self, df: pd.DataFrame = None):
        # Optionally bind a DataFrame; the static helpers below take frames
        # explicitly and do not use this attribute.
        self.df = df

    def reorder(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return a reordered copy of *df*; must be overridden by subclasses."""
        raise NotImplementedError("Subclasses should implement this!")

    @staticmethod
    def evaluate_df_prefix_hit_cnt(df: pd.DataFrame) -> int:
        """
        Function to evaluate the prefix hit count of a DataFrame
        """
        # BUG FIX: this method is a @staticmethod but previously also declared
        # a `self` parameter, so class-level calls bound the DataFrame to
        # `self`. The stray parameter is removed to match the other helpers.

        def max_overlap(trie, row_string):
            return trie.longest_common_prefix(row_string)

        trie = Trie()
        total_prefix_hit_count = 0

        def process_row(index, row):
            # Serialize the row with no separators, measure how much of it is
            # already cached in the trie, then insert the full row string.
            row_string = "".join(row.astype(str).values)  # No spaces between columns
            row_prefix_hit_count = max_overlap(trie, row_string)
            trie.insert(row_string)
            return row_prefix_hit_count

        # NOTE(review): the trie is mutated from worker threads without a
        # lock, so results may depend on scheduling -- confirm Trie is
        # thread-safe or that approximate counts are acceptable here.
        with ThreadPoolExecutor() as executor:
            results = executor.map(process_row, df.index, [row for _, row in df.iterrows()])

        total_prefix_hit_count = sum(results)
        return total_prefix_hit_count

    @staticmethod
    def evaluate_cell_hit_cnt(df: pd.DataFrame) -> int:
        """
        Function to evaluate the prefix hit count of a DataFrame based on exact cell matching.
        For a cell to be a hit, all previous cells in the row must also be hits.
        """

        total_prefix_hit_count = 0
        seen_rows = set()  # Cache of fully processed rows (full-row tuples only)

        def process_row(index, row):
            nonlocal seen_rows
            prefix_hit_count = 0
            current_row_cache = []

            for col_value in row:
                # A cell only counts as a hit while the entire running prefix
                # matches a previously seen full row.
                current_row_cache.append(col_value)
                if tuple(current_row_cache) in seen_rows:
                    prefix_hit_count += 1
                else:
                    break  # Stop counting hits if any cell isn't in the cache

            seen_rows.add(tuple(row))  # Add the fully processed row to cache
            return prefix_hit_count

        # Process each row sequentially (row-to-row comparison for hits)
        for _, row in df.iterrows():
            total_prefix_hit_count += process_row(_, row)

        return total_prefix_hit_count

    @staticmethod
    def get_groups_values(df: pd.DataFrame):
        """
        Function to get the value counts of a DataFrame
        """
        # NOTE(review): returns a dict ({}) on the empty paths but a pandas
        # Series otherwise -- callers should be prepared for both.
        if df.empty:
            return {}
        value_counts = df.stack().value_counts()
        if value_counts.empty:
            return {}
        return value_counts

    @staticmethod
    def calculate_length(value):
        # Squared display length of a scalar. bool is checked first because
        # bool is a subclass of int in Python.
        val = 0
        if isinstance(value, bool):
            val = 4  # length of 'True' or 'False'
        elif isinstance(value, (int, float)):
            val = len(str(value))
        elif isinstance(value, str):
            val = len(value)
        else:
            val = 0
        return val**2

    @staticmethod
    def drop_col(df: pd.DataFrame, col):
        # Return a copy of *df* without column *col*.
        return df.drop(columns=[col])

    @staticmethod
    def drop_rows(df: pd.DataFrame, rows):
        # Return a copy of *df* without the given index labels.
        return df.drop(index=rows)

    @staticmethod
    def merging_columns(df: pd.DataFrame, col_names: List[str], delimiter: str = "_", prepended: bool = False) -> pd.DataFrame:
        """Merge *col_names* into a single concatenated column.

        The merged column is named by joining the source names with
        *delimiter*; the source columns are dropped afterwards.

        Raises:
            ValueError: if any column is missing or the columns differ in
                unique-value counts.
        """
        if not all(col in df.columns for col in col_names):
            raise ValueError("Column names not found in DataFrame")

        # before merging, check that each column to be merged has the same number of unique values
        if len(set(df[col_names].nunique())) != 1:
            raise ValueError(f"Columns to be merged {col_names}, do not have the same number of unique values: {df.nunique().sort_values()}")

        merged_names = delimiter.join(col_names)
        if prepended:
            # Values are assumed to look like "col: value" -- strip each
            # prefix and re-prepend the merged column name once.
            df[merged_names] = df[col_names].apply(
                lambda x: merged_names + ": " + delimiter.join([val.split(": ", 1)[1] for col, val in zip(col_names, x)]), axis=1
            )
        else:
            df[merged_names] = df[col_names].apply(lambda x: "".join([f"{val}" for val in x]), axis=1)
        df = df.drop(columns=col_names)
        return df

    @staticmethod
    def calculate_col_stats(df: pd.DataFrame, enable_index=False):
        """Score each column by expected prefix-sharing benefit.

        Returns:
            tuple: (num_rows, column_stats) where column_stats is a list of
            (column, num_groups, squared_avg_length, score) sorted by score
            descending. Columns whose values are all distinct score 0.
        """
        num_rows = len(df)
        column_stats = []
        for col in df.columns:
            if col == "original_index":
                continue

            num_groups = df[col].nunique()
            if df[col].dtype == "object" or df[col].dtype == "string":
                avg_length = df[col].astype(str).str.len().mean()
            elif df[col].dtype == "bool":
                avg_length = 4  # Assuming 'True' or 'False' as average length
            elif df[col].dtype in ["int64", "float64"]:
                avg_length = df[col].astype(str).str.len().mean()
            else:
                avg_length = 0

            # Square the length so long shared values dominate the score.
            avg_length = avg_length**2

            if num_groups == 0:
                score = 0
            else:
                # Average size per group: number of rows in each group
                avg_size_per_group = num_rows / num_groups
                # score = avg_size_per_group * avg_length
                score = avg_length * (avg_size_per_group - 1)

            if num_rows == num_groups:  # no sharing at all
                score = 0
            column_stats.append((col, num_groups, avg_length, score))

        # original_index all distinct values, so give lowest score
        if enable_index and "original_index" in df.columns:
            column_stats.append(("original_index", len(df), 0, 0))

        # Sort the columns based on the score
        column_stats.sort(key=lambda x: x[3], reverse=True)
        return num_rows, column_stats
|
benchmarks/ADRS/llm_sql/evaluator/wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Backwards-compat wrapper for old Python-based evaluators.
|
| 2 |
+
|
| 3 |
+
Old-style evaluators define ``evaluate(program_path) -> dict``. This module
|
| 4 |
+
bridges that interface to the container JSON protocol expected by
|
| 5 |
+
ContainerizedEvaluator.
|
| 6 |
+
|
| 7 |
+
Usage — add this to the bottom of your evaluator.py::
|
| 8 |
+
|
| 9 |
+
if __name__ == "__main__":
|
| 10 |
+
from wrapper import run
|
| 11 |
+
run(evaluate)
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import json
|
| 15 |
+
import sys
|
| 16 |
+
import traceback
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def run(evaluate_fn):
    """Call *evaluate_fn*, format the result as container-protocol JSON on stdout.

    * Reads ``sys.argv[1]`` as the program path.
    * Redirects stdout -> stderr while *evaluate_fn* runs so that debug prints
      don't contaminate the JSON output.
    * Separates numeric metrics from non-numeric artifacts.
    * Guarantees ``combined_score`` is always present in metrics.
    """
    if len(sys.argv) < 2:
        print("Usage: evaluator.py <program_path>", file=sys.stderr)
        sys.exit(1)

    program_path = sys.argv[1]

    # Redirect stdout -> stderr during evaluation so debug prints from
    # the evaluator don't contaminate the JSON output on stdout.
    real_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        result = evaluate_fn(program_path)
    except Exception as e:
        # The JSON goes explicitly to the saved stream; stdout itself is
        # restored in the finally block below.
        print(
            json.dumps(
                {
                    "status": "error",
                    "combined_score": 0.0,
                    "metrics": {"combined_score": 0.0},
                    "artifacts": {
                        "error": str(e),
                        "traceback": traceback.format_exc(),
                    },
                }
            ),
            file=real_stdout,
        )
        return
    finally:
        # BUG FIX: restore stdout in a finally block so it is reset even if
        # the error path raises (previously only the success path and the
        # except branch restored it manually).
        sys.stdout = real_stdout

    if not isinstance(result, dict):
        print(
            json.dumps(
                {
                    "status": "error",
                    "combined_score": 0.0,
                    "metrics": {"combined_score": 0.0},
                    "artifacts": {
                        "error": f"evaluate() returned {type(result).__name__}, expected dict"
                    },
                }
            )
        )
        return

    # Separate numeric metrics from non-numeric artifacts. Values of other
    # types (None, objects, ...) are intentionally dropped.
    metrics = {}
    artifacts = {}
    for k, v in result.items():
        if isinstance(v, bool):
            metrics[k] = float(v)
        elif isinstance(v, (int, float)):
            metrics[k] = float(v)
        elif isinstance(v, str):
            artifacts[k] = v
        elif isinstance(v, (list, dict)):
            artifacts[k] = json.dumps(v)

    if "combined_score" not in metrics:
        metrics["combined_score"] = 0.0

    status = "error" if "error" in artifacts else "success"
    output = {
        "status": status,
        "combined_score": metrics["combined_score"],
        "metrics": metrics,
    }
    if artifacts:
        output["artifacts"] = artifacts

    print(json.dumps(output))
|
benchmarks/ADRS/prism/evaluator/evaluate.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Container entrypoint: runs the Python evaluator against the candidate
# program passed as $1. Strict mode aborts on errors, unset vars, and
# pipeline failures.
set -euo pipefail

PROGRAM="$1"
# MODE ($2) accepted but ignored — override this file to use train/test splits.

python /benchmark/evaluator.py "$PROGRAM"
|
benchmarks/ADRS/prism/initial_program.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
GPU_MEM_SIZE = 80  # GB

# EVOLVE-BLOCK-START

def compute_model_placement(gpu_num, models):
    """
    Compute a model placement that minimizes the maximum KVPR across all GPUs.

    Args:
        gpu_num: Number of GPUs
        models: List of models to place

    Returns:
        A placement of models to GPUs
    """

    # Greedy KVPR-minimizing placement based on Algorithm 1 (without the tau
    # check): handle the most demanding models first, and give each one to the
    # GPU whose current pressure ratio is lowest among those with enough room.
    ordered = sorted(models, key=lambda m: m.req_rate / m.slo, reverse=True)

    placement = {gpu: [] for gpu in range(gpu_num)}
    free_mem = [GPU_MEM_SIZE for _ in range(gpu_num)]   # remaining memory per GPU
    pressure = [0.0 for _ in range(gpu_num)]            # sum of r_j / s_j per GPU

    for model in ordered:
        chosen = None
        chosen_ratio = float('inf')

        for gpu in range(gpu_num):
            if free_mem[gpu] > 0 and model.model_size <= free_mem[gpu]:
                ratio = pressure[gpu] / free_mem[gpu]
                if ratio < chosen_ratio:
                    chosen_ratio = ratio
                    chosen = gpu

        # Failure: if no GPU can fit the model, fail loudly instead of
        # overcommitting memory.
        if chosen is None:
            raise ValueError(
                f"Unable to place model of size {model.model_size} GB on any GPU. "
                f"Remaining per-GPU memory: {free_mem}"
            )

        placement[chosen].append(model)
        pressure[chosen] += model.req_rate / model.slo
        free_mem[chosen] -= model.model_size

    return placement

# EVOLVE-BLOCK-END
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if __name__ == "__main__":
    # Test the algorithm

    from evaluator import generate_test_gpu_models
    from evaluator import calculate_kvcache_pressure
    from evaluator import safe_float
    import numpy as np

    # Run the placement over every generated test case and collect the
    # resulting max KV-cache pressure per case.
    test_cases = generate_test_gpu_models()
    all_kvpr = []
    for i, (gpu_num, gpu_models) in enumerate(test_cases):

        results = compute_model_placement(gpu_num, gpu_models)
        max_kvpr = calculate_kvcache_pressure(results)
        all_kvpr.append(safe_float(max_kvpr))

    # NOTE(review): the value printed below is 1 / mean(KVPR) (higher is
    # better), but the label says "Max KVPR" -- confirm the intended metric.
    avg_kvpr = np.mean(all_kvpr)
    if avg_kvpr != 0:
        avg_kvpr = 1.0 / avg_kvpr


    print(f"Max KVPR: {avg_kvpr:.3f}")
|
benchmarks/ADRS/prism/initial_program_naive.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START

GPU_MEM_SIZE = 80  # GB

def compute_model_placement(gpu_num, models):
    """
    Compute a model placement that minimizes the maximum KVPR across all GPUs.

    Args:
        gpu_num: Number of GPUs
        models: List of models to place

    Returns:
        A placement of models to GPUs
    """

    # First-fit greedy baseline: walk GPUs in ascending id and drop each
    # model onto the first one with enough free memory. A model that fits
    # nowhere is silently skipped (matching the original behaviour).
    placement = {gpu: [] for gpu in range(gpu_num)}

    for candidate in models:
        for gpu in range(gpu_num):
            used = sum(m.model_size for m in placement[gpu])
            if candidate.model_size <= GPU_MEM_SIZE - used:
                placement[gpu].append(candidate)
                break
    return placement

# EVOLVE-BLOCK-END
|
benchmarks/ADRS/txn_scheduling/config.yaml
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Transaction Scheduling — Minimize makespan for database workloads
|
| 2 |
+
# Usage: skydiscover-run initial_program.py evaluator.py -c config.yaml -s <strategy>
|
| 3 |
+
language: python
|
| 4 |
+
diff_based_generation: true
|
| 5 |
+
max_iterations: 100
|
| 6 |
+
checkpoint_interval: 5
|
| 7 |
+
max_solution_length: 60000
|
| 8 |
+
|
| 9 |
+
llm:
|
| 10 |
+
api_base: https://api.openai.com/v1
|
| 11 |
+
models:
|
| 12 |
+
- name: "gpt-5"
|
| 13 |
+
weight: 1.0
|
| 14 |
+
max_tokens: 32000
|
| 15 |
+
timeout: 600
|
| 16 |
+
|
| 17 |
+
prompt:
|
| 18 |
+
system_message: |-
|
| 19 |
+
You are an expert in database transaction optimization.
|
| 20 |
+
Only change code within EVOLVE-BLOCK-START and EVOLVE-BLOCK-END.
|
| 21 |
+
Your task is to improve a scheduling function to find better schedules for transactional workloads made up of read and write operations to data items. There are conflicts between these transactions on items and reducing the delay of these conflicts will lead to schedules with lower makespan. Focus on improving the get_best_schedule function to find a schedule with as low makespan as possible.
|
| 22 |
+
|
| 23 |
+
**TASK:** Improve the `get_best_schedule` function to find optimal transaction schedules that minimize makespan for database workloads with read/write conflicts.
|
| 24 |
+
|
| 25 |
+
**PROBLEM SPECIFICS:**
|
| 26 |
+
- **Input:** JSON workload with transactions like `"txn0":"w-17 r-5 w-3 r-4 r-54 r-14 w-6 r-11 w-22 r-7 w-1 w-8 w-9 w-27 r-2 r-25"`
|
| 27 |
+
- **Operations:** Each transaction is a sequence of read (`r-{key}`) and write (`w-{key}`) operations on data items
|
| 28 |
+
- **Conflicts:** Read-write and write-write conflicts on the same key create dependencies between transactions
|
| 29 |
+
- **Goal:** Find transaction ordering that minimizes total makespan
|
| 30 |
+
|
| 31 |
+
**SEARCH SUGGESTIONS:**
|
| 32 |
+
- **Greedy:** You can try a greedy algorithm to iteratively pick the transaction that increases makespan the least.
|
| 33 |
+
- Avoid only using heuristics like transaction length, number of writes, etc. because these do not correspond to the actual makespan of the schedule.
|
| 34 |
+
|
| 35 |
+
Focus on evolving the `get_best_schedule` function to produce the best schedule possible with the lowest makespan.
|
| 36 |
+
|
| 37 |
+
Explain step-by-step the reasoning process for your solution and how this will lead to a better schedule.
|
| 38 |
+
|
| 39 |
+
evaluator:
|
| 40 |
+
timeout: 600
|
| 41 |
+
cascade_evaluation: true
|
| 42 |
+
cascade_thresholds: [0.5, 0.75]
|
| 43 |
+
|
benchmarks/ADRS/txn_scheduling/evaluator/Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
WORKDIR /benchmark

# Install Python dependencies first so this layer is cached across code edits.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# wrapper.py provides backwards compatibility for old Python-based evaluators
# that define evaluate(program_path) -> dict, bridging them to the container
# JSON protocol. Source of truth: skydiscover/evaluation/wrapper.py
COPY . .
RUN chmod +x evaluate.sh

ENTRYPOINT ["./evaluate.sh"]
|
benchmarks/ADRS/txn_scheduling/evaluator/evaluate.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Container entrypoint: runs the Python evaluator against the candidate
# program passed as $1. Strict mode aborts on errors, unset vars, and
# pipeline failures.
set -euo pipefail

PROGRAM="$1"
# MODE ($2) accepted but ignored — override this file to use train/test splits.

python /benchmark/evaluator.py "$PROGRAM"
|
benchmarks/ADRS/txn_scheduling/evaluator/evaluator.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib.util
|
| 2 |
+
import os
|
| 3 |
+
import pickle
|
| 4 |
+
import signal
|
| 5 |
+
import subprocess
|
| 6 |
+
import sys
|
| 7 |
+
import tempfile
|
| 8 |
+
import time
|
| 9 |
+
import traceback
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TimeoutError(Exception):
    # Raised by the SIGALRM handler when a candidate program runs too long.
    # NOTE(review): this shadows the builtin TimeoutError (Python 3.3+);
    # consider reusing the builtin or renaming to avoid confusion.
    pass
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def timeout_handler(signum, frame):
    """Handle timeout signal"""
    # (signum, frame) is the signature required by signal.signal; this turns
    # the alarm into an exception that callers can catch.
    raise TimeoutError("Function execution timed out")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def validate_schedule(txn_seq):
    """Check that *txn_seq* contains every transaction index 0..len-1.

    Args:
        txn_seq: Sequence of transaction indices (hashable, typically ints).

    Returns:
        True when every index in range(len(txn_seq)) appears in the sequence
        (i.e. it is a permutation), False otherwise. An empty sequence is
        trivially valid.
    """
    # Membership tests against a set make this O(n) instead of the previous
    # O(n^2) repeated list scans.
    seen = set(txn_seq)
    return all(i in seen for i in range(len(txn_seq)))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def run_with_timeout(program_path, timeout_seconds=20):
    """
    Run the program in a separate process with timeout
    using a simple subprocess approach.

    A small driver script is generated into a temp file; it imports the
    candidate program, calls its ``get_random_costs()`` entry point, and
    pickles either ``{'makespan', 'schedule'}`` or ``{'error'}`` next to
    the temp file. The parent process then loads that pickle.

    Args:
        program_path: Path to the program file
        timeout_seconds: Maximum execution time in seconds

    Returns:
        makespan, schedule tuple from the program

    Raises:
        TimeoutError: if the subprocess exceeds ``timeout_seconds``.
        RuntimeError: on non-zero exit, a pickled error, or a missing
            results file.
    """
    # Create a temporary file to execute
    # Ensure the scheduling module directory is on sys.path for imports like `import workloads`
    sched_dir = os.path.dirname(os.path.abspath(__file__))
    # delete=False: the script must survive the `with` block so the
    # subprocess can run it; cleanup happens in the `finally` below.
    with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as temp_file:
        # Write a script that executes the program and saves results.
        # NOTE(review): `program_path` is interpolated into string literals,
        # so a path containing a quote would break the generated script —
        # paths are assumed to be well-behaved here.
        script = f"""
import sys
import numpy as np
import os
import pickle
import traceback

# Add the directory to sys.path
sys.path.insert(0, os.path.dirname('{program_path}'))
# Also add the scheduling directory for importing sibling modules like `workloads`
sys.path.insert(0, r'{sched_dir}')

# Debugging info
print(f"Running in subprocess, Python version: {{sys.version}}")
print(f"Program path: {program_path}")

try:
    # Import the program
    spec = __import__('importlib.util').util.spec_from_file_location("program", '{program_path}')
    program = __import__('importlib.util').util.module_from_spec(spec)
    spec.loader.exec_module(program)

    # Run the packing function
    print("Calling scheduling()...")
    makespan, schedule = program.get_random_costs()
    print(f"scheduling() returned successfully: makespan = {{makespan}}")

    # Save results to a file
    results = {{
        'makespan': makespan,
        'schedule': schedule,
    }}

    with open('{temp_file.name}.results', 'wb') as f:
        pickle.dump(results, f)
    print(f"Results saved to {temp_file.name}.results")

except Exception as e:
    # If an error occurs, save the error instead
    print(f"Error in subprocess: {{str(e)}}")
    traceback.print_exc()
    with open('{temp_file.name}.results', 'wb') as f:
        pickle.dump({{'error': str(e)}}, f)
    print(f"Error saved to {temp_file.name}.results")
"""
        temp_file.write(script.encode())
        temp_file_path = temp_file.name

    # Results pickle is written by the child next to the script file.
    results_path = f"{temp_file_path}.results"

    try:
        # Run the script with timeout
        process = subprocess.Popen(
            [sys.executable, temp_file_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        try:
            stdout, stderr = process.communicate(timeout=timeout_seconds)
            exit_code = process.returncode

            # Always print output for debugging purposes
            print(f"Subprocess stdout: {stdout.decode()}")
            if stderr:
                print(f"Subprocess stderr: {stderr.decode()}")

            # Still raise an error for non-zero exit codes, but only after printing the output
            if exit_code != 0:
                raise RuntimeError(f"Process exited with code {exit_code}")

            # Load the results
            if os.path.exists(results_path):
                with open(results_path, "rb") as f:
                    results = pickle.load(f)

                # Check if an error was returned
                if "error" in results:
                    raise RuntimeError(f"Program execution failed: {results['error']}")

                return results["makespan"], results["schedule"]
            else:
                raise RuntimeError("Results file not found")

        except subprocess.TimeoutExpired:
            # Kill the process if it times out
            process.kill()
            process.wait()
            raise TimeoutError(f"Process timed out after {timeout_seconds} seconds")

    finally:
        # Clean up temporary files
        if os.path.exists(temp_file_path):
            os.unlink(temp_file_path)
        if os.path.exists(results_path):
            os.unlink(results_path)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def evaluate(program_path):
    """
    Evaluate the program by running it once and checking the schedule.

    Args:
        program_path: Path to the program file

    Returns:
        Dictionary of metrics (makespan, schedule length, validity,
        combined_score); all zeros if the run fails for any reason.
    """
    try:
        # One run suffices: constructor-based programs are deterministic.
        started = time.time()

        # Execute the candidate in a sandboxed subprocess with a timeout.
        makespan, schedule = run_with_timeout(
            program_path, timeout_seconds=600  # Single timeout
        )

        eval_time = time.time() - started

        # Every sub-sequence must enumerate indices 0..len-1 to be valid
        # (all() short-circuits on the first invalid one, like the
        # original break).
        valid = all(validate_schedule(seq) for seq in schedule)

        # Reciprocal scaling: higher makespan -> lower score, always positive.
        combined_score = 1000 / (1 + makespan) * 1000

        print(f"Evaluation: valid={valid}, makespan={makespan}, time={eval_time:.2f}s")

        return {
            "makespan": float(makespan),
            "schedule": float(len(schedule)),
            "validity": 1.0 if valid else 0.0,
            "combined_score": float(combined_score),
        }

    except Exception as e:
        print(f"Evaluation failed completely: {str(e)}")
        traceback.print_exc()
        return {
            "makespan": 0.0,
            "schedule": 0.0,
            "validity": 0.0,
            "combined_score": 0.0,
        }
|
| 202 |
+
|
| 203 |
+
# Stage-based evaluation for cascade evaluation
|
| 204 |
+
def evaluate_stage1(program_path):
    """
    First stage evaluation - quick validation check.

    Returns a metrics dict; on timeout or error, validity and
    combined_score are 0.0 and an 'error' entry is included.
    """
    try:
        try:
            # Single deterministic run through the subprocess sandbox.
            makespan, schedule = run_with_timeout(program_path, timeout_seconds=600)

            # Valid only if every sub-sequence covers indices 0..len-1;
            # all() short-circuits exactly like the original loop+break.
            valid = all(validate_schedule(seq) for seq in schedule)

            # Positive reciprocal score; invalid schedules score zero outright.
            combined_score = 1000 / (1 + makespan) * 1000 if valid else 0.0

            return {
                "validity": 1.0 if valid else 0.0,
                "makespan": float(makespan),
                "schedule": float(len(schedule)),
                "combined_score": float(combined_score),
            }

        except TimeoutError as e:
            print(f"Stage 1 evaluation timed out: {e}")
            return {"validity": 0.0, "combined_score": 0.0, "error": "Timeout"}
        except Exception as e:
            print(f"Stage 1 evaluation failed: {e}")
            print(traceback.format_exc())
            return {"validity": 0.0, "combined_score": 0.0, "error": str(e)}

    except Exception as e:
        # Defensive outer guard: only reachable if a handler above throws.
        print(f"Stage 1 evaluation failed completely: {e}")
        print(traceback.format_exc())
        return {"validity": 0.0, "combined_score": 0.0, "error": str(e)}
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def evaluate_stage2(program_path):
    """
    Second stage evaluation - full evaluation.

    Delegates to evaluate(); stage 2 runs the identical full pipeline.
    """
    return evaluate(program_path)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
if __name__ == "__main__":
    # Backwards-compat: bridges old evaluate() -> dict to the container JSON
    # protocol. wrapper.py is auto-injected at build time from
    # skydiscover/evaluation/wrapper.py.
    from wrapper import run

    # run() reads sys.argv[1] as the program path, invokes evaluate(), and
    # emits the metrics as JSON on stdout (see wrapper.py).
    run(evaluate)
|
benchmarks/ADRS/txn_scheduling/evaluator/requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
numpy
|
benchmarks/ADRS/txn_scheduling/evaluator/txn_simulator.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import json
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Workload:
    """
    Constructor for taking in transactions and representing them as
    (read/write, key, position, txn_len).

    The workload JSON maps txn names to space-separated op strings such as
    "r-17 w-5 *", where "r-K"/"w-K" is a read/write of key K and "*" is a
    no-op slot that only contributes to transaction length.
    """
    def __init__(self, workload_json, debug=False, verify=False):
        # Raw op strings, one per transaction, in JSON insertion order.
        self.workload = list(json.loads(workload_json).values())
        self.num_txns = len(self.workload)
        self.debug = debug
        self.verify = verify  # NOTE(review): stored but not used in this class
        self.txns = []  # list of txns (list of ops)
        self.only_hot_keys = False  # True#  — when True, keep only ops on "hot" keys
        self.hot_keys_thres = 100  # key ids above this are treated as non-hot
        self.hot_keys = set()
        self.hot_keys_map = {}
        self.sorted_len = None
        self.median_len = 0
        self.conflict_blocks = []
        self.conflict_blocks_map = {}
        self.m = 0

        self.get_txns()

    # get transactions from json and represent hot keys as (r/w, key, position, txn_len)

    def get_txns(self):
        """
        Loads transactions from json and represents them as
        (read/write, key, position, txn_len).

        Populates self.txns; position is 1-based within the transaction and
        txn_len counts all slots including "*" placeholders.
        """
        key_freqs = {}
        len_map = {}
        lens = []
        for txn in self.workload:  # .values()
            txn_ops = []
            ops = txn.split(" ")
            txn_len = len(ops)
            skip_txn = False
            count = 0
            # tmp1..tmp3 remember the last filtered-out op so a transaction
            # is never left completely empty in only_hot_keys mode.
            tmp1 = None
            tmp2 = None
            tmp3 = None
            # ops_map = {}
            # all_ops = []
            for i in range(len(ops)):
                op = ops[i]
                if op != "*":
                    vals = op.split("-")
                    if len(vals) != 2:
                        print(op, vals)
                    assert len(vals) == 2
                    # if i == 0 and int(vals[1]) > 500:
                    #     skip_txn = True
                    #     break
                    if self.only_hot_keys and int(vals[1]) > self.hot_keys_thres:
                        tmp1 = vals[0]
                        tmp2 = vals[1]
                        tmp3 = i + 1
                        continue
                    else:
                        count += 1
                    # sorted reads
                    # if vals[0] == 'r' and vals[1] not in ops_map:
                    #     ops_map[vals[1]] = (vals[0], vals[1], i+1, len(ops))
                    # else:
                    #     all_ops.append((vals[0], vals[1], i+1, len(ops)))
                    txn_ops.append((vals[0], vals[1], i + 1, len(ops)))
                    if vals[1] not in key_freqs:
                        key_freqs[vals[1]] = 1
                    else:
                        key_freqs[vals[1]] += 1
            # Per-transaction length statistics (computed but unused here).
            if len(ops) not in len_map:
                len_map[len(ops)] = 1
            else:
                len_map[len(ops)] += 1
            lens.append(len(ops))
            # sorted_keys = collections.OrderedDict(sorted(ops_map.items()))
            # txn_ops = list(sorted_keys.values()) + all_ops
            # assert len(txn_ops) == len(ops)
            # print(sorted_keys, txn_ops)
            if count == 0 and self.only_hot_keys:
                txn_ops.append((tmp1, tmp2, tmp3, len(ops)))
            # if skip_txn:
            #     continue
            self.txns.append(txn_ops)
        if self.debug:
            print(self.txns)
        self.num_txns = len(self.txns)

    # make sure key_map is roughly in order per key
    def insert_key_map(self, key, key_map, op_type, key_start, key_end, txn_id):
        # Walk the per-key lock list backwards to find the first entry whose
        # interval precedes (or overlaps compatibly with) the new one, then
        # insert just after it so the list stays roughly time-ordered.
        index = len(key_map[key]) - 1
        for op in key_map[key]:
            (_, s, e, _) = key_map[key][index]
            if e <= key_end:
                if s <= key_start:  # e <= key_start or
                    index += 1
                    break
            elif s <= key_start:
                index += 1
                break
            index -= 1
        if index == -1:
            # New interval precedes everything: prepend only if it is
            # strictly earlier than the current head; otherwise append.
            (_, s, e, _) = key_map[key][0]
            if key_end < e and key_start < s:
                key_map[key].insert(0, (op_type, key_start, key_end, txn_id))
            else:
                key_map[key].append((op_type, key_start, key_end, txn_id))
        else:
            key_map[key].insert(index, (op_type, key_start, key_end, txn_id))
        if self.debug:
            print("insert: ", index, key, key_start, key_end, key_map[key])

    # get the index of the first in a consecutive seq. of reads
    def find_earliest_read(self, key, key_map, txn_id):
        # must use latest read if part of same txn
        if key_map[key][-1][3] == txn_id:  # as we're adding to key_map
            print("TXN_ID")
            return key_map[key][-1][1]
        else:
            if self.debug:
                print(key, key_map[key], txn_id)
            # Scan backwards past the trailing run of shared read locks.
            index = len(key_map[key]) - 1
            while key_map[key][index][0] == "r":
                if index == -1:
                    break
                index -= 1
            if self.debug:
                print("index: ", index)
            if index == -1:  # can be first read
                index = 0
            else:
                index = key_map[key][index][2] + 1  # after first write found
            return index

    def get_opt_seq_cost(self, txn_seq):
        """
        Gets the makespan of a given sequence of transactions.

        Simulates per-key lock acquisition: each transaction is shifted
        right until none of its ops conflicts with locks already placed by
        earlier transactions in the sequence, then its ops' lock points are
        recorded in key_map.

        Returns
            Value representing the makespan (time to execute given schedule)
        """
        if self.debug:
            print("seq: ", txn_seq)
        key_map = {}  # <key, [(r/w, lock_start, lock_end, txn_id)]>
        prev_txn = txn_seq[0]
        total_cost = 0
        txn_id = 0
        cost_map = {}  # histogram of per-transaction incremental costs (debug aid)
        for i in range(len(txn_seq)):
            time = i
            txn = self.txns[txn_seq[i]]
            txn_start = 1
            txn_total_len = 0
            max_release = 0
            cost = 0
            # Pass 1: find the earliest start time at which every op clears
            # the existing locks on its key.
            for j in range(len(txn)):
                (op_type, key, pos, txn_len) = txn[j]
                if key in key_map:
                    key_start = 0
                    if key_map[key][-1][0] == "w" or op_type == "w":
                        key_start = (
                            key_map[key][-1][2] + 1
                        )  # get end time of latest lock end
                    else:
                        key_start = self.find_earliest_read(key, key_map, txn_id)
                        # key_start = key_map[key][-1][1] #pos # read locks shared
                    txn_start = max(
                        txn_start, key_start - pos + 1
                    )  # place txn start behind conflicting locks
                    if self.debug:
                        print(key, key_start, pos, txn_start)
                    max_release = max(
                        max_release, key_start - 1
                    )  # latest release of all locks
                txn_total_len = txn_len
            txn_end = txn_start + txn_total_len - 1
            cost = txn_end - total_cost  # max_release
            # if max_release == 0:
            if (
                txn_end <= total_cost
            ):  # in some cases, later txn in seq can finish first
                cost = 0
            # else:
            #     cost = txn_end - total_cost
            if cost in cost_map:
                cost_map[cost] += 1
            else:
                cost_map[cost] = 1
            total_cost += cost
            if self.debug:
                print(txn, txn_start, txn_end, max_release, cost, total_cost)

            curr_txn = txn_seq[i]
            prev_txn = curr_txn
            if self.debug:
                print(txn_start, txn_end, max_release, cost)

            # Pass 2: commit this transaction's lock points into key_map so
            # later transactions see them.
            for j in range(len(txn)):
                (op_type, key, pos, txn_len) = txn[j]
                key_start = txn_start + pos - 1
                if key in key_map:
                    if key_map[key][-1][0] == "w" or op_type == "w":
                        self.insert_key_map(
                            key, key_map, op_type, key_start, key_start, txn_id
                        )
                        # key_map[key].append((op_type, key_start, key_start, txn_id))
                    else:
                        self.insert_key_map(
                            key, key_map, op_type, key_start, key_start, txn_id
                        )
                        # key_map[key].append((op_type, key_start, key_start, txn_id))
                else:
                    key_map[key] = [(op_type, key_start, key_start, txn_id)]
                if self.debug:
                    print(key_map)
            txn_id += 1
        if self.debug:
            print(total_cost)

        # print(key_map)
        od = collections.OrderedDict(sorted(cost_map.items()))
        # print(od.keys())
        # print(od.values())
        return total_cost
|
| 229 |
+
|
benchmarks/ADRS/txn_scheduling/evaluator/workloads.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Workload data for scheduling experiments.
|
| 3 |
+
|
| 4 |
+
This module contains predefined workload configurations used for testing
|
| 5 |
+
transaction scheduling algorithms.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
WORKLOAD_1 = '{"txn0":"w-17 r-5 w-3 r-4 r-54 r-14 w-6 r-11 w-22 r-7 w-1 w-8 w-9 w-27 r-2 r-25", "txn1":"r-17 r-280 r-38 r-3 r-4 r-5 w-10 w-195 r-6 w-18 w-7 r-1 r-8 r-9 r-2 w-21", "txn2":"r-5 r-3 w-4 w-14 r-10 w-38 w-6 r-11 r-7 r-1 w-30 r-8 r-9 r-12 w-2 r-15", "txn3":"w-17 w-4 r-3 r-5 r-14 w-6 w-80 r-11 r-16 r-1 w-19 r-9 w-12 w-2 w-73 w-15", "txn4":"w-45 w-4 r-3 w-5 w-6 w-201 w-781 w-20 w-253 r-1 r-65 r-30 w-8 w-23 r-12 w-2", "txn5":"w-45 r-5 w-3 r-4 w-14 r-199 r-6 w-11 r-16 w-7 w-1 w-8 r-12 r-2 r-21 w-15", "txn6":"r-5 w-3 w-10 w-2 w-6 w-11 w-16 r-53 w-22 w-7 w-1 r-8 r-12 r-4 w-13 r-32", "txn7":"w-17 r-21 w-4 r-3 w-5 w-10 w-101 w-6 w-16 r-50 r-1 w-8 r-13 w-2 w-28 r-15", "txn8":"w-4 w-3 r-5 w-14 w-10 w-19 w-6 r-11 r-18 w-67 r-7 r-1 r-8 r-13 r-2 w-27", "txn9":"w-17 r-5 r-3 r-4 r-6 w-47 w-11 w-7 w-20 w-1 w-50 w-9 w-13 w-2 w-248 r-15", "txn10":"w-35 r-5 r-3 w-4 w-6 w-11 w-16 w-542 w-7 r-1 w-36 w-8 r-13 w-2 w-69 r-15", "txn11":"w-9 w-17 r-4 w-3 w-14 w-10 w-6 r-131 r-16 r-7 w-942 w-1 w-50 w-8 r-23 r-2", "txn12":"r-29 r-21 w-4 w-5 r-3 w-10 w-19 r-6 w-7 r-20 r-1 r-8 w-9 w-12 r-2 w-13", "txn13":"r-35 r-5 w-3 w-4 r-10 r-143 w-6 w-18 w-7 w-1 w-8 w-9 r-12 w-2 r-25 r-254", "txn14":"r-17 r-4 r-3 w-55 r-5 r-10 w-6 r-11 w-16 r-7 w-60 w-1 r-9 w-13 r-2 r-32", "txn15":"r-4 r-3 w-183 w-26 w-6 w-47 w-11 r-486 w-7 r-1 w-8 w-9 w-13 w-2 w-12 r-15", "txn16":"r-17 w-4 w-3 r-5 r-76 w-10 r-6 w-11 w-22 w-7 w-1 w-8 r-49 w-27 w-2 r-21", "txn17":"r-41 w-34 w-4 r-3 r-5 w-14 r-10 w-48 w-6 w-7 w-1 w-23 w-12 r-2 r-27 r-25", "txn18":"w-17 w-4 w-3 w-5 w-14 w-10 r-1173 w-6 w-23 w-28 w-7 w-1 w-19 r-9 w-2 r-206", "txn19":"w-4 w-3 r-5 r-352 r-6 r-11 r-23 w-22 w-7 w-1 r-8 r-9 w-41 r-2 w-12 r-171", "txn20":"r-21 r-4 r-3 r-26 w-10 r-6 r-11 r-22 w-7 w-60 r-1 r-9 r-2 r-102 r-32 r-15", "txn21":"r-17 w-4 w-3 w-5 r-14 r-12 w-11 r-7 r-1 r-74 r-36 w-19 w-9 w-13 w-2 w-21", "txn22":"w-63 r-4 w-3 r-5 w-38 r-10 r-6 w-11 w-16 w-7 r-1 r-8 w-13 r-2 w-21 w-15", "txn23":"w-5 w-4 w-3 w-10 r-94 w-11 w-18 r-39 r-7 r-61 r-1 r-9 
r-13 r-2 w-21 r-15", "txn24":"w-4 w-3 r-5 r-10 w-6 w-11 r-16 w-53 r-7 r-50 w-1 r-61 r-8 w-12 w-2 w-13", "txn25":"r-21 w-4 w-3 w-14 w-6 w-16 r-33 w-7 w-1 r-30 w-19 w-9 w-12 r-2 w-28 r-8", "txn26":"r-4 w-3 w-5 w-10 w-6 w-11 r-16 w-1289 r-331 w-71 w-7 r-1 w-8 w-9 w-13 w-2", "txn27":"r-34 r-4 r-3 w-5 w-14 w-76 w-10 r-6 r-11 r-53 w-7 r-1 r-13 w-2 w-25 w-15", "txn28":"r-29 r-4 w-3 w-5 w-10 r-6 w-11 r-16 w-61 r-20 r-1 r-8 w-9 r-13 r-2 r-27", "txn29":"r-2041 w-5 r-3 r-4 r-5270 r-10 r-14 w-6 r-11 w-87 r-1 r-8 w-9 w-12 r-2 w-13", "txn30":"w-45 r-5 r-4 w-3 r-51 w-10 w-6 r-16 r-7 r-1 r-30 w-8 r-9 w-12 w-2 w-28", "txn31":"r-34 w-4 w-232 r-3 w-14 w-5 w-54 w-6 r-25 w-7 r-20 r-1 r-49 w-13 w-2 r-32", "txn32":"r-5 r-3 w-14 r-10 w-58 w-109 r-2 r-11 w-204 r-7 w-1 r-30 r-37 r-8 w-4 r-565", "txn33":"w-17 w-5 w-4 r-3 r-26 w-10 w-6 w-24 w-438 w-7 w-1 r-8 r-9 r-2 w-25 w-15", "txn34":"w-72 w-4 r-3 w-5 w-52 w-10 w-96 w-1072 r-6 r-14 w-31 r-1 w-8 r-23 r-12 r-2", "txn35":"r-34 w-4 w-3 r-5 w-14 w-10 r-6 w-11 r-7 r-1 w-42 r-8 w-128 r-2 r-69 w-15", "txn36":"r-17 w-4 w-3 w-76 r-5 r-10 r-32 r-14 r-164 r-11 w-7 r-1 w-23 w-12 w-2 r-25", "txn37":"w-45 r-17 w-4 r-3 w-5 r-89 r-14 w-6 r-11 w-16 w-7 w-1 w-43 w-8 w-9 r-2", "txn38":"r-126 r-17 r-4 r-3 r-14 r-195 w-6 w-82 r-7 w-1 r-1529 w-8 w-9 w-13 w-2 w-15", "txn39":"w-623 r-4 w-3 w-5 w-10 r-6 w-11 r-7 r-31 w-1 w-36 r-8 w-12 r-2 r-69 w-15", "txn40":"r-5 r-4 r-3 w-6 r-197 r-16 r-18 w-25 w-7 w-65 r-1 r-8 w-23 w-2 r-32 r-15", "txn41":"w-5 w-3 r-4 r-14 w-10 w-19 r-6 w-23 w-7 w-31 r-1 w-8 r-9 r-12 w-2 w-21", "txn42":"r-21 r-72 r-5 r-3 w-4 w-6 r-47 w-142 r-50 w-1 r-8 w-9 w-41 r-2 w-13 r-25", "txn43":"r-17 w-4 r-3 r-5 w-10 w-108 r-6 r-11 w-22 w-7 w-1 w-8 w-9 w-12 r-2 w-28", "txn44":"w-34 w-4 w-3 r-5 r-54 r-10 r-23 w-16 w-7 r-1 w-36 w-177 r-8 r-9 w-42 w-2", "txn45":"w-5 w-3 w-4 w-38 r-10 w-16 r-49 r-20 w-7 w-1 r-66 r-36 w-8 r-9 w-2 r-21", "txn46":"w-112 w-17 w-4 r-3 w-5 r-38 r-10 r-6 r-7 w-1 w-77 w-8 w-9 r-13 w-2 r-21", "txn47":"w-265 r-534 w-4 r-3 w-5 w-10 r-6 
r-11 w-16 r-1 w-8 w-9 w-13 r-2 r-12 w-15", "txn48":"r-4 r-5 r-3 r-26 w-6 w-11 w-7 r-20 r-1 w-156 w-8 w-9 r-12 r-2 r-25 w-15", "txn49":"w-17 w-5 w-3 w-4 w-134 w-10 r-6 r-22 w-7 w-1 r-37 w-8 r-27 w-2 w-32 w-15", "txn50":"w-29 w-35 r-5 w-3 w-14 w-10 w-6 r-2 w-18 r-33 w-7 r-20 w-1 w-13 w-4 w-12", "txn51":"r-5 w-3 w-4 w-81 r-6 w-11 w-16 w-7 w-20 w-1 r-8 r-19 w-9 w-2 w-21 r-15", "txn52":"r-17 w-4 w-3 w-5 w-100 r-26 r-6 r-22 w-44 w-33 w-7 w-1 w-66 w-8 w-9 r-2", "txn53":"r-72 r-5 w-3 w-4 w-14 w-19 w-6 r-39 r-28 w-7 w-1 r-8 w-41 w-2 r-12 w-15", "txn54":"w-4 r-3 w-76 r-5 w-12 w-6 r-11 w-7 w-1 w-30 r-8 w-9 w-13 w-2 r-21 w-203", "txn55":"w-4 w-3 w-5 w-14 r-10 r-6 w-7 w-31 w-1 w-8 r-9 r-13 r-2 r-819 r-56 w-15", "txn56":"w-35 w-4 w-3 r-5 w-6 w-143 r-18 r-22 r-59 r-7 w-67 w-1 w-57 r-9 w-2 r-8", "txn57":"r-4 w-3 r-5 r-10 r-19 r-11 w-46 w-20 w-7 r-1 r-117 r-36 w-8 w-9 r-13 r-2", "txn58":"w-17 r-48 r-4 w-3 r-5 w-139 r-10 r-410 r-6 w-11 w-16 w-1 w-8 w-2 r-28 r-103", "txn59":"w-5 w-3 r-4 w-38 r-56 w-14 r-18 w-39 r-7 w-50 w-1 r-8 r-13 r-2 w-148 w-1520", "txn60":"r-17 r-5 w-3 r-4 w-14 w-81 w-375 w-6 r-11 r-18 r-1 w-8 w-9 r-12 w-2 w-13", "txn61":"w-17 w-34 w-4 w-3 r-5 r-14 r-10 r-19 w-6 r-11 w-18 w-16 w-1 w-8 r-2 r-15", "txn62":"r-17 r-4 w-3 r-5 r-83 r-10 r-19 w-6 w-11 w-1 w-30 w-8 w-13 w-2 w-28 w-25", "txn63":"w-5 w-3 w-4 r-14 r-10 w-278 r-6 r-16 r-7 r-1 r-81 w-9 w-12 w-2 w-13 r-8", "txn64":"w-35 w-21 w-5 w-3 r-4 w-34 w-18 r-7 r-31 w-1 r-8 r-9 r-12 w-2 w-13 r-15", "txn65":"w-4 w-3 w-13 w-14 r-10 r-6 r-23 w-18 r-7 w-1 r-30 r-8 w-9 r-27 r-2 w-12", "txn66":"r-15 w-4 w-3 r-5 w-38 w-48 r-10 w-6 r-20 w-1 w-66 r-24 w-9 w-13 r-2 w-8", "txn67":"r-5 r-3 r-6 w-2 w-11 w-18 r-46 r-7 r-1 r-30 w-3031 r-24 w-9 w-12 w-4 w-28", "txn68":"r-84 w-4 r-3 w-52 w-145 r-6 w-47 w-46 r-7 r-65 r-1 w-376 w-24 w-13 w-2 r-15", "txn69":"w-4 w-3 r-5 w-10 r-6 w-11 w-18 w-39 w-16 r-20 w-7 w-1 r-77 w-8 w-9 r-2", "txn70":"r-4 w-3 r-5 r-26 w-79 r-6 w-11 w-53 w-16 r-7 w-211 r-1 w-24 w-9 r-13 w-2", "txn71":"w-17 w-4 w-3 w-5 
w-14 w-10 r-6 r-11 w-7 r-20 r-1 w-19 r-9 w-12 r-2 w-8", "txn72":"r-4 r-3 w-5 r-52 w-10 w-48 r-14 r-6 w-23 w-11 w-7 w-1 w-8 w-9 r-13 r-2", "txn73":"w-34 r-4 w-3 w-5 w-14 r-10 w-39 r-33 r-7 w-156 r-1 w-8 r-9 w-13 w-2 w-119", "txn74":"w-29 w-84 r-5 r-3 r-4 r-10 w-6 w-11 w-33 w-7 r-1 r-42 r-23 w-55 r-2 r-25", "txn75":"w-98 w-4 w-3 w-5 r-10 r-278 w-6 r-68 r-18 r-33 w-7 r-1 w-9 w-12 r-2 w-25", "txn76":"r-17 r-5 w-3 r-4 r-26 w-6 r-11 w-71 w-7 w-50 w-1 r-8 r-9 w-13 w-2 r-15", "txn77":"r-5 w-3 r-48 w-14 w-6 w-2 w-23 r-28 w-7 w-31 w-1 w-36 w-9 w-4 r-21 w-25", "txn78":"w-343 r-72 w-17 w-5 r-3 w-4 r-189 r-51 w-11 w-33 r-7 r-1 w-8 r-9 r-2 r-28", "txn79":"r-112 r-34 w-4 w-3 w-5 w-10 r-6 r-25 r-7 w-1 w-8 w-9 w-13 w-2 r-21 r-32", "txn80":"r-4 r-3 r-5 w-10 r-6 w-11 r-18 w-33 w-7 r-31 w-1 w-8 w-9 r-13 r-2 w-12", "txn81":"r-4 r-3 r-5 r-147 w-6 w-23 r-18 r-16 w-24 w-7 w-1 w-66 r-8 w-9 w-12 w-2", "txn82":"w-35 w-34 w-48 w-3 w-17 w-14 r-5 w-10 w-109 w-6 r-4 r-16 w-1 r-9 r-12 w-2", "txn83":"w-4 r-3 w-5 w-10 w-224 w-6 r-11 w-18 w-16 w-85 r-7 w-1 w-8 r-9 w-12 r-2", "txn84":"w-21 w-5 r-3 w-83 w-4 w-10 w-16 r-7 r-1 w-8 w-19 w-9 r-13 w-2 r-12 r-15", "txn85":"r-4 w-3 w-209 r-5 w-10 w-11 w-24 w-18 r-16 r-891 r-7 w-67 w-1 w-8 w-2 w-21", "txn86":"r-41 w-4 r-3 w-5 w-6 r-70 w-273 w-7 r-1 w-77 r-8 r-13 r-2 r-21 w-25 w-15", "txn87":"w-29 r-35 r-34 w-4 r-3 w-10 r-128 w-6 w-18 w-7 w-31 w-1 w-9 r-12 r-2 w-32", "txn88":"r-435 r-34 r-5 r-3 r-4 r-14 r-10 w-6 w-49 r-7 w-1 w-8 r-23 w-27 w-2 w-13", "txn89":"r-45 w-126 w-5 r-3 w-4 w-6 r-16 w-39 w-7 r-31 w-1 w-8 r-9 r-12 r-2 w-25", "txn90":"r-4 w-5 w-3 r-26 r-10 w-51 w-6 w-23 w-33 r-1 w-42 r-9 w-41 r-2 r-25 r-40", "txn91":"w-149 w-17 w-5 w-4 r-3 w-14 r-6 r-11 w-18 w-7 r-78 w-1 w-9 r-12 w-2 r-15", "txn92":"r-34 r-5 r-3 r-4 r-26 w-10 r-6 w-11 r-18 w-7 r-1 w-37 w-9 r-12 r-2 r-25", "txn93":"r-4 r-3 w-5 w-121 w-233 r-10 r-11 r-16 r-7 r-1 w-547 w-8 w-9 r-13 r-2 w-12", "txn94":"w-5 w-4 r-3 w-14 r-109 r-68 w-6 w-644 r-22 w-7 w-1 w-8 r-9 w-2 w-28 w-15", "txn95":"w-302 
r-4 r-3 r-5 w-51 w-10 r-6 w-49 r-7 w-20 w-1 r-37 w-8 w-9 w-12 w-2", "txn96":"w-5 w-3 r-58 w-6 r-2 r-18 r-22 w-313 w-20 r-7 w-1 w-24 w-9 w-4 w-28 r-8", "txn97":"w-15 r-5 r-3 r-4 w-10 r-47 r-6 w-11 w-18 w-7 r-1 w-19 w-9 w-2 w-480 r-8", "txn98":"r-17 r-4 r-3 r-5 r-26 r-14 w-6 r-639 w-22 r-7 r-1 r-77 r-8 w-9 r-2 r-15", "txn99":"w-4 w-3 w-5 w-26 w-14 r-10 w-241 w-6 w-53 r-24 w-1 r-19 w-9 r-13 r-2 w-8"}'
|
| 9 |
+
|
| 10 |
+
WORKLOAD_2 = '{"txn0":"r-3 r-40 w-40 * * * * * * * * * * * * * * * * * *", "txn1":"r-1 r-13 w-13 * * * * * * * * * * * * * * * * * *", "txn2":"r-3 r-32 w-32 * * * * * * * * * * * * * * * * * *", "txn3":"r-2 r-29 w-29 * * * * * * * * * * * * * * * * * *", "txn4":"r-4 r-44 w-44 * * * * * * * * * * * * * * * * * *", "txn5":"r-2 r-23 w-23 * * * * * * * * * * * * * * * * * *", "txn6":"r-1 r-13 w-13 * * * * * * * * * * * * * * * * * *", "txn7":"r-3 r-34 w-34 * * * * * * * * * * * * * * * * * *", "txn8":"r-3 r-34 w-34 * * * * * * * * * * * * * * * * * *", "txn9":"r-2 r-25 w-25 * * * * * * * * * * * * * * * * * *", "txn10":"r-3 r-35 w-35 * * * * * * * * * * * * * * * * * *", "txn11":"r-4 r-43 w-43 * * * * * * * * * * * * * * * * * *", "txn12":"r-2 r-21 w-21 * * * * * * * * * * * * * * * * * *", "txn13":"r-2 r-30 w-30 * * * * * * * * * * * * * * * * * *", "txn14":"r-2 r-29 w-29 * * * * * * * * * * * * * * * * * *", "txn15":"r-2 r-27 w-27 * * * * * * * * * * * * * * * * * *", "txn16":"r-3 r-39 w-39 * * * * * * * * * * * * * * * * * *", "txn17":"r-3 r-34 w-34 * * * * * * * * * * * * * * * * * *", "txn18":"r-1 r-19 w-19 * * * * * * * * * * * * * * * * * *", "txn19":"r-4 r-48 w-48 * * * * * * * * * * * * * * * * * *", "txn20":"r-4 r-50 w-50 * * * * * * * * * * * * * * * * * *", "txn21":"r-4 r-41 w-41 * * * * * * * * * * * * * * * * * *", "txn22":"r-2 r-29 w-29 * * * * * * * * * * * * * * * * * *", "txn23":"r-1 r-17 w-17 * * * * * * * * * * * * * * * * * *", "txn24":"r-1 r-19 w-19 * * * * * * * * * * * * * * * * * *", "txn25":"r-2 r-23 w-23 * * * * * * * * * * * * * * * * * *", "txn26":"r-4 r-47 w-47 * * * * * * * * * * * * * * * * * *", "txn27":"r-4 r-42 w-42 * * * * * * * * * * * * * * * * * *", "txn28":"r-4 r-50 w-50 * * * * * * * * * * * * * * * * * *", "txn29":"r-4 r-41 w-41 * * * * * * * * * * * * * * * * * *", "txn30":"r-3 r-32 w-32 * * * * * * * * * * * * * * * * * *", "txn31":"r-2 r-24 w-24 * * * * * * * * * * * * * * * * * *", "txn32":"r-1 r-12 w-12 * * * * * * * * * * 
* * * * * * * *", "txn33":"r-4 r-42 w-42 * * * * * * * * * * * * * * * * * *", "txn34":"r-2 r-23 w-23 * * * * * * * * * * * * * * * * * *", "txn35":"r-3 r-32 w-32 * * * * * * * * * * * * * * * * * *", "txn36":"r-3 r-40 w-40 * * * * * * * * * * * * * * * * * *", "txn37":"r-4 r-50 w-50 * * * * * * * * * * * * * * * * * *", "txn38":"r-3 r-32 w-32 * * * * * * * * * * * * * * * * * *", "txn39":"r-2 r-26 w-26 * * * * * * * * * * * * * * * * * *", "txn40":"r-4 r-41 w-41 * * * * * * * * * * * * * * * * * *", "txn41":"r-1 r-17 w-17 * * * * * * * * * * * * * * * * * *", "txn42":"r-1 r-11 w-11 * * * * * * * * * * * * * * * * * *", "txn43":"r-1 r-20 w-20 * * * * * * * * * * * * * * * * * *", "txn44":"r-3 r-34 w-34 * * * * * * * * * * * * * * * * * *", "txn45":"r-3 r-31 w-31 * * * * * * * * * * * * * * * * * *", "txn46":"r-3 r-39 w-39 * * * * * * * * * * * * * * * * * *", "txn47":"r-3 r-31 w-31 * * * * * * * * * * * * * * * * * *", "txn48":"r-2 r-30 w-30 * * * * * * * * * * * * * * * * * *", "txn49":"r-1 r-13 w-13 * * * * * * * * * * * * * * * * * *", "txn50":"r-1 w-1 r-15 w-15 *", "txn51":"r-1 w-1 r-13 w-13 *", "txn52":"r-3 w-3 r-31 w-31 *", "txn53":"r-1 w-1 r-20 w-20 *", "txn54":"r-4 w-4 r-41 w-41 *", "txn55":"r-3 w-3 r-35 w-35 *", "txn56":"r-1 w-1 r-12 w-12 *", "txn57":"r-2 w-2 r-24 w-24 *", "txn58":"r-2 w-2 r-22 w-22 *", "txn59":"r-2 w-2 r-30 w-30 *", "txn60":"r-2 w-2 r-22 w-22 *", "txn61":"r-4 w-4 r-46 w-46 *", "txn62":"r-4 w-4 r-50 w-50 *", "txn63":"r-2 w-2 r-23 w-23 *", "txn64":"r-2 w-2 r-29 w-29 *", "txn65":"r-3 w-3 r-32 w-32 *", "txn66":"r-3 w-3 r-32 w-32 *", "txn67":"r-4 w-4 r-45 w-45 *", "txn68":"r-1 w-1 r-13 w-13 *", "txn69":"r-2 w-2 r-23 w-23 *", "txn70":"r-4 w-4 r-48 w-48 *", "txn71":"r-1 w-1 r-15 w-15 *", "txn72":"r-1 w-1 r-17 w-17 *", "txn73":"r-2 w-2 r-23 w-23 *", "txn74":"r-4 w-4 r-43 w-43 *", "txn75":"r-4 w-4 r-48 w-48 *", "txn76":"r-3 w-3 r-37 w-37 *", "txn77":"r-4 w-4 r-48 w-48 *", "txn78":"r-3 w-3 r-32 w-32 *", "txn79":"r-4 w-4 r-44 w-44 *", "txn80":"r-2 
w-2 r-30 w-30 *", "txn81":"r-1 w-1 r-19 w-19 *", "txn82":"r-2 w-2 r-22 w-22 *", "txn83":"r-4 w-4 r-41 w-41 *", "txn84":"r-3 w-3 r-33 w-33 *", "txn85":"r-3 w-3 r-34 w-34 *", "txn86":"r-1 w-1 r-18 w-18 *", "txn87":"r-3 w-3 r-39 w-39 *", "txn88":"r-3 w-3 r-38 w-38 *", "txn89":"r-2 w-2 r-24 w-24 *", "txn90":"r-4 w-4 r-46 w-46 *", "txn91":"r-4 w-4 r-49 w-49 *", "txn92":"r-4 w-4 r-43 w-43 *", "txn93":"r-4 w-4 r-47 w-47 *", "txn94":"r-2 w-2 r-28 w-28 *", "txn95":"r-4 w-4 r-41 w-41 *", "txn96":"r-3 w-3 r-39 w-39 *", "txn97":"r-1 w-1 r-15 w-15 *", "txn98":"r-1 w-1 r-11 w-11 *", "txn99":"r-3 w-3 r-39 w-39 *"}'
|
| 11 |
+
|
| 12 |
+
WORKLOAD_3 = '{"txn0":"r-4 * * * * * * * w-6", "txn1":"r-3 * * * * * * * w-7", "txn2":"r-5 * * * * * * * w-9", "txn3":"r-2 * * * * * * * w-8", "txn4":"r-3 * * * * * * * w-7", "txn5":"r-1 * * * * * * * w-8", "txn6":"r-1 * * * * * * * w-9", "txn7":"r-2 * * * * * * * w-7", "txn8":"r-5 * * * * * * * w-9", "txn9":"r-1 * * * * * * * w-7", "txn10":"r-3 * * * * * * * w-7", "txn11":"r-2 * * * * * * * w-9", "txn12":"r-1 * * * * * * * w-6", "txn13":"r-5 * * * * * * * w-6", "txn14":"r-4 * * * * * * * w-10", "txn15":"r-5 * * * * * * * w-10", "txn16":"r-5 * * * * * * * w-10", "txn17":"r-2 * * * * * * * w-6", "txn18":"r-1 * * * * * * * w-7", "txn19":"r-4 * * * * * * * w-6", "txn20":"r-2 * * * * * * * w-8", "txn21":"r-4 * * * * * * * w-10", "txn22":"r-4 * * * * * * * w-7", "txn23":"r-4 * * * * * * * w-7", "txn24":"r-5 * * * * * * * w-6", "txn25":"r-5 * * * * * * * w-10", "txn26":"r-5 * * * * * * * w-8", "txn27":"r-2 * * * * * * * w-9", "txn28":"r-5 * * * * * * * w-8", "txn29":"r-1 * * * * * * * w-8", "txn30":"r-4 * * * * * * * w-7", "txn31":"r-3 * * * * * * * w-10", "txn32":"r-3 * * * * * * * w-6", "txn33":"r-1 * * * * * * * w-6", "txn34":"r-4 * * * * * * * w-7", "txn35":"r-5 * * * * * * * w-7", "txn36":"r-3 * * * * * * * w-7", "txn37":"r-1 * * * * * * * w-8", "txn38":"r-3 * * * * * * * w-6", "txn39":"r-2 * * * * * * * w-6", "txn40":"r-1 * * * * * * * w-8", "txn41":"r-1 * * * * * * * w-10", "txn42":"r-5 * * * * * * * w-6", "txn43":"r-2 * * * * * * * w-6", "txn44":"r-3 * * * * * * * w-6", "txn45":"r-2 * * * * * * * w-6", "txn46":"r-5 * * * * * * * w-6", "txn47":"r-1 * * * * * * * w-9", "txn48":"r-2 * * * * * * * w-8", "txn49":"r-1 * * * * * * * w-10", "txn50":"r-6 * * * * * * * w-1", "txn51":"r-9 * * * * * * * w-2", "txn52":"r-6 * * * * * * * w-4", "txn53":"r-6 * * * * * * * w-1", "txn54":"r-7 * * * * * * * w-5", "txn55":"r-8 * * * * * * * w-1", "txn56":"r-9 * * * * * * * w-3", "txn57":"r-8 * * * * * * * w-5", "txn58":"r-8 * * * * * * * w-3", "txn59":"r-10 * * * * * * * w-1", 
"txn60":"r-8 * * * * * * * w-1", "txn61":"r-6 * * * * * * * w-2", "txn62":"r-10 * * * * * * * w-2", "txn63":"r-9 * * * * * * * w-3", "txn64":"r-9 * * * * * * * w-3", "txn65":"r-8 * * * * * * * w-2", "txn66":"r-6 * * * * * * * w-4", "txn67":"r-8 * * * * * * * w-2", "txn68":"r-9 * * * * * * * w-3", "txn69":"r-9 * * * * * * * w-2", "txn70":"r-6 * * * * * * * w-5", "txn71":"r-9 * * * * * * * w-1", "txn72":"r-10 * * * * * * * w-2", "txn73":"r-9 * * * * * * * w-1", "txn74":"r-6 * * * * * * * w-1", "txn75":"r-7 * * * * * * * w-5", "txn76":"r-7 * * * * * * * w-5", "txn77":"r-7 * * * * * * * w-2", "txn78":"r-10 * * * * * * * w-5", "txn79":"r-9 * * * * * * * w-3", "txn80":"r-10 * * * * * * * w-3", "txn81":"r-10 * * * * * * * w-2", "txn82":"r-7 * * * * * * * w-5", "txn83":"r-9 * * * * * * * w-4", "txn84":"r-8 * * * * * * * w-3", "txn85":"r-9 * * * * * * * w-3", "txn86":"r-10 * * * * * * * w-2", "txn87":"r-8 * * * * * * * w-2", "txn88":"r-10 * * * * * * * w-2", "txn89":"r-8 * * * * * * * w-5", "txn90":"r-10 * * * * * * * w-1", "txn91":"r-7 * * * * * * * w-1", "txn92":"r-6 * * * * * * * w-2", "txn93":"r-10 * * * * * * * w-5", "txn94":"r-10 * * * * * * * w-4", "txn95":"r-9 * * * * * * * w-2", "txn96":"r-7 * * * * * * * w-2", "txn97":"r-8 * * * * * * * w-3", "txn98":"r-9 * * * * * * * w-2", "txn99":"r-7 * * * * * * * w-2"}'
|
benchmarks/ADRS/txn_scheduling/evaluator/wrapper.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Backwards-compat wrapper for old Python-based evaluators.
|
| 2 |
+
|
| 3 |
+
Old-style evaluators define ``evaluate(program_path) -> dict``. This module
|
| 4 |
+
bridges that interface to the container JSON protocol expected by
|
| 5 |
+
ContainerizedEvaluator.
|
| 6 |
+
|
| 7 |
+
Usage — add this to the bottom of your evaluator.py::
|
| 8 |
+
|
| 9 |
+
if __name__ == "__main__":
|
| 10 |
+
from wrapper import run
|
| 11 |
+
run(evaluate)
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import json
|
| 15 |
+
import sys
|
| 16 |
+
import traceback
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def run(evaluate_fn):
    """Call *evaluate_fn*, format the result as container-protocol JSON on stdout.

    * Reads ``sys.argv[1]`` as the program path.
    * Redirects stdout → stderr while *evaluate_fn* runs so that debug prints
      don't contaminate the JSON output.
    * Separates numeric metrics from non-numeric artifacts.
    * Guarantees ``combined_score`` is always present in metrics.
    """
    if len(sys.argv) < 2:
        print("Usage: evaluator.py <program_path>", file=sys.stderr)
        sys.exit(1)

    program_path = sys.argv[1]

    def _emit_error(artifacts):
        # Error output still follows the container protocol, with a zero score.
        print(
            json.dumps(
                {
                    "status": "error",
                    "combined_score": 0.0,
                    "metrics": {"combined_score": 0.0},
                    "artifacts": artifacts,
                }
            )
        )

    # Redirect stdout → stderr during evaluation so debug prints from the
    # evaluator don't contaminate the JSON output on stdout. The ``finally``
    # guarantees stdout is restored even when evaluation exits via a
    # non-Exception path (e.g. KeyboardInterrupt), which the previous bare
    # restore did not cover.
    real_stdout = sys.stdout
    sys.stdout = sys.stderr
    try:
        result = evaluate_fn(program_path)
    except Exception as e:
        # Restore before printing: the except clause runs *before* finally,
        # and the error JSON must reach the real stdout.
        sys.stdout = real_stdout
        _emit_error({"error": str(e), "traceback": traceback.format_exc()})
        return
    finally:
        sys.stdout = real_stdout

    if not isinstance(result, dict):
        _emit_error(
            {"error": f"evaluate() returned {type(result).__name__}, expected dict"}
        )
        return

    # Separate numeric metrics from non-numeric artifacts. bool is tested
    # before int/float because bool is a subclass of int in Python.
    metrics = {}
    artifacts = {}
    for k, v in result.items():
        if isinstance(v, bool):
            metrics[k] = float(v)
        elif isinstance(v, (int, float)):
            metrics[k] = float(v)
        elif isinstance(v, str):
            artifacts[k] = v
        elif isinstance(v, (list, dict)):
            artifacts[k] = json.dumps(v)
        # Other value types (None, arbitrary objects) are intentionally dropped.

    if "combined_score" not in metrics:
        metrics["combined_score"] = 0.0

    # An "error" artifact key marks failure even when evaluate() returned.
    status = "error" if "error" in artifacts else "success"
    output = {
        "status": status,
        "combined_score": metrics["combined_score"],
        "metrics": metrics,
    }
    if artifacts:
        output["artifacts"] = artifacts

    print(json.dumps(output))
|
benchmarks/ADRS/txn_scheduling/initial_program.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
from txn_simulator import Workload
|
| 4 |
+
from workloads import WORKLOAD_1, WORKLOAD_2, WORKLOAD_3
|
| 5 |
+
|
| 6 |
+
# EVOLVE-BLOCK-START
|
| 7 |
+
|
| 8 |
+
def get_best_schedule(workload, num_seqs):
    """
    Get optimal schedule using greedy cost sampling strategy.

    NOTE(review): ``num_seqs`` is accepted but never read by this
    implementation — only the hard-coded ``get_greedy_cost_sampled(10, 1.0)``
    call at the bottom is used. Confirm whether multiple restarts were
    intended.

    Assumes ``workload`` exposes ``num_txns``, ``txns`` (indexable so that
    ``txns[t][0][3]`` is a transaction length), ``debug``, and
    ``get_opt_seq_cost(seq)`` — TODO confirm against txn_simulator.Workload.

    Returns:
        Tuple of (lowest makespan, corresponding schedule)
    """
    def get_greedy_cost_sampled(num_samples, sample_rate):
        # Greedy construction with a random starting transaction: repeatedly
        # sample up to ``num_samples`` candidates from the remaining pool and
        # append the one whose extended sequence has the lowest cost.
        start_txn = random.randint(0, workload.num_txns - 1)
        txn_seq = [start_txn]
        remaining_txns = [x for x in range(0, workload.num_txns)]
        remaining_txns.remove(start_txn)
        # running_cost is updated each round but never read afterwards.
        running_cost = workload.txns[start_txn][0][3]
        # min_costs = []
        # key_map, total_cost = workload.get_incremental_seq_cost(start_txn, {}, 0)
        for i in range(0, workload.num_txns - 1):
            min_cost = 100000  # MAX sentinel — assumes real costs stay below this
            min_relative_cost = 10
            min_txn = -1
            # min_index = 0
            # Candidates drawn this round; losers go back into remaining_txns.
            holdout_txns = []
            done = False
            key_maps = []

            # With probability (1 - sample_rate) pick a purely random next
            # transaction instead of a greedy one. At sample_rate=1.0 this
            # branch never triggers.
            sample = random.random()
            if sample > sample_rate:
                idx = random.randint(0, len(remaining_txns) - 1)
                t = remaining_txns[idx]
                txn_seq.append(t)
                remaining_txns.pop(idx)
                continue

            for j in range(0, num_samples):
                idx = 0
                if len(remaining_txns) > 1:
                    idx = random.randint(0, len(remaining_txns) - 1)
                else:
                    # Only one transaction left — take it and stop sampling.
                    done = True
                t = remaining_txns[idx]
                # Move the candidate into the holdout set so it cannot be
                # drawn twice in the same round.
                holdout_txns.append(remaining_txns.pop(idx))
                if workload.debug:
                    print(remaining_txns, holdout_txns)
                txn_len = workload.txns[t][0][3]
                test_seq = txn_seq.copy()
                test_seq.append(t)
                cost = 0
                # Full cost of the prefix extended by this candidate.
                cost = workload.get_opt_seq_cost(test_seq)
                if cost < min_cost:
                # if relative_cost < min_relative_cost:
                    min_cost = cost
                    min_txn = t
                    # min_relative_cost = relative_cost
                    # min_index = j
                if done:
                    break
            assert(min_txn != -1)
            running_cost = min_cost
            txn_seq.append(min_txn)
            # Return the non-chosen candidates to the pool.
            holdout_txns.remove(min_txn)
            remaining_txns.extend(holdout_txns)

            if workload.debug:
                print("min: ", min_txn, remaining_txns, holdout_txns, txn_seq)
        if workload.debug:
            print(txn_seq)
            print(len(set(txn_seq)))
        # Sanity check: every transaction scheduled exactly once.
        assert len(set(txn_seq)) == workload.num_txns
        # print(txn_seq)

        overall_cost = workload.get_opt_seq_cost(txn_seq)

        return overall_cost, txn_seq

    return get_greedy_cost_sampled(10, 1.0)
|
| 83 |
+
|
| 84 |
+
# EVOLVE-BLOCK-END
|
| 85 |
+
|
| 86 |
+
def get_random_costs():
    """Score the greedy scheduler on the three reference workloads.

    Builds a schedule for each of WORKLOAD_1..3 via ``get_best_schedule``
    and re-evaluates its cost with ``get_opt_seq_cost``.

    Returns:
        Tuple of (sum of the three schedule costs,
                  list of the three schedules in workload order).
    """
    # Fixes: removed the unused ``workload_size`` local and collapsed the
    # three copy-pasted per-workload stanzas into one loop.
    costs = []
    schedules = []
    for spec in (WORKLOAD_1, WORKLOAD_2, WORKLOAD_3):
        workload = Workload(spec)
        # num_seqs=10 matches the original call sites; get_best_schedule
        # currently ignores it — TODO confirm intended semantics.
        _makespan, schedule = get_best_schedule(workload, 10)
        costs.append(workload.get_opt_seq_cost(schedule))
        schedules.append(schedule)

    print(costs[0], costs[1], costs[2])
    return sum(costs), schedules
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
if __name__ == "__main__":
    # get_random_costs() returns (summed cost over the three workloads,
    # the three schedules); only the aggregate figure is reported here.
    total_cost, _schedules = get_random_costs()
    print(f"Makespan: {total_cost}")
|
benchmarks/ale_bench/README.md
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ALE-Bench: AtCoder Heuristic Contest Benchmark
|
| 2 |
+
|
| 3 |
+
10 problems from AtCoder Heuristic Contests (AHC), evaluated via the `ale_bench` package. Programs are written in C++ and scored on 50 public test cases during evolution. A separate private evaluator runs the full hidden test set for final ranking.
|
| 4 |
+
|
| 5 |
+
## Problems
|
| 6 |
+
|
| 7 |
+
| Problem | Description |
|
| 8 |
+
|---------|-------------|
|
| 9 |
+
| `ahc008` | Pet partitioning — place walls to create pet-free areas on a 30×30 grid over 300 turns |
|
| 10 |
+
| `ahc011` | AtCoder Heuristic Contest 11 |
|
| 11 |
+
| `ahc015` | AtCoder Heuristic Contest 15 |
|
| 12 |
+
| `ahc016` | AtCoder Heuristic Contest 16 |
|
| 13 |
+
| `ahc024` | AtCoder Heuristic Contest 24 |
|
| 14 |
+
| `ahc025` | Balance weighing — use a balance scale to divide N items into D equal-weight sets using Q queries |
|
| 15 |
+
| `ahc026` | AtCoder Heuristic Contest 26 |
|
| 16 |
+
| `ahc027` | AtCoder Heuristic Contest 27 |
|
| 17 |
+
| `ahc039` | AtCoder Heuristic Contest 39 |
|
| 18 |
+
| `ahc046` | AtCoder Heuristic Contest 46 |
|
| 19 |
+
|
| 20 |
+
## Quick Start
|
| 21 |
+
|
| 22 |
+
Run evolution on a single problem:
|
| 23 |
+
|
| 24 |
+
```bash
|
| 25 |
+
uv run skydiscover-run \
|
| 26 |
+
benchmarks/ale_bench/ale-bench-lite-problems/ahc025/initial_program.cpp \
|
| 27 |
+
benchmarks/ale_bench/ale-bench-lite-problems/ahc025/evaluator.py \
|
| 28 |
+
-c benchmarks/ale_bench/ale-bench-lite-problems/ahc025/config.yaml \
|
| 29 |
+
--search evox \
|
| 30 |
+
-i 100
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
## Scoring
|
| 34 |
+
|
| 35 |
+
During evolution, each iteration runs 50 public test cases:
|
| 36 |
+
|
| 37 |
+
```
|
| 38 |
+
combined_score = overall_absolute_score * optim_factor / num_public_cases
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
`optim_factor` is `+1` for maximize problems and `-1` for minimize problems (so `combined_score` is always higher-is-better).
|
| 42 |
+
|
| 43 |
+
## Private Evaluation
|
| 44 |
+
|
| 45 |
+
After evolution, evaluate the best program on the full private test set:
|
| 46 |
+
|
| 47 |
+
```bash
|
| 48 |
+
python benchmarks/ale_bench/private_eval.py \
|
| 49 |
+
--program-path path/to/best_program.cpp \
|
| 50 |
+
--problem-id ahc025
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
This runs 3 independent evaluations and reports the average private rank, performance score, and per-case pass/fail counts.
|
| 54 |
+
|
| 55 |
+
## Directory Structure
|
| 56 |
+
|
| 57 |
+
```
|
| 58 |
+
ale_bench/
|
| 59 |
+
├── ale-bench-lite-problems/
|
| 60 |
+
│ └── ahcXXX/
|
| 61 |
+
│ ├── initial_program.cpp # Starting C++ solution
|
| 62 |
+
│ ├── evaluator.py # Runs 50 public cases via ale_bench
|
| 63 |
+
│ └── config.yaml # Search config (cpp, diff-based, 100 iterations)
|
| 64 |
+
├── ale_agent_best/
|
| 65 |
+
│ └── ahcXXX.cpp # Best known solutions (reference)
|
| 66 |
+
└── private_eval.py # Full private set evaluation + ranking
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
## Requirements
|
| 70 |
+
|
| 71 |
+
Requires the `ale_bench` and `ale_bench_eval` packages. These are not in the default `uv sync` — install them separately per the ALE-Bench documentation.
|
| 72 |
+
|
| 73 |
+
## Config Defaults
|
| 74 |
+
|
| 75 |
+
All problems share the same base config:
|
| 76 |
+
|
| 77 |
+
```yaml
|
| 78 |
+
language: cpp
|
| 79 |
+
diff_based_evolution: true
|
| 80 |
+
max_iterations: 100
|
| 81 |
+
max_solution_length: 60000
|
| 82 |
+
evaluator:
|
| 83 |
+
timeout: 10000
|
| 84 |
+
```
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc008/best_program.cpp
ADDED
|
@@ -0,0 +1,612 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <algorithm>
|
| 6 |
+
// #include <map>
|
| 7 |
+
// #include <set>
|
| 8 |
+
#include <queue>
|
| 9 |
+
#include <cmath>
|
| 10 |
+
#include <iomanip>
|
| 11 |
+
#include <limits>
|
| 12 |
+
|
| 13 |
+
// --- Constants ---
|
| 14 |
+
constexpr int GRID_SIZE = 30;
|
| 15 |
+
constexpr int NUM_TURNS = 300;
|
| 16 |
+
constexpr int INF = std::numeric_limits<int>::max();
|
| 17 |
+
|
| 18 |
+
// 1-based grid coordinate pair (row r, column c) with equality and a
// row-major strict ordering so Points can be sorted and deduplicated.
struct Point {
    int r, c;

    bool operator==(const Point& rhs) const { return r == rhs.r && c == rhs.c; }
    bool operator!=(const Point& rhs) const { return !(*this == rhs); }
    // Lexicographic: rows first, then columns.
    bool operator<(const Point& rhs) const {
        return (r != rhs.r) ? (r < rhs.r) : (c < rhs.c);
    }
};
|
| 28 |
+
const Point INVALID_POINT = {-1, -1};
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
/* Tunable parameters:
|
| 32 |
+
- Keep strong penalty for standing outside inner safe region while evaluating stand cells.
|
| 33 |
+
- Remove adjacency bias to avoid over-focusing on contiguous walls; reduces conflicts and detours.
|
| 34 |
+
- Allow more turns before declaring "stuck" to finish longer routes to build spots. */
|
| 35 |
+
constexpr int STAND_OUTSIDE_INNER_SAFE_PENALTY = 1000;
|
| 36 |
+
constexpr int ADJACENT_WALL_PRIORITY_BONUS = 0;
|
| 37 |
+
constexpr int NEAR_PET_PENALTY_POINTS_PER_PET = 0;
|
| 38 |
+
constexpr int NEAR_PET_RADIUS = 2;
|
| 39 |
+
constexpr int MAX_STUCK_TURNS = 10;
|
| 40 |
+
|
| 41 |
+
// Directions: Up, Down, Left, Right (indices 0, 1, 2, 3)
|
| 42 |
+
const Point DIRS[4] = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}};
|
| 43 |
+
const char DIR_CHARS_BUILD[4] = {'u', 'd', 'l', 'r'};
|
| 44 |
+
const char DIR_CHARS_MOVE[4] = {'U', 'D', 'L', 'R'};
|
| 45 |
+
const char PET_MOVE_CHARS[4] = {'U', 'D', 'L', 'R'};
|
| 46 |
+
|
| 47 |
+
// One pet as reported by the judge input.
struct PetInfo {
    Point pos;  // current grid cell (1-based)
    int type;   // pet kind code as read from the input
    int id;     // index of this pet in pets_global_state
};
|
| 52 |
+
|
| 53 |
+
// Per-turn goal of a human agent, chosen in decide_human_actions().
enum class HumanObjective {
    BUILDING_WALLS,        // assigned wall cells remain unbuilt
    GOING_TO_SAFE_SPOT,    // walls done; walking toward final_stand_pos
    STAYING_IN_SAFE_SPOT,  // already parked on final_stand_pos
    REPOSITIONING_STUCK    // build progress stalled; relocate before retrying
    // FLEEING_PET_IN_PEN removed, simplified objective setting
};
|
| 60 |
+
|
| 61 |
+
// Per-human planning state: the row strip this human owns, its safe
// rectangle, the wall cells it must build, and progress bookkeeping.
struct HumanInfo {
    Point pos;  // current position
    int id;     // index of this human in humans_global_state

    // Inclusive row band [strip_r_start, strip_r_end] assigned to this human.
    int strip_r_start;
    int strip_r_end;

    // Inner rectangle of the strip considered safe to stand in
    // (upper-left / bottom-right corners).
    Point inner_safe_ul;
    Point inner_safe_br;
    Point final_stand_pos;  // cell to park on once all walls are built

    std::vector<Point> assigned_wall_cells;  // cells this human should turn into walls
    HumanObjective objective;
    int turns_stuck_building = 0;  // consecutive turns without build progress
    int next_build_idx = 0;
};
|
| 77 |
+
|
| 78 |
+
// --- Game Grid and State ---
|
| 79 |
+
bool is_impassable_grid_static[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 80 |
+
std::vector<PetInfo> pets_global_state;
|
| 81 |
+
std::vector<HumanInfo> humans_global_state;
|
| 82 |
+
int N_pets_global, M_humans_global;
|
| 83 |
+
|
| 84 |
+
Point bfs_parent_grid[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 85 |
+
bool bfs_visited_grid[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
// --- Utility Functions ---
|
| 89 |
+
bool is_valid_coord(int val) {
|
| 90 |
+
return val >= 1 && val <= GRID_SIZE;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
// True iff both coordinates of p lie on the board.
bool is_valid_point(Point p) {
    if (!is_valid_coord(p.r)) return false;
    return is_valid_coord(p.c);
}
|
| 96 |
+
|
| 97 |
+
// L1 distance between two on-board points; INF if either is off-board.
int manhattan_distance(Point p1, Point p2) {
    const bool both_on_board = is_valid_point(p1) && is_valid_point(p2);
    if (!both_on_board) return INF;
    const int dr = std::abs(p1.r - p2.r);
    const int dc = std::abs(p1.c - p2.c);
    return dr + dc;
}
|
| 101 |
+
|
| 102 |
+
// Number of the four neighbors of p that are blocked: either off the board
// (boundary counts as a wall) or marked impassable on the static grid.
int count_adjacent_walls_or_boundaries(Point p) {
    int blocked = 0;
    for (const Point& d : DIRS) {
        const Point neighbor = {p.r + d.r, p.c + d.c};
        if (!is_valid_point(neighbor)) {
            ++blocked;  // off-board edge acts as a boundary wall
        } else if (is_impassable_grid_static[neighbor.r][neighbor.c]) {
            ++blocked;  // built wall
        }
    }
    return blocked;
}
|
| 112 |
+
|
| 113 |
+
// A wall may go at wall_pos only if the cell is on the board, not already
// impassable, no pet occupies or is orthogonally adjacent to it, and no
// OTHER human stands on it (the builder themself may be adjacent).
bool can_theoretically_build_at(Point wall_pos, int builder_human_id) {
    if (!is_valid_point(wall_pos)) return false;
    if (is_impassable_grid_static[wall_pos.r][wall_pos.c]) return false;

    const bool pet_conflict = std::any_of(
        pets_global_state.begin(), pets_global_state.end(),
        [&](const PetInfo& pet) {
            return pet.pos == wall_pos ||
                   manhattan_distance(wall_pos, pet.pos) == 1;
        });
    if (pet_conflict) return false;

    for (const HumanInfo& human : humans_global_state) {
        // Only other humans standing on the target cell block the build.
        if (human.id != builder_human_id && human.pos == wall_pos) return false;
    }
    return true;
}
|
| 128 |
+
|
| 129 |
+
/* is_inside_strip:
|
| 130 |
+
- Returns whether point p lies within the row strip assigned to human h.
|
| 131 |
+
- Keeps builders mostly within their own horizontal band to reduce interference. */
|
| 132 |
+
// True iff p is an on-board cell inside human h's assigned row band;
// keeps builders mostly within their own strip to reduce interference.
bool is_inside_strip(Point p, const HumanInfo& h) {
    if (!is_valid_point(p)) return false;
    return p.r >= h.strip_r_start && p.r <= h.strip_r_end;
}
|
| 135 |
+
|
| 136 |
+
char get_bfs_move_char(Point start_pos, Point target_pos,
                       const std::vector<Point>& current_turn_tentative_walls) {
    /* Docstring:
       Breadth-first search from start_pos to target_pos over passable cells, avoiding
       any cells that are scheduled to become walls this turn (current_turn_tentative_walls).
       Returns the first-step move char toward target, or '.' if already at target or no path. */
    if (start_pos == target_pos) return '.';

    std::queue<Point> q;
    q.push(start_pos);

    // Reset the shared BFS scratch grids before each search.
    for(int r_bfs = 1; r_bfs <= GRID_SIZE; ++r_bfs) for(int c_bfs = 1; c_bfs <= GRID_SIZE; ++c_bfs) {
        bfs_visited_grid[r_bfs][c_bfs] = false;
        bfs_parent_grid[r_bfs][c_bfs] = INVALID_POINT;
    }
    if (!is_valid_point(start_pos)) return '.';
    bfs_visited_grid[start_pos.r][start_pos.c] = true;

    Point path_found_dest = INVALID_POINT;

    while(!q.empty()){
        Point curr = q.front();
        q.pop();

        for(int i_dir=0; i_dir < 4; ++i_dir){
            Point next_p = {curr.r + DIRS[i_dir].r, curr.c + DIRS[i_dir].c};

            if(is_valid_point(next_p) &&
               !is_impassable_grid_static[next_p.r][next_p.c] &&
               !bfs_visited_grid[next_p.r][next_p.c]){

                // Cells another human plans to wall off this turn are
                // treated as already impassable.
                bool is_tentative_wall_conflict = false;
                for(const auto& tw : current_turn_tentative_walls) {
                    if(next_p == tw) {
                        is_tentative_wall_conflict = true;
                        break;
                    }
                }
                if(is_tentative_wall_conflict) continue;

                bfs_visited_grid[next_p.r][next_p.c] = true;
                bfs_parent_grid[next_p.r][next_p.c] = curr;

                if (next_p == target_pos) {
                    path_found_dest = next_p;
                    // Early exit once the target is reached.
                    goto bfs_done_label;
                }
                q.push(next_p);
            }
        }
    }

bfs_done_label:;
    // r == -1 means INVALID_POINT, i.e. the target was never reached.
    if (path_found_dest.r == -1) return '.';

    // Walk the parent chain back until the cell whose parent is start_pos:
    // that cell is the first step of the path.
    Point current_step_in_path = path_found_dest;
    while(!(bfs_parent_grid[current_step_in_path.r][current_step_in_path.c] == INVALID_POINT) &&
          !(bfs_parent_grid[current_step_in_path.r][current_step_in_path.c] == start_pos)) {
        current_step_in_path = bfs_parent_grid[current_step_in_path.r][current_step_in_path.c];
    }

    // Translate the first step into its move character.
    for(int i_dir = 0; i_dir < 4; ++i_dir){
        if(start_pos.r + DIRS[i_dir].r == current_step_in_path.r &&
           start_pos.c + DIRS[i_dir].c == current_step_in_path.c){
            return DIR_CHARS_MOVE[i_dir];
        }
    }
    return '.';
}
|
| 205 |
+
|
| 206 |
+
/* compute_distances_from:
|
| 207 |
+
- BFS from start_pos over passable cells, avoiding cells that will become walls this turn.
|
| 208 |
+
- Fills dist[r][c] with shortest steps or INF if unreachable.
|
| 209 |
+
- Used to evaluate candidate stand positions with true reachability instead of Manhattan. */
|
| 210 |
+
// BFS shortest-step distances from start_pos over passable cells, skipping
// cells scheduled to become walls this turn; unreachable cells stay INF.
// Used to evaluate candidate stand positions with true reachability
// instead of Manhattan distance.
void compute_distances_from(Point start_pos, const std::vector<Point>& current_turn_tentative_walls, int dist[GRID_SIZE + 1][GRID_SIZE + 1]) {
    for (int row = 1; row <= GRID_SIZE; ++row)
        for (int col = 1; col <= GRID_SIZE; ++col)
            dist[row][col] = INF;

    if (!is_valid_point(start_pos)) return;

    std::queue<Point> frontier;
    dist[start_pos.r][start_pos.c] = 0;
    frontier.push(start_pos);

    while (!frontier.empty()) {
        const Point cell = frontier.front();
        frontier.pop();
        for (const Point& d : DIRS) {
            const Point nb = {cell.r + d.r, cell.c + d.c};
            if (!is_valid_point(nb)) continue;
            if (is_impassable_grid_static[nb.r][nb.c]) continue;
            // Treat this turn's planned walls as already built.
            const bool planned_wall =
                std::find(current_turn_tentative_walls.begin(),
                          current_turn_tentative_walls.end(), nb)
                != current_turn_tentative_walls.end();
            if (planned_wall) continue;
            if (dist[nb.r][nb.c] != INF) continue;  // already reached
            dist[nb.r][nb.c] = dist[cell.r][cell.c] + 1;
            frontier.push(nb);
        }
    }
}
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
void initialize_game() {
    /* Initialize global state:
       - Read pets and humans.
       - Partition rows into strips per human.
       - Assign horizontal border cells of each strip as build targets to create partitions. */
    std::cin >> N_pets_global;
    pets_global_state.resize(N_pets_global);
    for (int i = 0; i < N_pets_global; ++i) {
        pets_global_state[i].id = i;
        std::cin >> pets_global_state[i].pos.r >> pets_global_state[i].pos.c >> pets_global_state[i].type;
    }

    std::cin >> M_humans_global;
    humans_global_state.resize(M_humans_global);

    // Start with a fully passable grid.
    for(int r_grid=0; r_grid <= GRID_SIZE; ++r_grid) for(int c_grid=0; c_grid <= GRID_SIZE; ++c_grid) is_impassable_grid_static[r_grid][c_grid] = false;

    // Distribute the 30 rows as evenly as possible; the first
    // `remainder_heights` humans get one extra row.
    int base_strip_height = GRID_SIZE / M_humans_global;
    int remainder_heights = GRID_SIZE % M_humans_global;
    int current_r_start_coord = 1;

    for (int i = 0; i < M_humans_global; ++i) {
        HumanInfo& human = humans_global_state[i];
        human.id = i;
        std::cin >> human.pos.r >> human.pos.c;

        int strip_h_for_this_human = base_strip_height + (i < remainder_heights ? 1 : 0);
        human.strip_r_start = current_r_start_coord;
        human.strip_r_end = human.strip_r_start + strip_h_for_this_human - 1;
        human.strip_r_end = std::min(human.strip_r_end, GRID_SIZE);

        int actual_strip_h = human.strip_r_end - human.strip_r_start + 1;
        int actual_strip_w = GRID_SIZE;

        // Inner safe rectangle: shrink the strip by one cell on each side
        // when the strip is tall/wide enough (>= 3) to allow it.
        human.inner_safe_ul.r = human.strip_r_start + (actual_strip_h >= 3 ? 1 : 0);
        human.inner_safe_ul.c = 1 + (actual_strip_w >= 3 ? 1 : 0);
        human.inner_safe_br.r = human.strip_r_end - (actual_strip_h >= 3 ? 1 : 0);
        human.inner_safe_br.c = GRID_SIZE - (actual_strip_w >= 3 ? 1 : 0);

        // Guard against degenerate (inverted) rectangles on thin strips.
        if (human.inner_safe_ul.r > human.inner_safe_br.r) human.inner_safe_br.r = human.inner_safe_ul.r;
        if (human.inner_safe_ul.c > human.inner_safe_br.c) human.inner_safe_br.c = human.inner_safe_ul.c;

        // Park in the center of the inner safe rectangle, clamped to it.
        human.final_stand_pos = {
            human.inner_safe_ul.r + (human.inner_safe_br.r - human.inner_safe_ul.r) / 2,
            human.inner_safe_ul.c + (human.inner_safe_br.c - human.inner_safe_ul.c) / 2
        };
        human.final_stand_pos.r = std::max(human.inner_safe_ul.r, std::min(human.inner_safe_br.r, human.final_stand_pos.r));
        human.final_stand_pos.c = std::max(human.inner_safe_ul.c, std::min(human.inner_safe_br.c, human.final_stand_pos.c));
        if (!is_valid_point(human.final_stand_pos)) {
            human.final_stand_pos = {human.strip_r_start, 1};
        }

        human.assigned_wall_cells.clear();
        int r_s = human.strip_r_start;
        int r_e = human.strip_r_end;

        /* Assign boundary rows using parity to cooperate with neighbors.
           - For shared row r_s (i>0) and r_e (i<M-1), take columns where (c % 2) == (i & 1).
           - Skip outermost borders to keep area large.
           - No vertical edges to avoid shrinking strip area. */
        if (i > 0) {
            for (int c_coord = 1; c_coord <= GRID_SIZE; ++c_coord) {
                if ((c_coord & 1) == (i & 1)) human.assigned_wall_cells.push_back({r_s, c_coord});
            }
        }
        if (i < M_humans_global - 1) {
            for (int c_coord = 1; c_coord <= GRID_SIZE; ++c_coord) {
                if ((c_coord & 1) == (i & 1)) human.assigned_wall_cells.push_back({r_e, c_coord});
            }
        }

        // Sort + unique: a one-row strip has r_s == r_e, so the two passes
        // above can insert duplicate cells.
        std::sort(human.assigned_wall_cells.begin(), human.assigned_wall_cells.end());
        human.assigned_wall_cells.erase(
            std::unique(human.assigned_wall_cells.begin(), human.assigned_wall_cells.end()),
            human.assigned_wall_cells.end()
        );
        current_r_start_coord = human.strip_r_end + 1;
    }
}
|
| 318 |
+
|
| 319 |
+
std::string decide_human_actions() {
    /* Docstring:
    Decide one action character per human for this turn.

    Each turn, for each human:
    - If build targets remain, choose a reachable adjacent stand cell to a legal wall cell
    (respecting pet adjacency rules and intra-turn conflicts) via BFS and either build or move.
    - Use tentative walls to avoid planning moves into cells that are becoming impassable.
    - When done building, move to final_stand_pos; standing location does not affect R_i. */
    std::string actions_str(M_humans_global, '.');
    // Walls planned earlier this turn; later humans must not stand on or re-plan them.
    std::vector<Point> tentative_walls_this_turn;
    // Destination cell chosen by each human this turn (INVALID_POINT = no move planned).
    std::vector<Point> tentative_move_targets_this_turn(M_humans_global, INVALID_POINT);

    for (int i = 0; i < M_humans_global; ++i) {
        HumanInfo& human = humans_global_state[i];

        // Count assigned wall cells that are still open (not yet built).
        int unbuilt_walls_count = 0;
        for (const auto& wall_cell : human.assigned_wall_cells) {
            if (is_valid_point(wall_cell) && !is_impassable_grid_static[wall_cell.r][wall_cell.c]) {
                unbuilt_walls_count++;
            }
        }

        // Objective: keep building while targets remain, otherwise go to / hold the safe spot.
        if (unbuilt_walls_count == 0) {
            human.objective = (human.pos == human.final_stand_pos) ?
                HumanObjective::STAYING_IN_SAFE_SPOT :
                HumanObjective::GOING_TO_SAFE_SPOT;
        } else {
            human.objective = HumanObjective::BUILDING_WALLS;
        }

        // If building has stalled for too many turns, reposition instead of retrying in place.
        if(human.objective == HumanObjective::BUILDING_WALLS && human.turns_stuck_building >= MAX_STUCK_TURNS) {
            human.objective = HumanObjective::REPOSITIONING_STUCK;
        }

        char chosen_action_for_human_i = '.';
        if (human.objective == HumanObjective::STAYING_IN_SAFE_SPOT) {
            chosen_action_for_human_i = '.';
        } else if (human.objective == HumanObjective::GOING_TO_SAFE_SPOT ||
                   human.objective == HumanObjective::REPOSITIONING_STUCK) {
            if(human.objective == HumanObjective::REPOSITIONING_STUCK) human.turns_stuck_building = 0;

            chosen_action_for_human_i = get_bfs_move_char(human.pos, human.final_stand_pos, tentative_walls_this_turn);

        } else if (human.objective == HumanObjective::BUILDING_WALLS) {
            /* BUILDING_WALLS policy:
               1) Fast-path: if an adjacent assigned wall cell is legal now, build immediately.
               2) Otherwise, evaluate all candidate (wall, stand) pairs using true BFS distance
                  under this turn's tentative walls to choose the quickest reachable build.
                  - Skip walls already planned this turn by others.
                  - Avoid stand cells conflicting with this turn's planned walls/moves.
                  - Keep strong penalty for standing outside inner_safe to prefer safer paths. */
            // 1) Fast-path: adjacent immediate build
            bool did_fast_build = false;
            for (int k_dir = 0; k_dir < 4; ++k_dir) {
                Point wc = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                if (!is_valid_point(wc) || is_impassable_grid_static[wc.r][wc.c]) continue;
                bool is_assigned = false;
                for (const auto& a : human.assigned_wall_cells) { if (a == wc) { is_assigned = true; break; } }
                if (!is_assigned) continue;
                bool already_planned = false;
                for (const auto& tw : tentative_walls_this_turn) { if (tw == wc) { already_planned = true; break; } }
                if (already_planned) continue;
                if (!can_theoretically_build_at(wc, human.id)) continue;
                chosen_action_for_human_i = DIR_CHARS_BUILD[k_dir];
                // Update build cursor to this cell to continue along the row next
                for (int idx = 0; idx < (int)human.assigned_wall_cells.size(); ++idx) {
                    if (human.assigned_wall_cells[idx] == wc) { human.next_build_idx = idx; break; }
                }
                did_fast_build = true;
                break;
            }

            // 2) If not building immediately, pick best target via BFS distance
            if (!did_fast_build) {
                int dist[GRID_SIZE + 1][GRID_SIZE + 1];
                compute_distances_from(human.pos, tentative_walls_this_turn, dist);

                Point best_wall_target = INVALID_POINT;
                Point best_stand_point = INVALID_POINT;
                int best_idx = -1;
                int min_eval_score = INF;

                // Scan candidates starting at the build cursor so earlier-in-sequence
                // targets win ties (keeps walls growing contiguously along the strip).
                int total = (int)human.assigned_wall_cells.size();
                for (int step = 0; step < total; ++step) {
                    int idx = (human.next_build_idx + step) % total;
                    Point wall_coord = human.assigned_wall_cells[idx];
                    if (!is_valid_point(wall_coord) || is_impassable_grid_static[wall_coord.r][wall_coord.c]) continue;
                    if (!can_theoretically_build_at(wall_coord, human.id)) continue;
                    bool already_planned_wall = false;
                    for (const auto& tw : tentative_walls_this_turn) { if (tw == wall_coord) { already_planned_wall = true; break; } }
                    if (already_planned_wall) continue;

                    // Bonus for walls that extend existing walls or touch the boundary.
                    int adj_wall_bonus_val = count_adjacent_walls_or_boundaries(wall_coord) * ADJACENT_WALL_PRIORITY_BONUS;

                    for (int k_dir_idx = 0; k_dir_idx < 4; ++k_dir_idx) {
                        Point potential_stand_pos = {wall_coord.r + DIRS[k_dir_idx].r,
                                                     wall_coord.c + DIRS[k_dir_idx].c};
                        if (!is_valid_point(potential_stand_pos) || is_impassable_grid_static[potential_stand_pos.r][potential_stand_pos.c]) continue;
                        // Stay within own strip whenever possible
                        if (!is_inside_strip(potential_stand_pos, human)) continue;

                        bool conflict_with_tentative_wall_build_spot = false;
                        for (const auto& tw : tentative_walls_this_turn) { if (potential_stand_pos == tw) { conflict_with_tentative_wall_build_spot = true; break; } }
                        if (conflict_with_tentative_wall_build_spot) continue;

                        // Only earlier humans (j < i) have committed move targets so far.
                        bool conflict_with_tentative_move_dest = false;
                        for (int j = 0; j < i; ++j) {
                            if (tentative_move_targets_this_turn[j] == potential_stand_pos) { conflict_with_tentative_move_dest = true; break; }
                        }
                        if (conflict_with_tentative_move_dest) continue;

                        int d = dist[potential_stand_pos.r][potential_stand_pos.c];
                        if (d == INF) continue;
                        int current_eval_score = d - adj_wall_bonus_val;

                        bool is_inside_inner_safe_region =
                            (potential_stand_pos.r >= human.inner_safe_ul.r &&
                             potential_stand_pos.r <= human.inner_safe_br.r &&
                             potential_stand_pos.c >= human.inner_safe_ul.c &&
                             potential_stand_pos.c <= human.inner_safe_br.c);
                        if (!is_inside_inner_safe_region) current_eval_score += STAND_OUTSIDE_INNER_SAFE_PENALTY;

                        if (current_eval_score < min_eval_score) {
                            min_eval_score = current_eval_score;
                            best_wall_target = wall_coord;
                            best_stand_point = potential_stand_pos;
                            best_idx = idx;
                        } else if (current_eval_score == min_eval_score) {
                            // Tie-break deterministically: candidate nearest the build
                            // cursor wins, then smaller wall / stand coordinates.
                            int best_bias = (best_idx == -1) ? INF : (best_idx - human.next_build_idx + total) % total;
                            int cur_bias = step;
                            if (cur_bias < best_bias ||
                                (cur_bias == best_bias && (best_wall_target.r == -1 || wall_coord < best_wall_target ||
                                 (wall_coord == best_wall_target && potential_stand_pos < best_stand_point)))) {
                                best_wall_target = wall_coord;
                                best_stand_point = potential_stand_pos;
                                best_idx = idx;
                            }
                        }
                    }
                }

                if (best_wall_target.r != -1) {
                    human.turns_stuck_building = 0;
                    if (human.pos == best_stand_point) {
                        // Already on the chosen stand cell: build toward the target wall.
                        for (int k_dir = 0; k_dir < 4; ++k_dir) {
                            if (human.pos.r + DIRS[k_dir].r == best_wall_target.r &&
                                human.pos.c + DIRS[k_dir].c == best_wall_target.c) {
                                chosen_action_for_human_i = DIR_CHARS_BUILD[k_dir];
                                // Advance build cursor to reflect this built target
                                if (best_idx != -1) human.next_build_idx = best_idx;
                                break;
                            }
                        }
                    } else {
                        chosen_action_for_human_i = get_bfs_move_char(human.pos, best_stand_point, tentative_walls_this_turn);
                    }
                } else {
                    // No reachable build this turn: count it as stuck and fall back
                    // toward the final stand position.
                    if (unbuilt_walls_count > 0) human.turns_stuck_building++;
                    if (human.pos != human.final_stand_pos) {
                        chosen_action_for_human_i = get_bfs_move_char(human.pos, human.final_stand_pos, tentative_walls_this_turn);
                    } else {
                        chosen_action_for_human_i = '.';
                    }
                }
            }
        }

        actions_str[i] = chosen_action_for_human_i;

        // Record this human's plan so later humans in the same loop avoid conflicts.
        if (chosen_action_for_human_i != '.' && (chosen_action_for_human_i == 'u' || chosen_action_for_human_i == 'd' || chosen_action_for_human_i == 'l' || chosen_action_for_human_i == 'r')) {
            for(int k_dir=0; k_dir<4; ++k_dir) {
                if (chosen_action_for_human_i == DIR_CHARS_BUILD[k_dir]) {
                    Point built_wall_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(built_wall_pos)) {
                        tentative_walls_this_turn.push_back(built_wall_pos);
                    }
                    break;
                }
            }
        } else if (chosen_action_for_human_i != '.' && (chosen_action_for_human_i == 'U' || chosen_action_for_human_i == 'D' || chosen_action_for_human_i == 'L' || chosen_action_for_human_i == 'R')) {
            for(int k_dir=0; k_dir<4; ++k_dir) {
                if (chosen_action_for_human_i == DIR_CHARS_MOVE[k_dir]) {
                    Point target_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(target_pos)) {
                        tentative_move_targets_this_turn[i] = target_pos;
                    } else {
                        actions_str[i] = '.';
                    }
                    break;
                }
            }
        }
    }

    // Final conflict pass: cancel moves into cells that become walls this turn,
    // or into cells an earlier human already claimed as a move destination.
    for (int i = 0; i < M_humans_global; ++i) {
        if (actions_str[i] != '.' && (actions_str[i] == 'U' || actions_str[i] == 'D' || actions_str[i] == 'L' || actions_str[i] == 'R')) {
            Point target_move_sq = tentative_move_targets_this_turn[i];
            if (target_move_sq.r == -1) {
                actions_str[i] = '.';
                continue;
            }

            bool conflict_with_wall = false;
            for (const auto& wall_being_built : tentative_walls_this_turn) {
                if (target_move_sq == wall_being_built) {
                    conflict_with_wall = true;
                    break;
                }
            }
            if (conflict_with_wall) {
                actions_str[i] = '.';
            } else {
                for (int j = 0; j < i; ++j) {
                    if (actions_str[j] != '.' && (actions_str[j] == 'U' || actions_str[j] == 'D' || actions_str[j] == 'L' || actions_str[j] == 'R') &&
                        tentative_move_targets_this_turn[j] == target_move_sq) {
                        actions_str[i] = '.';
                        break;
                    }
                }
            }
        }
    }
    return actions_str;
}
|
| 542 |
+
|
| 543 |
+
void apply_actions_and_update_state(const std::string& actions_str_final) {
    /* Apply chosen actions and update state:
    - First apply all builds simultaneously.
    - Then apply all moves that do not collide with new walls.
    - Finally, read and apply pet movements. */

    // Pass 1: builds (lowercase u/d/l/r). All walls land before any human moves,
    // so a move into a just-built cell is naturally rejected in pass 2.
    for (int i = 0; i < M_humans_global; ++i) {
        char action = actions_str_final[i];
        HumanInfo& human = humans_global_state[i];
        if (action != '.' && (action == 'u' || action == 'd' || action == 'l' || action == 'r')) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if (action == DIR_CHARS_BUILD[k_dir]) {
                    Point wall_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(wall_pos) && !is_impassable_grid_static[wall_pos.r][wall_pos.c]) {
                        is_impassable_grid_static[wall_pos.r][wall_pos.c] = true;
                    }
                    break;
                }
            }
        }
    }

    // Pass 2: moves (uppercase U/D/L/R); a move into an impassable cell is a no-op.
    for (int i = 0; i < M_humans_global; ++i) {
        char action = actions_str_final[i];
        HumanInfo& human = humans_global_state[i];
        if (action != '.' && (action == 'U' || action == 'D' || action == 'L' || action == 'R')) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if (action == DIR_CHARS_MOVE[k_dir]) {
                    Point next_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(next_pos) && !is_impassable_grid_static[next_pos.r][next_pos.c]) {
                        human.pos = next_pos;
                    }
                    break;
                }
            }
        }
    }

    // Pass 3: read one movement string per pet from the judge ("." = stayed put)
    // and replay each step onto the tracked pet position.
    for (int i = 0; i < N_pets_global; ++i) {
        std::string pet_moves_str;
        std::cin >> pet_moves_str;
        if (pet_moves_str == ".") continue;

        for (char move_char : pet_moves_str) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if(move_char == PET_MOVE_CHARS[k_dir]){
                    pets_global_state[i].pos.r += DIRS[k_dir].r;
                    pets_global_state[i].pos.c += DIRS[k_dir].c;
                    break;
                }
            }
        }
    }
}
|
| 596 |
+
|
| 597 |
+
int main() {
|
| 598 |
+
std::ios_base::sync_with_stdio(false);
|
| 599 |
+
std::cin.tie(NULL);
|
| 600 |
+
|
| 601 |
+
initialize_game();
|
| 602 |
+
|
| 603 |
+
for (int turn_idx = 0; turn_idx < NUM_TURNS; ++turn_idx) {
|
| 604 |
+
std::string actions_to_perform = decide_human_actions();
|
| 605 |
+
std::cout << actions_to_perform << std::endl;
|
| 606 |
+
|
| 607 |
+
apply_actions_and_update_state(actions_to_perform);
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
return 0;
|
| 611 |
+
}
|
| 612 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc008/evaluator.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import traceback
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from ale_bench.result import CaseResult, JudgeResult, Result
|
| 4 |
+
from ale_bench_eval.safe_ale_session import start_ale_bench_session
|
| 5 |
+
import logging
|
| 6 |
+
import sys
|
| 7 |
+
logger = logging.getLogger(__name__ + "_" + "ALE_BENCH_EVALUATOR")
|
| 8 |
+
|
| 9 |
+
def result_feedback(result: Result) -> CaseResult:
    """Pick a representative case result for feedback.

    If the run was accepted overall, return the first case. Otherwise return
    the first case whose verdict matches the overall (failing) verdict,
    falling back to case 0 if none matches.
    """
    if result.overall_judge_result == JudgeResult.ACCEPTED:
        return result.case_results[0]
    chosen_idx = next(
        (
            idx
            for idx, case_result in enumerate(result.case_results)
            if case_result.judge_result == result.overall_judge_result
        ),
        0,
    )
    return result.case_results[chosen_idx]
|
| 19 |
+
|
| 20 |
+
def evaluate(program_path):
    """Evaluate a candidate program on the ALE-Bench lite public cases for ahc008.

    Args:
        program_path: Path to the candidate source file. EVOLVE-BLOCK markers
            are stripped before the code is submitted for evaluation.

    Returns:
        On success, a dict with judge result, scores, and resource metrics.
        On failure, a dict with "overall_score" 0.0 and an "error" message.
    """
    problem_id = "ahc008"
    logger.info(f"Evaluating program {program_path} for problem {problem_id} in ale bench evaluator")
    session = None
    try:
        logger.info("Starting ALE-Bench session")
        session = start_ale_bench_session(
            problem_id=problem_id,
            lite_version=True,
            num_workers=13,
        )
        logger.info("ALE-Bench session started")
        if not session:
            raise RuntimeError("Failed to start or restart the session.")
        # +1 for maximization problems, -1 for minimization (so higher is better).
        optim_factor = 1 if session.problem.metadata.score_type == "maximize" else -1
        code = Path(program_path).read_text().replace("# EVOLVE-BLOCK-START", "").replace("# EVOLVE-BLOCK-END", "").strip()
        logger.info("Code extracted")
        num_public_cases = 50
        cases = session.case_gen(list(range(num_public_cases)))
        public_result = session.case_eval(
            cases, code, code_language="cpp20", skip_local_visualization=True
        )
        logger.info("Public evaluation completed")
        extracted_case = result_feedback(public_result)
        logger.info("Result feedback completed")
        # Mean per-case score, sign-adjusted so larger is always better.
        combined_score = public_result.overall_absolute_score * optim_factor / num_public_cases
        if public_result.overall_judge_result != JudgeResult.ACCEPTED and optim_factor == -1:
            # A rejected run on a minimization problem gets the worst possible score.
            combined_score = -sys.maxsize - 1
        return {
            "judge_result": public_result.overall_judge_result.value,
            "overall_score": public_result.overall_absolute_score,
            "max_execution_time_sec": max([case_result.execution_time for case_result in public_result.case_results]),
            "max_memory_usage_mib": max([case_result.memory_usage for case_result in public_result.case_results]) // 1024 // 1024,
            "standard_error": extracted_case.error_str,
            "message": extracted_case.message,
            "combined_score": combined_score,
        }
    except Exception as e:
        logger.error(f"Evaluation failed completely: {str(e)}")
        logger.error(traceback.format_exc())
        return {
            "overall_score": 0.0,
            "error": str(e),
        }
    finally:
        # Always release the session, even when evaluation raises
        # (the original leaked it on any failure after startup).
        if session is not None:
            try:
                session.close()
                logger.info("ALE-Bench session closed")
            except Exception:
                logger.warning("Failed to close ALE-Bench session", exc_info=True)
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc015/evaluator.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import traceback
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from ale_bench.result import CaseResult, JudgeResult, Result
|
| 4 |
+
from ale_bench_eval.safe_ale_session import start_ale_bench_session
|
| 5 |
+
import logging
|
| 6 |
+
import sys
|
| 7 |
+
logger = logging.getLogger(__name__ + "_" + "ALE_BENCH_EVALUATOR")
|
| 8 |
+
|
| 9 |
+
def result_feedback(result: Result) -> CaseResult:
    """Pick a representative case result for feedback.

    If the run was accepted overall, return the first case. Otherwise return
    the first case whose verdict matches the overall (failing) verdict,
    falling back to case 0 if none matches.
    """
    if result.overall_judge_result == JudgeResult.ACCEPTED:
        return result.case_results[0]
    chosen_idx = next(
        (
            idx
            for idx, case_result in enumerate(result.case_results)
            if case_result.judge_result == result.overall_judge_result
        ),
        0,
    )
    return result.case_results[chosen_idx]
|
| 19 |
+
|
| 20 |
+
def evaluate(program_path):
    """Evaluate a candidate program on the ALE-Bench lite public cases for ahc015.

    Args:
        program_path: Path to the candidate source file. EVOLVE-BLOCK markers
            are stripped before the code is submitted for evaluation.

    Returns:
        On success, a dict with judge result, scores, and resource metrics.
        On failure, a dict with "overall_score" 0.0 and an "error" message.
    """
    problem_id = "ahc015"
    logger.info(f"Evaluating program {program_path} for problem {problem_id} in ale bench evaluator")
    session = None
    try:
        logger.info("Starting ALE-Bench session")
        session = start_ale_bench_session(
            problem_id=problem_id,
            lite_version=True,
            num_workers=13,
        )
        logger.info("ALE-Bench session started")
        if not session:
            raise RuntimeError("Failed to start or restart the session.")
        # +1 for maximization problems, -1 for minimization (so higher is better).
        optim_factor = 1 if session.problem.metadata.score_type == "maximize" else -1
        code = Path(program_path).read_text().replace("# EVOLVE-BLOCK-START", "").replace("# EVOLVE-BLOCK-END", "").strip()
        logger.info("Code extracted")
        num_public_cases = 50
        cases = session.case_gen(list(range(num_public_cases)))
        public_result = session.case_eval(
            cases, code, code_language="cpp20", skip_local_visualization=True
        )
        logger.info("Public evaluation completed")
        extracted_case = result_feedback(public_result)
        logger.info("Result feedback completed")
        # Mean per-case score, sign-adjusted so larger is always better.
        combined_score = public_result.overall_absolute_score * optim_factor / num_public_cases
        if public_result.overall_judge_result != JudgeResult.ACCEPTED and optim_factor == -1:
            # A rejected run on a minimization problem gets the worst possible score.
            combined_score = -sys.maxsize - 1
        return {
            "judge_result": public_result.overall_judge_result.value,
            "overall_score": public_result.overall_absolute_score,
            "max_execution_time_sec": max([case_result.execution_time for case_result in public_result.case_results]),
            "max_memory_usage_mib": max([case_result.memory_usage for case_result in public_result.case_results]) // 1024 // 1024,
            "standard_error": extracted_case.error_str,
            "message": extracted_case.message,
            "combined_score": combined_score,
        }
    except Exception as e:
        logger.error(f"Evaluation failed completely: {str(e)}")
        logger.error(traceback.format_exc())
        return {
            "overall_score": 0.0,
            "error": str(e),
        }
    finally:
        # Always release the session, even when evaluation raises
        # (the original leaked it on any failure after startup).
        if session is not None:
            try:
                session.close()
                logger.info("ALE-Bench session closed")
            except Exception:
                logger.warning("Failed to close ALE-Bench session", exc_info=True)
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc039/best_program.cpp
ADDED
|
@@ -0,0 +1,1003 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <algorithm>
|
| 5 |
+
#include <chrono>
|
| 6 |
+
#include <random>
|
| 7 |
+
#include <set>
|
| 8 |
+
#include <unordered_set>
|
| 9 |
+
#include <cmath>
|
| 10 |
+
#include <iomanip>
|
| 11 |
+
#include <numeric> // For std::iota
|
| 12 |
+
#include <string>
|
| 13 |
+
#include <map>
|
| 14 |
+
|
| 15 |
+
// === MACROS AND CONSTANTS ===
// Problem limits (coordinates, polygon vertex count, perimeter budget).
const int MAX_COORD_VAL = 100000;
const int MAX_VERTICES = 1000;
const int MAX_PERIMETER = 400000;
// Reserved slack subtracted from the wall-clock budget. // Increased safety margin
const double TIME_LIMIT_SECONDS_SAFETY_MARGIN = 0.1;
// Total allowed runtime in seconds (non-const: may be adjusted at runtime).
double ACTUAL_TIME_LIMIT_SECONDS = 2.0;
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
// === RANDOM NUMBER GENERATION ===
|
| 24 |
+
// === RANDOM NUMBER GENERATION ===
// xorshift64 PRNG (Marsaglia 13/7/17 triple), seeded from the steady clock
// mixed with two random_device draws.
struct XorShift {
    uint64_t x;

    XorShift()
        : x(std::chrono::steady_clock::now().time_since_epoch().count() ^
            ((uint64_t)std::random_device()() << 32) ^ std::random_device()()) {}

    // One xorshift64 step; returns the new state.
    uint64_t next() {
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        return x;
    }

    // Integer in [0, n) via modulo; returns 0 for non-positive n.
    int next_int(int n) {
        if (n <= 0) {
            return 0;
        }
        return next() % n;
    }

    // Integer in [a, b]; returns a when the range is empty (a > b).
    int next_int(int a, int b) {
        if (a > b) {
            return a;
        }
        return a + next_int(b - a + 1);
    }

    // Real number in [0, 1].
    double next_double() { return next() / (double)UINT64_MAX; }
};
XorShift rng;
|
| 38 |
+
|
| 39 |
+
// === TIMER ===
|
| 40 |
+
// Wall-clock stopwatch built on steady_clock.
struct Timer {
    std::chrono::steady_clock::time_point start_time;

    Timer() { reset(); }

    // Restart the stopwatch from "now".
    void reset() { start_time = std::chrono::steady_clock::now(); }

    // Seconds elapsed since construction or the last reset().
    double elapsed() const {
        const auto now = std::chrono::steady_clock::now();
        return std::chrono::duration_cast<std::chrono::duration<double>>(now - start_time).count();
    }
};
Timer global_timer;
|
| 50 |
+
|
| 51 |
+
// === GEOMETRIC STRUCTURES ===
|
| 52 |
+
// === GEOMETRIC STRUCTURES ===
// Integer 2D point; ordered lexicographically (x first, then y).
struct Point {
    int x, y;

    bool operator<(const Point& other) const {
        if (x != other.x) {
            return x < other.x;
        }
        return y < other.y;
    }

    bool operator==(const Point& other) const {
        return x == other.x && y == other.y;
    }

    // Component-wise difference: the vector from `other` to this point.
    Point operator-(const Point& other) const {
        return {x - other.x, y - other.y};
    }
};
|
| 65 |
+
|
| 66 |
+
struct PointHash {
|
| 67 |
+
std::size_t operator()(const Point& p) const {
|
| 68 |
+
auto h1 = std::hash<int>{}(p.x);
|
| 69 |
+
auto h2 = std::hash<int>{}(p.y);
|
| 70 |
+
// Combining hashes: simple XOR might not be best, but often good enough.
|
| 71 |
+
// For Point, a common way is boost::hash_combine.
|
| 72 |
+
// h1 ^ (h2 << 1) is a common way that's okay.
|
| 73 |
+
return h1 ^ (h2 << 1);
|
| 74 |
+
}
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
// z-component of the 2D cross product a x b, widened to 64-bit before
// multiplying so coordinates up to 1e5 cannot overflow.
long long cross_product(Point a, Point b) {
    const long long lhs = (long long)a.x * b.y;
    const long long rhs = (long long)a.y * b.x;
    return lhs - rhs;
}
|
| 80 |
+
|
| 81 |
+
// A single fish: its position and species.
struct Fish {
    Point p;
    int type; // 1 for mackerel, -1 for sardine
};
// All fish read from input; indexed by the KD-tree nodes below.
std::vector<Fish> all_fish_structs;
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
// === KD-TREE ===
|
| 89 |
+
// === KD-TREE ===
// Node of a 2D KD-tree over fish positions.
struct KDNode {
    Point pt;              // coordinates of the fish stored at this node
    int axis;              // split axis: 0 = x, 1 = y
    KDNode *left = nullptr, *right = nullptr;
    int fish_struct_idx = -1; // index into all_fish_structs (-1 = unset)
};
KDNode* fish_kdtree_root = nullptr;
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
KDNode* build_kdtree(std::vector<int>& point_indices, int l, int r, int axis) {
|
| 99 |
+
if (l > r) return nullptr;
|
| 100 |
+
int mid = l + (r - l) / 2;
|
| 101 |
+
|
| 102 |
+
std::nth_element(point_indices.begin() + l, point_indices.begin() + mid, point_indices.begin() + r + 1,
|
| 103 |
+
[&](int a_idx, int b_idx) {
|
| 104 |
+
const Point& pa = all_fish_structs[a_idx].p;
|
| 105 |
+
const Point& pb = all_fish_structs[b_idx].p;
|
| 106 |
+
if (axis == 0) return pa.x < pb.x;
|
| 107 |
+
return pa.y < pb.y;
|
| 108 |
+
});
|
| 109 |
+
|
| 110 |
+
KDNode* node = new KDNode();
|
| 111 |
+
node->fish_struct_idx = point_indices[mid];
|
| 112 |
+
node->pt = all_fish_structs[node->fish_struct_idx].p;
|
| 113 |
+
node->axis = axis;
|
| 114 |
+
|
| 115 |
+
node->left = build_kdtree(point_indices, l, mid - 1, 1 - axis);
|
| 116 |
+
node->right = build_kdtree(point_indices, mid + 1, r, 1 - axis);
|
| 117 |
+
return node;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
/*
|
| 121 |
+
Docstring:
|
| 122 |
+
KD-tree rectangle query (count-only).
|
| 123 |
+
Traverses the KD-tree and increments mackerel/sardine counters for points within the axis-aligned query rectangle,
|
| 124 |
+
avoiding materializing index lists (faster and less memory traffic).
|
| 125 |
+
*/
|
| 126 |
+
void query_kdtree_rectangle(KDNode* node, int min_x, int max_x, int min_y, int max_y, int& cnt_m, int& cnt_s) {
|
| 127 |
+
if (!node || min_x > max_x || min_y > max_y) return;
|
| 128 |
+
|
| 129 |
+
const Point& pt = node->pt;
|
| 130 |
+
if (pt.x >= min_x && pt.x <= max_x && pt.y >= min_y && pt.y <= max_y) {
|
| 131 |
+
if (all_fish_structs[node->fish_struct_idx].type == 1) ++cnt_m; else ++cnt_s;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
if (node->axis == 0) { // Split by X
|
| 135 |
+
if (node->left && min_x <= node->pt.x) query_kdtree_rectangle(node->left, min_x, max_x, min_y, max_y, cnt_m, cnt_s);
|
| 136 |
+
if (node->right && max_x >= node->pt.x) query_kdtree_rectangle(node->right, min_x, max_x, min_y, max_y, cnt_m, cnt_s);
|
| 137 |
+
} else { // Split by Y
|
| 138 |
+
if (node->left && min_y <= node->pt.y) query_kdtree_rectangle(node->left, min_x, max_x, min_y, max_y, cnt_m, cnt_s);
|
| 139 |
+
if (node->right && max_y >= node->pt.y) query_kdtree_rectangle(node->right, min_x, max_x, min_y, max_y, cnt_m, cnt_s);
|
| 140 |
+
}
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
// Post-order recursive deallocation of every KD-tree node.
void delete_kdtree(KDNode* node) {
    if (node == nullptr) return;
    KDNode* const lhs = node->left;
    KDNode* const rhs = node->right;
    delete_kdtree(lhs);
    delete_kdtree(rhs);
    delete node;
}
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
// === POLYGON UTILITIES ===
|
| 152 |
+
long long calculate_perimeter(const std::vector<Point>& poly) {
|
| 153 |
+
if (poly.size() < 2) return 0;
|
| 154 |
+
long long perimeter = 0;
|
| 155 |
+
for (size_t i = 0; i < poly.size(); ++i) {
|
| 156 |
+
const Point& p1 = poly[i];
|
| 157 |
+
const Point& p2 = poly[(i + 1) % poly.size()];
|
| 158 |
+
perimeter += std::abs(p1.x - p2.x) + std::abs(p1.y - p2.y);
|
| 159 |
+
}
|
| 160 |
+
return perimeter;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
// True iff p lies on the closed segment [seg_a, seg_b].
bool is_on_segment(Point p, Point seg_a, Point seg_b) {
    // Collinearity first, then a bounding-box containment check.
    if (cross_product(seg_b - seg_a, p - seg_a) != 0) return false;
    const bool within_x = std::min(seg_a.x, seg_b.x) <= p.x && p.x <= std::max(seg_a.x, seg_b.x);
    const bool within_y = std::min(seg_a.y, seg_b.y) <= p.y && p.y <= std::max(seg_a.y, seg_b.y);
    return within_x && within_y;
}
|
| 168 |
+
|
| 169 |
+
// Winding-number point-in-polygon test.
// Boundary points are treated as inside (explicit on-segment check runs first).
// Returns true when p is inside or on the boundary of `polygon`.
bool is_inside_polygon_wn(Point p, const std::vector<Point>& polygon) {
    int n = polygon.size();
    if (n < 3) return false; // fewer than 3 vertices: not a polygon

    // Check if on boundary first
    for (int i = 0; i < n; ++i) {
        if (is_on_segment(p, polygon[i], polygon[(i + 1) % n])) return true;
    }

    int wn = 0; // Winding number
    for (int i = 0; i < n; ++i) {
        Point p1 = polygon[i];
        Point p2 = polygon[(i + 1) % n];
        if (p1.y <= p.y) { // Start y <= P.y
            if (p2.y > p.y && cross_product(p2 - p1, p - p1) > 0) { // An upward crossing, P strictly left of edge
                wn++;
            }
        } else { // Start y > P.y
            if (p2.y <= p.y && cross_product(p2 - p1, p - p1) < 0) { // A downward crossing, P strictly right of edge
                wn--;
            }
        }
    }
    return wn != 0; // wn != 0 means inside; wn == 0 means outside.
}
|
| 194 |
+
|
| 195 |
+
// Calculate score from scratch by checking all fish
|
| 196 |
+
void calculate_score_from_scratch(const std::vector<Point>& poly, int& m_count, int& s_count) {
|
| 197 |
+
m_count = 0; s_count = 0;
|
| 198 |
+
if (poly.size() < 3) return; // Not a valid polygon for containment
|
| 199 |
+
for (const auto& fish_s : all_fish_structs) {
|
| 200 |
+
if (is_inside_polygon_wn(fish_s.p, poly)) {
|
| 201 |
+
if (fish_s.type == 1) m_count++;
|
| 202 |
+
else s_count++;
|
| 203 |
+
}
|
| 204 |
+
}
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
// Calculate fish counts in a given rectangle using KD-tree
|
| 208 |
+
/*
|
| 209 |
+
Docstring:
|
| 210 |
+
Count fish inside an axis-aligned rectangle using a count-only KD-tree traversal.
|
| 211 |
+
This avoids building intermediate index arrays and reduces per-move overhead in SA.
|
| 212 |
+
*/
|
| 213 |
+
void calculate_score_delta_for_rectangle(int r_min_x, int r_max_x, int r_min_y, int r_max_y,
|
| 214 |
+
int& delta_m, int& delta_s) {
|
| 215 |
+
delta_m = 0; delta_s = 0;
|
| 216 |
+
if (!fish_kdtree_root || r_min_x > r_max_x || r_min_y > r_max_y) return;
|
| 217 |
+
query_kdtree_rectangle(fish_kdtree_root, r_min_x, r_max_x, r_min_y, r_max_y, delta_m, delta_s);
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
// Check intersection between two orthogonal segments p1s-p1e and p2s-p2e.
// Touching endpoints / T-junctions count as intersections (used to reject self-intersections).
// NOTE(review): assumes each segment is exactly horizontal or vertical — callers guarantee this.
bool segments_intersect(Point p1s, Point p1e, Point p2s, Point p2e) {
    // Normalize segments (sort endpoints to simplify overlap checks)
    if (p1s.x == p1e.x) { if (p1s.y > p1e.y) std::swap(p1s.y, p1e.y); } // Vertical, sort by y
    else { if (p1s.x > p1e.x) std::swap(p1s.x, p1e.x); } // Horizontal, sort by x
    if (p2s.x == p2e.x) { if (p2s.y > p2e.y) std::swap(p2s.y, p2e.y); }
    else { if (p2s.x > p2e.x) std::swap(p2s.x, p2e.x); }

    bool seg1_is_H = (p1s.y == p1e.y);
    bool seg2_is_H = (p2s.y == p2e.y);

    if (seg1_is_H == seg2_is_H) { // Both horizontal or both vertical
        if (seg1_is_H) { // Both horizontal
            // Check for y-alignment and x-overlap (closed intervals, so touching counts)
            return p1s.y == p2s.y && std::max(p1s.x, p2s.x) <= std::min(p1e.x, p2e.x);
        } else { // Both vertical
            // Check for x-alignment and y-overlap
            return p1s.x == p2s.x && std::max(p1s.y, p2s.y) <= std::min(p1e.y, p2e.y);
        }
    } else { // One horizontal, one vertical (potential T-junction or cross)
        Point h_s = seg1_is_H ? p1s : p2s; Point h_e = seg1_is_H ? p1e : p2e;
        Point v_s = seg1_is_H ? p2s : p1s; Point v_e = seg1_is_H ? p2e : p1e;
        // Check if intersection point (v_s.x, h_s.y) lies on both segments
        return v_s.x >= h_s.x && v_s.x <= h_e.x && // x_intersect within horizontal segment's x-range
               h_s.y >= v_s.y && h_s.y <= v_e.y; // y_intersect within vertical segment's y-range
    }
}
|
| 247 |
+
|
| 248 |
+
bool check_self_intersection_full(const std::vector<Point>& poly) {
|
| 249 |
+
int M = poly.size();
|
| 250 |
+
if (M < 4) return false;
|
| 251 |
+
for (int i = 0; i < M; ++i) {
|
| 252 |
+
Point p1s = poly[i];
|
| 253 |
+
Point p1e = poly[(i + 1) % M];
|
| 254 |
+
for (int j = i + 2; j < M; ++j) {
|
| 255 |
+
// Skip checking adjacent edges.
|
| 256 |
+
// Edge i is (poly[i], poly[(i+1)%M]). Edge j is (poly[j], poly[(j+1)%M]).
|
| 257 |
+
// If i=0 and j=M-1, then edge i is (poly[0], poly[1]) and edge j is (poly[M-1], poly[0]). These are adjacent.
|
| 258 |
+
if (i == 0 && j == M - 1) continue;
|
| 259 |
+
|
| 260 |
+
Point p2s = poly[j];
|
| 261 |
+
Point p2e = poly[(j + 1) % M];
|
| 262 |
+
if (segments_intersect(p1s, p1e, p2s, p2e)) return true;
|
| 263 |
+
}
|
| 264 |
+
}
|
| 265 |
+
return false;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
// Local self-intersection check: tests only the edges starting at
// critical_edge_start_indices_const against every other edge — O(k*M) instead
// of the full O(M^2) scan, which is what makes per-move validation in SA cheap.
bool has_self_intersection_locally(const std::vector<Point>& poly, const std::vector<int>& critical_edge_start_indices_const) {
    int M = poly.size();
    if (M < 4) return false; // too few edges to self-intersect

    std::vector<int> critical_indices = critical_edge_start_indices_const; // Make a copy to modify
    if (critical_indices.empty()) {
        return false;
    }

    // De-duplicate so each critical edge is tested exactly once.
    std::sort(critical_indices.begin(), critical_indices.end());
    critical_indices.erase(std::unique(critical_indices.begin(), critical_indices.end()), critical_indices.end());

    for (int edge1_s_idx_val_orig : critical_indices) {
        int edge1_s_idx_val = (edge1_s_idx_val_orig % M + M) % M; // Ensure positive modulo
        // No need to check edge1_s_idx_val bounds, it will be in [0, M-1]

        Point p1s = poly[edge1_s_idx_val];
        Point p1e = poly[(edge1_s_idx_val + 1) % M];

        for (int edge2_s_idx = 0; edge2_s_idx < M; ++edge2_s_idx) {
            // Skip the edge itself and its two neighbours — adjacent edges always
            // share an endpoint and would be reported as "intersecting".
            bool is_adj_or_same_to_p1s_p1e = (edge2_s_idx == edge1_s_idx_val || // Same edge
                                              edge2_s_idx == (edge1_s_idx_val + 1) % M || // edge2 starts where edge1 ends
                                              (edge2_s_idx + 1) % M == edge1_s_idx_val); // edge2 ends where edge1 starts
            if (is_adj_or_same_to_p1s_p1e) continue;

            Point p2s = poly[edge2_s_idx];
            Point p2e = poly[(edge2_s_idx + 1) % M];
            if (segments_intersect(p1s, p1e, p2s, p2e)) {
                return true;
            }
        }
    }
    return false;
}
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
bool has_distinct_vertices_unordered(const std::vector<Point>& poly) {
|
| 306 |
+
if (poly.empty()) return true;
|
| 307 |
+
std::unordered_set<Point, PointHash> distinct_pts;
|
| 308 |
+
distinct_pts.reserve(poly.size()); // Pre-allocate for efficiency
|
| 309 |
+
for(const auto& p : poly) {
|
| 310 |
+
if (!distinct_pts.insert(p).second) return false; // Insertion failed, duplicate found
|
| 311 |
+
}
|
| 312 |
+
return true;
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
/*
|
| 316 |
+
has_duplicate_vertices_local:
|
| 317 |
+
Fast local duplicate check used inside SA. It only verifies that the subset
|
| 318 |
+
of modified vertices does not collide (same coordinates) with any other vertex.
|
| 319 |
+
This is sufficient because all other vertices were already distinct before the move.
|
| 320 |
+
*/
|
| 321 |
+
bool has_duplicate_vertices_local(const std::vector<Point>& poly, const std::vector<int>& changed_indices) {
|
| 322 |
+
int m = (int)poly.size();
|
| 323 |
+
if (m <= 1 || changed_indices.empty()) return false;
|
| 324 |
+
for (int idx : changed_indices) {
|
| 325 |
+
int i = ((idx % m) + m) % m;
|
| 326 |
+
const Point& p = poly[i];
|
| 327 |
+
for (int j = 0; j < m; ++j) {
|
| 328 |
+
if (j == i) continue;
|
| 329 |
+
if (poly[j].x == p.x && poly[j].y == p.y) return true;
|
| 330 |
+
}
|
| 331 |
+
}
|
| 332 |
+
return false;
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
// Check basic structural validity of the polygon with early perimeter exit
|
| 336 |
+
bool is_polygon_structurally_sound(const std::vector<Point>& poly) {
|
| 337 |
+
// Ensures axis-parallel edges, valid bounds, non-zero edges, and perimeter within limit
|
| 338 |
+
int m = (int)poly.size();
|
| 339 |
+
if (m != 0 && (m < 4 || m > MAX_VERTICES)) return false;
|
| 340 |
+
if (m == 0) return true;
|
| 341 |
+
|
| 342 |
+
long long perim = 0;
|
| 343 |
+
for (int i = 0; i < m; ++i) {
|
| 344 |
+
const Point& p1 = poly[i];
|
| 345 |
+
const Point& p2 = poly[(i + 1) % m];
|
| 346 |
+
|
| 347 |
+
// bounds check
|
| 348 |
+
if (p1.x < 0 || p1.x > MAX_COORD_VAL || p1.y < 0 || p1.y > MAX_COORD_VAL) return false;
|
| 349 |
+
|
| 350 |
+
// axis-aligned and non-zero
|
| 351 |
+
if (p1.x != p2.x && p1.y != p2.y) return false;
|
| 352 |
+
if (p1.x == p2.x && p1.y == p2.y) return false;
|
| 353 |
+
|
| 354 |
+
// perimeter accumulation with early abort
|
| 355 |
+
perim += std::abs(p1.x - p2.x) + std::abs(p1.y - p2.y);
|
| 356 |
+
if (perim > MAX_PERIMETER) return false;
|
| 357 |
+
}
|
| 358 |
+
return true;
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
// Initial polygon generation using 2D Kadane's algorithm on a coarse grid.
// Buckets fish into a GRID_SIZE_KADANE x GRID_SIZE_KADANE grid scored
// (+1 mackerel / -1 sardine per fish), finds the maximum-sum sub-rectangle,
// and returns it as an axis-aligned rectangle polygon. Falls back to the best
// single cell, and finally to a tiny default square, when no positive region exists.
std::vector<Point> create_initial_polygon_kadane() {
    const int GRID_SIZE_KADANE = 300; // Tunable parameter (smaller for faster initialization)
    const int NUM_VALUES_KADANE = MAX_COORD_VAL + 1;
    // Ensure ACTUAL_CELL_DIM_KADANE is at least 1
    const int ACTUAL_CELL_DIM_KADANE = std::max(1, (NUM_VALUES_KADANE + GRID_SIZE_KADANE - 1) / GRID_SIZE_KADANE);

    // Accumulate per-cell scores: +1 per mackerel, -1 per sardine.
    std::vector<std::vector<long long>> grid_scores(GRID_SIZE_KADANE, std::vector<long long>(GRID_SIZE_KADANE, 0));
    for (const auto& fish_s : all_fish_structs) {
        int r = fish_s.p.y / ACTUAL_CELL_DIM_KADANE;
        int c = fish_s.p.x / ACTUAL_CELL_DIM_KADANE;
        r = std::min(r, GRID_SIZE_KADANE - 1); r = std::max(r,0);
        c = std::min(c, GRID_SIZE_KADANE - 1); c = std::max(c,0);
        grid_scores[r][c] += fish_s.type; // Mackerel +1, Sardine -1
    }

    long long max_so_far = -3e18; // Sufficiently small number
    int best_r1 = 0, best_c1 = 0, best_r2 = -1, best_c2 = -1;

    // 2D Kadane's algorithm: fix a column range [c1, c2], collapse it into a
    // 1D row array, and run 1D Kadane over the rows.
    for (int c1_idx = 0; c1_idx < GRID_SIZE_KADANE; ++c1_idx) {
        std::vector<long long> col_strip_sum(GRID_SIZE_KADANE, 0);
        for (int c2_idx = c1_idx; c2_idx < GRID_SIZE_KADANE; ++c2_idx) {
            for (int r_idx = 0; r_idx < GRID_SIZE_KADANE; ++r_idx) {
                col_strip_sum[r_idx] += grid_scores[r_idx][c2_idx];
            }

            // 1D Kadane's on col_strip_sum
            long long current_strip_val = 0;
            int current_r1_1d = 0;
            for (int r2_idx_1d = 0; r2_idx_1d < GRID_SIZE_KADANE; ++r2_idx_1d) {
                long long val_here = col_strip_sum[r2_idx_1d];
                if (current_strip_val > 0 && current_strip_val + val_here > 0) { // Extend if sum remains positive
                    current_strip_val += val_here;
                } else { // Start new subarray
                    current_strip_val = val_here;
                    current_r1_1d = r2_idx_1d;
                }

                if (current_strip_val > max_so_far) {
                    max_so_far = current_strip_val;
                    best_r1 = current_r1_1d;
                    best_r2 = r2_idx_1d;
                    best_c1 = c1_idx;
                    best_c2 = c2_idx;
                }
            }
        }
    }

    std::vector<Point> default_poly = {{0,0}, {1,0}, {1,1}, {0,1}}; // Minimal valid polygon

    // If no positive sum found, or issue, find best single cell
    if (best_r2 == -1 || max_so_far <=0 ) {
        max_so_far = -3e18; // Reset search for single best cell
        bool found_cell = false;
        for(int r=0; r<GRID_SIZE_KADANE; ++r) for(int c=0; c<GRID_SIZE_KADANE; ++c) {
            if(grid_scores[r][c] > max_so_far) {
                max_so_far = grid_scores[r][c];
                best_r1 = r; best_r2 = r; // Single cell
                best_c1 = c; best_c2 = c;
                found_cell = true;
            }
        }
        if (!found_cell || max_so_far <=0) return default_poly; // Still no good cell, return default
    }

    // Convert grid cell indices to actual coordinates
    int x_start = best_c1 * ACTUAL_CELL_DIM_KADANE;
    int y_start = best_r1 * ACTUAL_CELL_DIM_KADANE;
    int x_end = (best_c2 + 1) * ACTUAL_CELL_DIM_KADANE -1;
    int y_end = (best_r2 + 1) * ACTUAL_CELL_DIM_KADANE -1;

    // Clamp coordinates to valid range
    x_start = std::max(0, std::min(MAX_COORD_VAL, x_start));
    y_start = std::max(0, std::min(MAX_COORD_VAL, y_start));
    x_end = std::max(x_start, std::min(MAX_COORD_VAL, x_end)); // Ensure x_end >= x_start
    y_end = std::max(y_start, std::min(MAX_COORD_VAL, y_end)); // Ensure y_end >= y_start

    // Ensure non-zero dimensions for the polygon, minimum 1x1 actual area
    if (x_start == x_end) {
        if (x_start < MAX_COORD_VAL) x_end = x_start + 1;
        else if (x_start > 0) x_start = x_start -1; // Can't expand right, try expand left
        else return default_poly; // Single point at MAX_COORD_VAL, cannot form 1x1
    }
    if (y_start == y_end) {
        if (y_start < MAX_COORD_VAL) y_end = y_start + 1;
        else if (y_start > 0) y_start = y_start - 1;
        else return default_poly;
    }
    // After adjustment, if still degenerate, use default. This is rare.
    if (x_start == x_end || y_start == y_end) return default_poly;


    // CCW rectangle: bottom-left, bottom-right, top-right, top-left.
    std::vector<Point> initial_poly = {
        {x_start, y_start}, {x_end, y_start}, {x_end, y_end}, {x_start, y_end}
    };
    return initial_poly;
}
|
| 460 |
+
|
| 461 |
+
// === SIMULATED ANNEALING ===
// SA search state: candidate polygon plus cached fish counts inside it
// (kept in sync incrementally so scoring a move is O(1)).
struct SAState {
    std::vector<Point> poly; // rectilinear polygon vertices, in order
    int m_count;             // mackerels currently inside poly
    int s_count;             // sardines currently inside poly

    SAState() : m_count(0), s_count(0) {}

    // Final objective: max(0, mackerels - sardines + 1), clamped at zero.
    long long get_objective_score() const {
        return std::max(0LL, (long long)m_count - s_count + 1);
    }
    double get_raw_objective_score() const { // Used for SA acceptance probability (unclamped difference)
        return (double)m_count - s_count;
    }
};
|
| 476 |
+
|
| 477 |
+
// Calculates signed area * 2 of a polygon (shoelace formula)
|
| 478 |
+
long long polygon_signed_area_times_2(const std::vector<Point>& poly) {
|
| 479 |
+
if (poly.size() < 3) return 0;
|
| 480 |
+
long long area_sum = 0;
|
| 481 |
+
for (size_t i = 0; i < poly.size(); ++i) {
|
| 482 |
+
const Point& p1 = poly[i];
|
| 483 |
+
const Point& p2 = poly[(i + 1) % poly.size()];
|
| 484 |
+
area_sum += (long long)(p1.x - p2.x) * (p1.y + p2.y); // (x1-x2)(y1+y2) variant
|
| 485 |
+
}
|
| 486 |
+
return area_sum; // Positive for CCW, negative for CW
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
// Scratch buffer reused across SA iterations: start indices of edges touched by
// the current move, fed to the local self-intersection check.
std::vector<int> sa_critical_edge_indices_cache; // Cache for local intersection check

// Guide coordinates for SA moves:
// static_* guides are derived from fish positions (fixed for the whole run),
// best_poly_* guides track the vertex coordinates of the best polygon so far.
std::vector<int> static_x_guides;
std::vector<int> static_y_guides;
std::vector<int> best_poly_x_guides;
std::vector<int> best_poly_y_guides;
|
| 496 |
+
|
| 497 |
+
void update_best_poly_guides(const SAState& new_best_state) {
|
| 498 |
+
best_poly_x_guides.clear();
|
| 499 |
+
best_poly_y_guides.clear();
|
| 500 |
+
if (new_best_state.poly.empty()) return;
|
| 501 |
+
|
| 502 |
+
std::set<int> temp_x_set, temp_y_set;
|
| 503 |
+
for (const auto& p : new_best_state.poly) {
|
| 504 |
+
temp_x_set.insert(p.x);
|
| 505 |
+
temp_y_set.insert(p.y);
|
| 506 |
+
}
|
| 507 |
+
best_poly_x_guides.assign(temp_x_set.begin(), temp_x_set.end());
|
| 508 |
+
best_poly_y_guides.assign(temp_y_set.begin(), temp_y_set.end());
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
/*
compress_polygon_collinear:
Remove all intermediate vertices that lie on straight segments (three consecutive vertices collinear).
This reduces vertex count/perimeter without changing the enclosed area or legality.
Edits the polygon in place; never shrinks it below 4 vertices.
*/
void compress_polygon_collinear(std::vector<Point>& poly) {
    if (poly.size() < 5) return; // keep minimal 4-vertex polygon
    bool changed = true;
    int guard = 0;
    while (changed && guard < 2) { // two passes are enough to handle wrap-around effects
        changed = false;
        // Note: i is NOT advanced after an erase so the same slot is re-checked
        // against its new neighbour.
        for (size_t i = 0; i < poly.size();) {
            size_t m = poly.size();
            if (m <= 4) return; // never go below the minimal rectangle
            size_t i0 = (i + m - 1) % m; // previous vertex (wraps around)
            size_t i1 = i;
            size_t i2 = (i + 1) % m;     // next vertex (wraps around)
            const Point& p0 = poly[i0];
            const Point& p1 = poly[i1];
            const Point& p2 = poly[i2];
            // Rectilinear collinearity: three vertices share an x or a y.
            bool col_x = (p0.x == p1.x && p1.x == p2.x);
            bool col_y = (p0.y == p1.y && p1.y == p2.y);
            if (col_x || col_y) {
                poly.erase(poly.begin() + (int)i1); // drop the middle vertex
                changed = true;
                // re-check at this index after erase
            } else {
                ++i;
            }
        }
        ++guard;
    }
}
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
/*
|
| 551 |
+
Docstring:
|
| 552 |
+
Simulated annealing with three moves: move edge (snap/random), add bulge, and simplify.
|
| 553 |
+
Remove-bulge is disabled (empirically improves score/time and simplifies code).
|
| 554 |
+
Score deltas via KD-tree rectangle count; local checks enforce validity.
|
| 555 |
+
*/
|
| 556 |
+
void simulated_annealing_main() {
|
| 557 |
+
SAState current_state;
|
| 558 |
+
current_state.poly = create_initial_polygon_kadane();
|
| 559 |
+
calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);
|
| 560 |
+
|
| 561 |
+
std::vector<Point> default_tiny_poly = {{0,0}, {1,0}, {1,1}, {0,1}};
|
| 562 |
+
|
| 563 |
+
// Ensure initial polygon is valid, otherwise use default
|
| 564 |
+
bool current_poly_initial_valid = is_polygon_structurally_sound(current_state.poly) &&
|
| 565 |
+
current_state.poly.size() >= 4 &&
|
| 566 |
+
has_distinct_vertices_unordered(current_state.poly) &&
|
| 567 |
+
!check_self_intersection_full(current_state.poly);
|
| 568 |
+
|
| 569 |
+
if (!current_poly_initial_valid) {
|
| 570 |
+
current_state.poly = default_tiny_poly;
|
| 571 |
+
calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);
|
| 572 |
+
}
|
| 573 |
+
|
| 574 |
+
SAState best_state = current_state;
|
| 575 |
+
update_best_poly_guides(best_state);
|
| 576 |
+
|
| 577 |
+
// Prepare static guide coordinates from fish locations
|
| 578 |
+
std::set<int> sx_set, sy_set;
|
| 579 |
+
for(const auto& f_s : all_fish_structs) {
|
| 580 |
+
sx_set.insert(f_s.p.x); sx_set.insert(std::max(0,f_s.p.x-1)); sx_set.insert(std::min(MAX_COORD_VAL, f_s.p.x+1));
|
| 581 |
+
sy_set.insert(f_s.p.y); sy_set.insert(std::max(0,f_s.p.y-1)); sy_set.insert(std::min(MAX_COORD_VAL, f_s.p.y+1));
|
| 582 |
+
}
|
| 583 |
+
sx_set.insert(0); sx_set.insert(MAX_COORD_VAL); // Boundary guides
|
| 584 |
+
sy_set.insert(0); sy_set.insert(MAX_COORD_VAL);
|
| 585 |
+
|
| 586 |
+
static_x_guides.assign(sx_set.begin(), sx_set.end());
|
| 587 |
+
static_y_guides.assign(sy_set.begin(), sy_set.end());
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
double start_temp = 150.0;
|
| 591 |
+
double end_temp = 0.01;
|
| 592 |
+
|
| 593 |
+
long long current_signed_area = polygon_signed_area_times_2(current_state.poly);
|
| 594 |
+
if (current_signed_area == 0 && current_state.poly.size() >=3) {
|
| 595 |
+
current_signed_area = 1; // Avoid issues with zero area for sign logic
|
| 596 |
+
}
|
| 597 |
+
|
| 598 |
+
sa_critical_edge_indices_cache.reserve(10); // Max expected critical edges for current moves
|
| 599 |
+
std::vector<int> changed_vertex_indices; // indices of vertices modified in the current move
|
| 600 |
+
changed_vertex_indices.reserve(4);
|
| 601 |
+
|
| 602 |
+
while (global_timer.elapsed() < ACTUAL_TIME_LIMIT_SECONDS) {
|
| 603 |
+
double time_ratio = global_timer.elapsed() / ACTUAL_TIME_LIMIT_SECONDS;
|
| 604 |
+
double temperature = start_temp * std::pow(end_temp / start_temp, time_ratio);
|
| 605 |
+
// Fine-tune temperature near end or if it drops too fast
|
| 606 |
+
if (temperature < end_temp && time_ratio < 0.95) temperature = end_temp;
|
| 607 |
+
if (time_ratio > 0.95 && temperature > end_temp * 0.1) temperature = end_temp * 0.1; // Lower temp aggressively at the very end
|
| 608 |
+
|
| 609 |
+
if (current_state.poly.size() < 4) { // Should not happen if logic is correct, but as a safeguard
|
| 610 |
+
current_state.poly = default_tiny_poly;
|
| 611 |
+
calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);
|
| 612 |
+
current_signed_area = polygon_signed_area_times_2(current_state.poly);
|
| 613 |
+
if (current_signed_area == 0 && current_state.poly.size() >=3) current_signed_area = 1;
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
SAState candidate_state = current_state;
|
| 617 |
+
sa_critical_edge_indices_cache.clear();
|
| 618 |
+
changed_vertex_indices.clear();
|
| 619 |
+
|
| 620 |
+
int move_type_roll = rng.next_int(100);
|
| 621 |
+
// Base probabilities for moves (simpler, empirically stronger)
|
| 622 |
+
int move_edge_prob = 48;
|
| 623 |
+
int add_bulge_prob = 24;
|
| 624 |
+
// Remaining probability for simplify polygon move
|
| 625 |
+
|
| 626 |
+
long long current_poly_perimeter_cached = 0;
|
| 627 |
+
bool check_limits = (candidate_state.poly.size() > 200 || candidate_state.poly.size() > MAX_VERTICES - 20);
|
| 628 |
+
if (check_limits && candidate_state.poly.size() > 200) {
|
| 629 |
+
current_poly_perimeter_cached = calculate_perimeter(candidate_state.poly);
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
// Adjust move probabilities based on polygon size/perimeter
|
| 633 |
+
if (candidate_state.poly.size() + 2 > MAX_VERTICES || (check_limits && current_poly_perimeter_cached > MAX_PERIMETER * 0.95)) { // If adding bulge would exceed max vertices
|
| 634 |
+
move_edge_prob = 45; add_bulge_prob = 0; // Disallow adding vertices near limits
|
| 635 |
+
} else if (candidate_state.poly.size() > 200 || (check_limits && current_poly_perimeter_cached > MAX_PERIMETER * 0.9)) {
|
| 636 |
+
move_edge_prob = 40; add_bulge_prob = 15;
|
| 637 |
+
} else if (candidate_state.poly.size() > 50) {
|
| 638 |
+
move_edge_prob = 45; add_bulge_prob = 20;
|
| 639 |
+
}
|
| 640 |
+
|
| 641 |
+
bool move_made = false;
|
| 642 |
+
|
| 643 |
+
// Probabilities for snapping to guide coordinates
|
| 644 |
+
double prob_dynamic_guide_snap = 0.20 + 0.20 * time_ratio;
|
| 645 |
+
double prob_static_guide_snap_if_not_dynamic = 0.75;
|
| 646 |
+
|
| 647 |
+
if (move_type_roll < move_edge_prob && candidate_state.poly.size() >= 4 ) { // Move Edge
|
| 648 |
+
int edge_idx = rng.next_int(candidate_state.poly.size());
|
| 649 |
+
Point p1_orig = candidate_state.poly[edge_idx];
|
| 650 |
+
Point p2_orig = candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()];
|
| 651 |
+
|
| 652 |
+
int new_coord_val = -1;
|
| 653 |
+
int cur_delta_m=0, cur_delta_s=0;
|
| 654 |
+
bool coord_selected_successfully = false;
|
| 655 |
+
|
| 656 |
+
// Determine which guides are relevant (X or Y)
|
| 657 |
+
const std::vector<int>* relevant_dyn_guides = (p1_orig.x == p2_orig.x) ? &best_poly_x_guides : &best_poly_y_guides;
|
| 658 |
+
const std::vector<int>* relevant_static_guides = (p1_orig.x == p2_orig.x) ? &static_x_guides : &static_y_guides;
|
| 659 |
+
|
| 660 |
+
// Try snapping to dynamic (best poly) guides
|
| 661 |
+
if (!relevant_dyn_guides->empty() && rng.next_double() < prob_dynamic_guide_snap) {
|
| 662 |
+
new_coord_val = (*relevant_dyn_guides)[rng.next_int(relevant_dyn_guides->size())];
|
| 663 |
+
coord_selected_successfully = true;
|
| 664 |
+
}
|
| 665 |
+
// If not, try snapping to static (fish) guides
|
| 666 |
+
if (!coord_selected_successfully) {
|
| 667 |
+
if (!relevant_static_guides->empty() && rng.next_double() < prob_static_guide_snap_if_not_dynamic) {
|
| 668 |
+
new_coord_val = (*relevant_static_guides)[rng.next_int(relevant_static_guides->size())];
|
| 669 |
+
coord_selected_successfully = true;
|
| 670 |
+
}
|
| 671 |
+
}
|
| 672 |
+
// If still not selected, use random displacement
|
| 673 |
+
if (!coord_selected_successfully) {
|
| 674 |
+
double step_factor = std::max(0.1, 1.0 - time_ratio * 0.95); // Step size decreases over time
|
| 675 |
+
int base_step_max = std::max(1, (int)( (MAX_COORD_VAL/150.0) * step_factor + 1 ) );
|
| 676 |
+
int random_displacement = rng.next_int(-base_step_max, base_step_max);
|
| 677 |
+
if (time_ratio > 0.75 && rng.next_double() < 0.7) { // Very small steps near end
|
| 678 |
+
random_displacement = rng.next_int(-2,2);
|
| 679 |
+
}
|
| 680 |
+
if (random_displacement == 0) random_displacement = (rng.next_double() < 0.5) ? -1:1;
|
| 681 |
+
|
| 682 |
+
if (p1_orig.x == p2_orig.x) new_coord_val = p1_orig.x + random_displacement; // Vertical edge, move X
|
| 683 |
+
else new_coord_val = p1_orig.y + random_displacement; // Horizontal edge, move Y
|
| 684 |
+
}
|
| 685 |
+
|
| 686 |
+
new_coord_val = std::max(0, std::min(MAX_COORD_VAL, new_coord_val)); // Clamp to bounds
|
| 687 |
+
|
| 688 |
+
if (p1_orig.x == p2_orig.x) { // Vertical edge: (X_orig, Y_s) to (X_orig, Y_e)
|
| 689 |
+
if (new_coord_val == p1_orig.x) {move_made = false; goto end_move_attempt_label;} // No change
|
| 690 |
+
|
| 691 |
+
int query_min_x, query_max_x;
|
| 692 |
+
if (new_coord_val > p1_orig.x) { // Moved right
|
| 693 |
+
query_min_x = p1_orig.x + 1;
|
| 694 |
+
query_max_x = new_coord_val;
|
| 695 |
+
} else { // Moved left (new_coord_val < p1_orig.x)
|
| 696 |
+
query_min_x = new_coord_val;
|
| 697 |
+
query_max_x = p1_orig.x - 1;
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
calculate_score_delta_for_rectangle(
|
| 701 |
+
query_min_x, query_max_x,
|
| 702 |
+
std::min(p1_orig.y, p2_orig.y), std::max(p1_orig.y, p2_orig.y),
|
| 703 |
+
cur_delta_m, cur_delta_s);
|
| 704 |
+
|
| 705 |
+
int sign = (new_coord_val > p1_orig.x) ? 1 : -1; // Moving right is positive X change
|
| 706 |
+
if (p1_orig.y > p2_orig.y) sign *= -1; // Correct for edge Y-direction (p1_orig.y to p2_orig.y)
|
| 707 |
+
if (current_signed_area < 0) sign *= -1; // Correct for CW polygon (area < 0)
|
| 708 |
+
|
| 709 |
+
candidate_state.poly[edge_idx].x = new_coord_val;
|
| 710 |
+
candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()].x = new_coord_val;
|
| 711 |
+
candidate_state.m_count += sign * cur_delta_m;
|
| 712 |
+
candidate_state.s_count += sign * cur_delta_s;
|
| 713 |
+
} else { // Horizontal edge: (X_s, Y_orig) to (X_e, Y_orig)
|
| 714 |
+
if (new_coord_val == p1_orig.y) {move_made = false; goto end_move_attempt_label;} // No change
|
| 715 |
+
|
| 716 |
+
int query_min_y, query_max_y;
|
| 717 |
+
if (new_coord_val > p1_orig.y) { // Moved up (Y increases)
|
| 718 |
+
query_min_y = p1_orig.y + 1;
|
| 719 |
+
query_max_y = new_coord_val;
|
| 720 |
+
} else { // Moved down (Y decreases, new_coord_val < p1_orig.y)
|
| 721 |
+
query_min_y = new_coord_val;
|
| 722 |
+
query_max_y = p1_orig.y - 1;
|
| 723 |
+
}
|
| 724 |
+
|
| 725 |
+
calculate_score_delta_for_rectangle(
|
| 726 |
+
std::min(p1_orig.x, p2_orig.x), std::max(p1_orig.x, p2_orig.x),
|
| 727 |
+
query_min_y, query_max_y,
|
| 728 |
+
cur_delta_m, cur_delta_s);
|
| 729 |
+
|
| 730 |
+
int sign = (new_coord_val < p1_orig.y) ? 1 : -1; // Moving "down" (Y decreases) means positive sign if it expands area
|
| 731 |
+
if (p1_orig.x > p2_orig.x) sign *= -1; // Correct for edge X-direction (p1_orig.x to p2_orig.x)
|
| 732 |
+
if (current_signed_area < 0) sign *= -1; // Correct for CW polygon
|
| 733 |
+
|
| 734 |
+
candidate_state.poly[edge_idx].y = new_coord_val;
|
| 735 |
+
candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()].y = new_coord_val;
|
| 736 |
+
candidate_state.m_count += sign * cur_delta_m;
|
| 737 |
+
candidate_state.s_count += sign * cur_delta_s;
|
| 738 |
+
}
|
| 739 |
+
int M_cand = candidate_state.poly.size();
|
| 740 |
+
sa_critical_edge_indices_cache.push_back((edge_idx - 1 + M_cand) % M_cand);
|
| 741 |
+
sa_critical_edge_indices_cache.push_back(edge_idx);
|
| 742 |
+
sa_critical_edge_indices_cache.push_back((edge_idx + 1) % M_cand);
|
| 743 |
+
changed_vertex_indices.clear();
|
| 744 |
+
changed_vertex_indices.push_back(edge_idx);
|
| 745 |
+
changed_vertex_indices.push_back((edge_idx + 1) % M_cand);
|
| 746 |
+
move_made = true;
|
| 747 |
+
|
| 748 |
+
} else if (move_type_roll < move_edge_prob + add_bulge_prob && candidate_state.poly.size() + 2 <= MAX_VERTICES && candidate_state.poly.size() >=4) { // Add Bulge
|
| 749 |
+
int edge_idx = rng.next_int(candidate_state.poly.size());
|
| 750 |
+
Point p_s = candidate_state.poly[edge_idx]; // Start point of edge
|
| 751 |
+
Point p_e = candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()]; // End point of edge
|
| 752 |
+
|
| 753 |
+
int new_coord_val = -1;
|
| 754 |
+
bool coord_selected_successfully = false;
|
| 755 |
+
|
| 756 |
+
const std::vector<int>* relevant_dyn_guides = (p_s.x == p_e.x) ? &best_poly_x_guides : &best_poly_y_guides;
|
| 757 |
+
const std::vector<int>* relevant_static_guides = (p_s.x == p_e.x) ? &static_x_guides : &static_y_guides;
|
| 758 |
+
|
| 759 |
+
// Try snapping bulge coord
|
| 760 |
+
if (!relevant_dyn_guides->empty() && rng.next_double() < prob_dynamic_guide_snap) {
|
| 761 |
+
new_coord_val = (*relevant_dyn_guides)[rng.next_int(relevant_dyn_guides->size())];
|
| 762 |
+
coord_selected_successfully = true;
|
| 763 |
+
}
|
| 764 |
+
if (!coord_selected_successfully) {
|
| 765 |
+
if (!relevant_static_guides->empty() && rng.next_double() < prob_static_guide_snap_if_not_dynamic) {
|
| 766 |
+
new_coord_val = (*relevant_static_guides)[rng.next_int(relevant_static_guides->size())];
|
| 767 |
+
coord_selected_successfully = true;
|
| 768 |
+
}
|
| 769 |
+
}
|
| 770 |
+
// If not snapped, random depth for bulge
|
| 771 |
+
if (!coord_selected_successfully) {
|
| 772 |
+
double depth_factor = std::max(0.1, 1.0 - time_ratio * 0.9);
|
| 773 |
+
int base_depth_max = std::max(1, (int)( (MAX_COORD_VAL/300.0) * depth_factor + 1 ) );
|
| 774 |
+
int random_abs_depth = rng.next_int(1, base_depth_max);
|
| 775 |
+
if (time_ratio > 0.75 && rng.next_double() < 0.7) {
|
| 776 |
+
random_abs_depth = rng.next_int(1,2);
|
| 777 |
+
}
|
| 778 |
+
int bulge_dir_sign = (rng.next_double() < 0.5) ? 1 : -1; // Randomly outwards or inwards relative to edge line
|
| 779 |
+
if (p_s.x == p_e.x) new_coord_val = p_s.x + bulge_dir_sign * random_abs_depth; // Vertical edge, bulge in X
|
| 780 |
+
else new_coord_val = p_s.y + bulge_dir_sign * random_abs_depth; // Horizontal edge, bulge in Y
|
| 781 |
+
}
|
| 782 |
+
|
| 783 |
+
new_coord_val = std::max(0, std::min(MAX_COORD_VAL, new_coord_val));
|
| 784 |
+
|
| 785 |
+
Point v1_mod, v2_mod; // New vertices for the bulge
|
| 786 |
+
int cur_delta_m=0, cur_delta_s=0;
|
| 787 |
+
|
| 788 |
+
if (p_s.x == p_e.x) { // Original edge is vertical
|
| 789 |
+
if (new_coord_val == p_s.x) {move_made = false; goto end_move_attempt_label;} // Bulge is flat
|
| 790 |
+
v1_mod = {new_coord_val, p_s.y}; v2_mod = {new_coord_val, p_e.y};
|
| 791 |
+
// Rectangle for delta score is between X=p_s.x and X=new_coord_val, over Y-span of original edge
|
| 792 |
+
calculate_score_delta_for_rectangle(
|
| 793 |
+
std::min(p_s.x, new_coord_val), std::max(p_s.x, new_coord_val),
|
| 794 |
+
std::min(p_s.y,p_e.y), std::max(p_s.y,p_e.y),
|
| 795 |
+
cur_delta_m, cur_delta_s);
|
| 796 |
+
int sign = (new_coord_val > p_s.x) ? 1 : -1; // Bulge to the right of edge is positive X change
|
| 797 |
+
if (p_s.y > p_e.y) sign *= -1; // Correct for edge Y-direction
|
| 798 |
+
if (current_signed_area < 0) sign *= -1; // Correct for CW polygon
|
| 799 |
+
candidate_state.m_count += sign * cur_delta_m;
|
| 800 |
+
candidate_state.s_count += sign * cur_delta_s;
|
| 801 |
+
} else { // Original edge is horizontal
|
| 802 |
+
if (new_coord_val == p_s.y) {move_made = false; goto end_move_attempt_label;} // Bulge is flat
|
| 803 |
+
v1_mod = {p_s.x, new_coord_val}; v2_mod = {p_e.x, new_coord_val};
|
| 804 |
+
// Rectangle for delta score is between Y=p_s.y and Y=new_coord_val, over X-span of original edge
|
| 805 |
+
calculate_score_delta_for_rectangle(
|
| 806 |
+
std::min(p_s.x,p_e.x), std::max(p_s.x,p_e.x),
|
| 807 |
+
std::min(p_s.y, new_coord_val), std::max(p_s.y, new_coord_val),
|
| 808 |
+
cur_delta_m, cur_delta_s);
|
| 809 |
+
int sign = (new_coord_val < p_s.y) ? 1 : -1; // Bulge "downwards" (Y decreases) means positive sign if it expands area
|
| 810 |
+
if (p_s.x > p_e.x) sign *= -1; // Correct for edge X-direction
|
| 811 |
+
if (current_signed_area < 0) sign *= -1; // Correct for CW polygon
|
| 812 |
+
candidate_state.m_count += sign * cur_delta_m;
|
| 813 |
+
candidate_state.s_count += sign * cur_delta_s;
|
| 814 |
+
}
|
| 815 |
+
|
| 816 |
+
// Insert new vertices into polygon
|
| 817 |
+
auto insert_pos_iter = candidate_state.poly.begin() + (edge_idx + 1);
|
| 818 |
+
insert_pos_iter = candidate_state.poly.insert(insert_pos_iter, v1_mod);
|
| 819 |
+
candidate_state.poly.insert(insert_pos_iter + 1, v2_mod);
|
| 820 |
+
|
| 821 |
+
// Mark affected edges/vertices as critical for local intersection check
|
| 822 |
+
sa_critical_edge_indices_cache.push_back(edge_idx);
|
| 823 |
+
sa_critical_edge_indices_cache.push_back(edge_idx + 1);
|
| 824 |
+
sa_critical_edge_indices_cache.push_back(edge_idx + 2);
|
| 825 |
+
changed_vertex_indices.clear();
|
| 826 |
+
int Mc = (int)candidate_state.poly.size();
|
| 827 |
+
changed_vertex_indices.push_back((edge_idx + 1) % Mc);
|
| 828 |
+
changed_vertex_indices.push_back((edge_idx + 2) % Mc);
|
| 829 |
+
move_made = true;
|
| 830 |
+
|
| 831 |
+
} else if (candidate_state.poly.size() > 4) { // Simplify Polygon (remove collinear vertex)
|
| 832 |
+
int R_start_idx = rng.next_int(candidate_state.poly.size()); // Random start for search
|
| 833 |
+
bool simplified_this_turn = false;
|
| 834 |
+
for(int k_offset=0; k_offset < candidate_state.poly.size() ; ++k_offset) {
|
| 835 |
+
int current_poly_size_before_erase = candidate_state.poly.size();
|
| 836 |
+
if (current_poly_size_before_erase <= 4) break; // Cannot simplify further
|
| 837 |
+
|
| 838 |
+
int p1_idx = (R_start_idx + k_offset) % current_poly_size_before_erase;
|
| 839 |
+
int p0_idx_old = (p1_idx - 1 + current_poly_size_before_erase) % current_poly_size_before_erase;
|
| 840 |
+
int p2_idx_old = (p1_idx + 1) % current_poly_size_before_erase;
|
| 841 |
+
|
| 842 |
+
const Point& p0 = candidate_state.poly[p0_idx_old];
|
| 843 |
+
const Point& p1 = candidate_state.poly[p1_idx];
|
| 844 |
+
const Point& p2 = candidate_state.poly[p2_idx_old];
|
| 845 |
+
|
| 846 |
+
bool collinear_x = (p0.x == p1.x && p1.x == p2.x);
|
| 847 |
+
bool collinear_y = (p0.y == p1.y && p1.y == p2.y);
|
| 848 |
+
|
| 849 |
+
if (collinear_x || collinear_y) {
|
| 850 |
+
candidate_state.poly.erase(candidate_state.poly.begin() + p1_idx);
|
| 851 |
+
simplified_this_turn = true;
|
| 852 |
+
|
| 853 |
+
int M_cand = candidate_state.poly.size();
|
| 854 |
+
int critical_vertex_idx_in_new_poly;
|
| 855 |
+
// Vertex p0 (at p0_idx_old) forms the new corner. Its index in new poly:
|
| 856 |
+
if (p1_idx == 0) { // If p1 was poly[0], p0 was poly[last]. p0 is now poly[new_last]
|
| 857 |
+
critical_vertex_idx_in_new_poly = M_cand -1;
|
| 858 |
+
} else { // Otherwise, p0's index p1_idx-1 is preserved.
|
| 859 |
+
critical_vertex_idx_in_new_poly = p1_idx - 1;
|
| 860 |
+
}
|
| 861 |
+
|
| 862 |
+
if (!candidate_state.poly.empty()) {
|
| 863 |
+
sa_critical_edge_indices_cache.push_back((critical_vertex_idx_in_new_poly - 1 + M_cand) % M_cand);
|
| 864 |
+
sa_critical_edge_indices_cache.push_back(critical_vertex_idx_in_new_poly);
|
| 865 |
+
sa_critical_edge_indices_cache.push_back((critical_vertex_idx_in_new_poly + 1) % M_cand);
|
| 866 |
+
}
|
| 867 |
+
break; // Simplified one vertex, enough for this turn
|
| 868 |
+
}
|
| 869 |
+
}
|
| 870 |
+
if (!simplified_this_turn) {move_made = false; goto end_move_attempt_label;} // No simplification found/possible
|
| 871 |
+
move_made = true;
|
| 872 |
+
}
|
| 873 |
+
|
| 874 |
+
end_move_attempt_label:; // Label for goto if a move is aborted (e.g. no change)
|
| 875 |
+
if (!move_made) continue; // No valid move attempted or made
|
| 876 |
+
|
| 877 |
+
// Validate candidate polygon
|
| 878 |
+
if (!is_polygon_structurally_sound(candidate_state.poly) || candidate_state.poly.size() < 4 ||
|
| 879 |
+
has_duplicate_vertices_local(candidate_state.poly, changed_vertex_indices)) {
|
| 880 |
+
continue; // Invalid basic structure or duplicate vertices near the modified area
|
| 881 |
+
}
|
| 882 |
+
|
| 883 |
+
if (has_self_intersection_locally(candidate_state.poly, sa_critical_edge_indices_cache)) {
|
| 884 |
+
continue; // Self-intersection found
|
| 885 |
+
}
|
| 886 |
+
|
| 887 |
+
// Accept or reject candidate based on SA criteria
|
| 888 |
+
double candidate_raw_obj_score = candidate_state.get_raw_objective_score();
|
| 889 |
+
double current_raw_obj_score = current_state.get_raw_objective_score();
|
| 890 |
+
double score_diff = candidate_raw_obj_score - current_raw_obj_score;
|
| 891 |
+
|
| 892 |
+
if (score_diff >= 0 || (temperature > 1e-9 && rng.next_double() < std::exp(score_diff / temperature))) {
|
| 893 |
+
current_state = std::move(candidate_state); // Accept move
|
| 894 |
+
current_signed_area = polygon_signed_area_times_2(current_state.poly); // Update signed area
|
| 895 |
+
if (current_signed_area == 0 && !current_state.poly.empty() && current_state.poly.size() >=3) current_signed_area = 1; // Handle degenerate
|
| 896 |
+
|
| 897 |
+
// Keep polygon compact is deferred to final compression for speed.
|
| 898 |
+
|
| 899 |
+
if (current_state.get_objective_score() > best_state.get_objective_score()) {
|
| 900 |
+
best_state = current_state; // New best solution found
|
| 901 |
+
update_best_poly_guides(best_state); // Update dynamic guides
|
| 902 |
+
}
|
| 903 |
+
}
|
| 904 |
+
} // End SA loop
|
| 905 |
+
|
| 906 |
+
// Simplify polygon by removing all collinear intermediate vertices to reduce size/perimeter
|
| 907 |
+
compress_polygon_collinear(best_state.poly);
|
| 908 |
+
|
| 909 |
+
// Final validation of the best found state
|
| 910 |
+
bool needs_reset_to_default = false;
|
| 911 |
+
if (!is_polygon_structurally_sound(best_state.poly) ||
|
| 912 |
+
best_state.poly.size() < 4 ||
|
| 913 |
+
!has_distinct_vertices_unordered(best_state.poly) ||
|
| 914 |
+
check_self_intersection_full(best_state.poly) ) { // Full intersection check on best
|
| 915 |
+
needs_reset_to_default = true;
|
| 916 |
+
}
|
| 917 |
+
|
| 918 |
+
if (needs_reset_to_default) { // If best state is invalid, revert to default
|
| 919 |
+
best_state.poly = default_tiny_poly;
|
| 920 |
+
calculate_score_from_scratch(best_state.poly, best_state.m_count, best_state.s_count);
|
| 921 |
+
}
|
| 922 |
+
|
| 923 |
+
// If best score is 0, check if default polygon gives >0. (max(0, val+1))
|
| 924 |
+
// The score is max(0, M-S+1). So if M-S = -1, score is 0. If M-S = 0, score is 1.
|
| 925 |
+
// If best_state.get_objective_score() == 0, it means M-S+1 <= 0, so M-S <= -1.
|
| 926 |
+
// Default polygon has M=0, S=0, so M-S+1 = 1. Score is 1.
|
| 927 |
+
// So, if best_state score is 0, default is always better (score 1) or equal (if default also somehow gets 0).
|
| 928 |
+
if (best_state.get_objective_score() == 0) {
|
| 929 |
+
// This case implies M-S <= -1 for best_state. Default gives score 1.
|
| 930 |
+
// It's possible that the problem setter implies an empty polygon is not allowed or scores 0.
|
| 931 |
+
// The problem implies outputting a polygon. The default_tiny_poly is a valid polygon.
|
| 932 |
+
// The current logic already handles falling back to default_tiny_poly if the Kadane one is invalid.
|
| 933 |
+
// This check ensures if SA ends up with a 0-score polygon (e.g. captures many sardines),
|
| 934 |
+
// we check if the basic tiny square is better.
|
| 935 |
+
SAState temp_default_state; // Create a temporary default state to calculate its score
|
| 936 |
+
temp_default_state.poly = default_tiny_poly;
|
| 937 |
+
calculate_score_from_scratch(temp_default_state.poly, temp_default_state.m_count, temp_default_state.s_count);
|
| 938 |
+
// If the objectively computed score of the best_state is less than the default one, use default.
|
| 939 |
+
// This is useful if best_state.get_objective_score() became 0 due to M-S+1 <= 0, while default_tiny_poly has M-S+1=1.
|
| 940 |
+
if (best_state.get_objective_score() < temp_default_state.get_objective_score()) {
|
| 941 |
+
best_state = temp_default_state;
|
| 942 |
+
}
|
| 943 |
+
}
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
// Output the best polygon
|
| 947 |
+
std::cout << best_state.poly.size() << "\n";
|
| 948 |
+
for (const auto& p : best_state.poly) {
|
| 949 |
+
std::cout << p.x << " " << p.y << "\n";
|
| 950 |
+
}
|
| 951 |
+
}
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
int main(int argc, char *argv[]) {
|
| 955 |
+
std::ios_base::sync_with_stdio(false);
|
| 956 |
+
std::cin.tie(NULL);
|
| 957 |
+
|
| 958 |
+
// Allow overriding time limit via command line arg, for local testing
|
| 959 |
+
if (argc > 1) {
|
| 960 |
+
try {
|
| 961 |
+
ACTUAL_TIME_LIMIT_SECONDS = std::stod(argv[1]);
|
| 962 |
+
} catch (const std::exception& e) { /* keep default if parse fails */ }
|
| 963 |
+
}
|
| 964 |
+
ACTUAL_TIME_LIMIT_SECONDS -= TIME_LIMIT_SECONDS_SAFETY_MARGIN;
|
| 965 |
+
if (ACTUAL_TIME_LIMIT_SECONDS < 0.2) ACTUAL_TIME_LIMIT_SECONDS = 0.2; // Minimum sensible time limit
|
| 966 |
+
|
| 967 |
+
|
| 968 |
+
sa_critical_edge_indices_cache.reserve(10); // Small, for a few critical edges
|
| 969 |
+
|
| 970 |
+
|
| 971 |
+
int N_half; // Number of mackerels (and sardines)
|
| 972 |
+
std::cin >> N_half;
|
| 973 |
+
|
| 974 |
+
all_fish_structs.resize(2 * N_half);
|
| 975 |
+
std::vector<int> fish_indices_for_kdtree(2 * N_half);
|
| 976 |
+
if (2 * N_half > 0) {
|
| 977 |
+
std::iota(fish_indices_for_kdtree.begin(), fish_indices_for_kdtree.end(), 0);
|
| 978 |
+
}
|
| 979 |
+
|
| 980 |
+
// Read mackerels
|
| 981 |
+
for (int i = 0; i < N_half; ++i) {
|
| 982 |
+
std::cin >> all_fish_structs[i].p.x >> all_fish_structs[i].p.y;
|
| 983 |
+
all_fish_structs[i].type = 1;
|
| 984 |
+
}
|
| 985 |
+
// Read sardines
|
| 986 |
+
for (int i = 0; i < N_half; ++i) {
|
| 987 |
+
std::cin >> all_fish_structs[N_half + i].p.x >> all_fish_structs[N_half + i].p.y;
|
| 988 |
+
all_fish_structs[N_half + i].type = -1;
|
| 989 |
+
}
|
| 990 |
+
|
| 991 |
+
// Build KD-tree if there are fish
|
| 992 |
+
if (!all_fish_structs.empty()) {
|
| 993 |
+
fish_kdtree_root = build_kdtree(fish_indices_for_kdtree, 0, (int)all_fish_structs.size() - 1, 0);
|
| 994 |
+
}
|
| 995 |
+
|
| 996 |
+
simulated_annealing_main();
|
| 997 |
+
|
| 998 |
+
// Clean up KD-tree memory
|
| 999 |
+
if (fish_kdtree_root) delete_kdtree(fish_kdtree_root);
|
| 1000 |
+
|
| 1001 |
+
return 0;
|
| 1002 |
+
}
|
| 1003 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc039/initial_program.cpp
ADDED
|
@@ -0,0 +1,925 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <algorithm>
|
| 5 |
+
#include <chrono>
|
| 6 |
+
#include <random>
|
| 7 |
+
#include <set>
|
| 8 |
+
#include <unordered_set>
|
| 9 |
+
#include <cmath>
|
| 10 |
+
#include <iomanip>
|
| 11 |
+
#include <numeric> // For std::iota
|
| 12 |
+
#include <string>
|
| 13 |
+
#include <map>
|
| 14 |
+
|
| 15 |
+
// === MACROS AND CONSTANTS ===
|
| 16 |
+
const int MAX_COORD_VAL = 100000;
|
| 17 |
+
const int MAX_VERTICES = 1000;
|
| 18 |
+
const int MAX_PERIMETER = 400000;
|
| 19 |
+
const double TIME_LIMIT_SECONDS_SAFETY_MARGIN = 0.1; // Increased safety margin
|
| 20 |
+
double ACTUAL_TIME_LIMIT_SECONDS = 2.0;
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
// === RANDOM NUMBER GENERATION ===
|
| 24 |
+
struct XorShift {
|
| 25 |
+
uint64_t x;
|
| 26 |
+
XorShift() : x(std::chrono::steady_clock::now().time_since_epoch().count() ^ ((uint64_t)std::random_device()() << 32) ^ std::random_device()()) {}
|
| 27 |
+
uint64_t next() {
|
| 28 |
+
x ^= x << 13;
|
| 29 |
+
x ^= x >> 7;
|
| 30 |
+
x ^= x << 17;
|
| 31 |
+
return x;
|
| 32 |
+
}
|
| 33 |
+
int next_int(int n) { if (n <= 0) return 0; return next() % n; }
|
| 34 |
+
int next_int(int a, int b) { if (a > b) return a; return a + next_int(b - a + 1); }
|
| 35 |
+
double next_double() { return next() / (double)UINT64_MAX; }
|
| 36 |
+
};
|
| 37 |
+
XorShift rng;
|
| 38 |
+
|
| 39 |
+
// === TIMER ===
|
| 40 |
+
struct Timer {
|
| 41 |
+
std::chrono::steady_clock::time_point start_time;
|
| 42 |
+
Timer() { reset(); }
|
| 43 |
+
void reset() { start_time = std::chrono::steady_clock::now(); }
|
| 44 |
+
double elapsed() const {
|
| 45 |
+
auto now = std::chrono::steady_clock::now();
|
| 46 |
+
return std::chrono::duration_cast<std::chrono::duration<double>>(now - start_time).count();
|
| 47 |
+
}
|
| 48 |
+
};
|
| 49 |
+
Timer global_timer;
|
| 50 |
+
|
| 51 |
+
// === GEOMETRIC STRUCTURES ===
|
| 52 |
+
struct Point {
|
| 53 |
+
int x, y;
|
| 54 |
+
bool operator<(const Point& other) const {
|
| 55 |
+
if (x != other.x) return x < other.x;
|
| 56 |
+
return y < other.y;
|
| 57 |
+
}
|
| 58 |
+
bool operator==(const Point& other) const {
|
| 59 |
+
return x == other.x && y == other.y;
|
| 60 |
+
}
|
| 61 |
+
Point operator-(const Point& other) const {
|
| 62 |
+
return {x - other.x, y - other.y};
|
| 63 |
+
}
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
struct PointHash {
|
| 67 |
+
std::size_t operator()(const Point& p) const {
|
| 68 |
+
auto h1 = std::hash<int>{}(p.x);
|
| 69 |
+
auto h2 = std::hash<int>{}(p.y);
|
| 70 |
+
// Combining hashes: simple XOR might not be best, but often good enough.
|
| 71 |
+
// For Point, a common way is boost::hash_combine.
|
| 72 |
+
// h1 ^ (h2 << 1) is a common way that's okay.
|
| 73 |
+
return h1 ^ (h2 << 1);
|
| 74 |
+
}
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
long long cross_product(Point a, Point b) {
|
| 78 |
+
return (long long)a.x * b.y - (long long)a.y * b.x;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
struct Fish {
|
| 82 |
+
Point p;
|
| 83 |
+
int type; // 1 for mackerel, -1 for sardine
|
| 84 |
+
};
|
| 85 |
+
std::vector<Fish> all_fish_structs;
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
// === KD-TREE ===
|
| 89 |
+
struct KDNode {
|
| 90 |
+
Point pt;
|
| 91 |
+
int axis;
|
| 92 |
+
KDNode *left = nullptr, *right = nullptr;
|
| 93 |
+
int fish_struct_idx = -1;
|
| 94 |
+
};
|
| 95 |
+
KDNode* fish_kdtree_root = nullptr;
|
| 96 |
+
std::vector<int> query_rect_indices_cache_kdtree; // Cache for KD-tree query results
|
| 97 |
+
|
| 98 |
+
KDNode* build_kdtree(std::vector<int>& point_indices, int l, int r, int axis) {
|
| 99 |
+
if (l > r) return nullptr;
|
| 100 |
+
int mid = l + (r - l) / 2;
|
| 101 |
+
|
| 102 |
+
std::nth_element(point_indices.begin() + l, point_indices.begin() + mid, point_indices.begin() + r + 1,
|
| 103 |
+
[&](int a_idx, int b_idx) {
|
| 104 |
+
const Point& pa = all_fish_structs[a_idx].p;
|
| 105 |
+
const Point& pb = all_fish_structs[b_idx].p;
|
| 106 |
+
if (axis == 0) return pa.x < pb.x;
|
| 107 |
+
return pa.y < pb.y;
|
| 108 |
+
});
|
| 109 |
+
|
| 110 |
+
KDNode* node = new KDNode();
|
| 111 |
+
node->fish_struct_idx = point_indices[mid];
|
| 112 |
+
node->pt = all_fish_structs[node->fish_struct_idx].p;
|
| 113 |
+
node->axis = axis;
|
| 114 |
+
|
| 115 |
+
node->left = build_kdtree(point_indices, l, mid - 1, 1 - axis);
|
| 116 |
+
node->right = build_kdtree(point_indices, mid + 1, r, 1 - axis);
|
| 117 |
+
return node;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
void query_kdtree_rectangle(KDNode* node, int min_x, int max_x, int min_y, int max_y, std::vector<int>& result_indices) {
|
| 121 |
+
if (!node || min_x > max_x || min_y > max_y) return;
|
| 122 |
+
|
| 123 |
+
const Point& pt = node->pt;
|
| 124 |
+
if (pt.x >= min_x && pt.x <= max_x && pt.y >= min_y && pt.y <= max_y) {
|
| 125 |
+
result_indices.push_back(node->fish_struct_idx);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
if (node->axis == 0) { // Split by X
|
| 129 |
+
if (node->left && min_x <= node->pt.x) query_kdtree_rectangle(node->left, min_x, max_x, min_y, max_y, result_indices);
|
| 130 |
+
if (node->right && max_x >= node->pt.x) query_kdtree_rectangle(node->right, min_x, max_x, min_y, max_y, result_indices);
|
| 131 |
+
} else { // Split by Y
|
| 132 |
+
if (node->left && min_y <= node->pt.y) query_kdtree_rectangle(node->left, min_x, max_x, min_y, max_y, result_indices);
|
| 133 |
+
if (node->right && max_y >= node->pt.y) query_kdtree_rectangle(node->right, min_x, max_x, min_y, max_y, result_indices);
|
| 134 |
+
}
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
void delete_kdtree(KDNode* node) { // Recursively delete KD-tree nodes
|
| 138 |
+
if (!node) return;
|
| 139 |
+
delete_kdtree(node->left);
|
| 140 |
+
delete_kdtree(node->right);
|
| 141 |
+
delete node;
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
// === POLYGON UTILITIES ===
|
| 146 |
+
long long calculate_perimeter(const std::vector<Point>& poly) {
|
| 147 |
+
if (poly.size() < 2) return 0;
|
| 148 |
+
long long perimeter = 0;
|
| 149 |
+
for (size_t i = 0; i < poly.size(); ++i) {
|
| 150 |
+
const Point& p1 = poly[i];
|
| 151 |
+
const Point& p2 = poly[(i + 1) % poly.size()];
|
| 152 |
+
perimeter += std::abs(p1.x - p2.x) + std::abs(p1.y - p2.y);
|
| 153 |
+
}
|
| 154 |
+
return perimeter;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
bool is_on_segment(Point p, Point seg_a, Point seg_b) {
|
| 158 |
+
if (cross_product(seg_b - seg_a, p - seg_a) != 0) return false; // Not collinear
|
| 159 |
+
return std::min(seg_a.x, seg_b.x) <= p.x && p.x <= std::max(seg_a.x, seg_b.x) &&
|
| 160 |
+
std::min(seg_a.y, seg_b.y) <= p.y && p.y <= std::max(seg_a.y, seg_b.y);
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
bool is_inside_polygon_wn(Point p, const std::vector<Point>& polygon) {
|
| 164 |
+
int n = polygon.size();
|
| 165 |
+
if (n < 3) return false;
|
| 166 |
+
|
| 167 |
+
// Check if on boundary first
|
| 168 |
+
for (int i = 0; i < n; ++i) {
|
| 169 |
+
if (is_on_segment(p, polygon[i], polygon[(i + 1) % n])) return true;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
int wn = 0; // Winding number
|
| 173 |
+
for (int i = 0; i < n; ++i) {
|
| 174 |
+
Point p1 = polygon[i];
|
| 175 |
+
Point p2 = polygon[(i + 1) % n];
|
| 176 |
+
if (p1.y <= p.y) { // Start y <= P.y
|
| 177 |
+
if (p2.y > p.y && cross_product(p2 - p1, p - p1) > 0) { // An upward crossing, P is left of edge
|
| 178 |
+
wn++;
|
| 179 |
+
}
|
| 180 |
+
} else { // Start y > P.y
|
| 181 |
+
if (p2.y <= p.y && cross_product(p2 - p1, p - p1) < 0) { // A downward crossing, P is right of edge
|
| 182 |
+
wn--;
|
| 183 |
+
}
|
| 184 |
+
}
|
| 185 |
+
}
|
| 186 |
+
return wn != 0; // wn != 0 means inside; wn == 0 means outside.
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
// Calculate score from scratch by checking all fish
|
| 190 |
+
void calculate_score_from_scratch(const std::vector<Point>& poly, int& m_count, int& s_count) {
|
| 191 |
+
m_count = 0; s_count = 0;
|
| 192 |
+
if (poly.size() < 3) return; // Not a valid polygon for containment
|
| 193 |
+
for (const auto& fish_s : all_fish_structs) {
|
| 194 |
+
if (is_inside_polygon_wn(fish_s.p, poly)) {
|
| 195 |
+
if (fish_s.type == 1) m_count++;
|
| 196 |
+
else s_count++;
|
| 197 |
+
}
|
| 198 |
+
}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
// Calculate fish counts in a given rectangle using KD-tree
|
| 202 |
+
void calculate_score_delta_for_rectangle(int r_min_x, int r_max_x, int r_min_y, int r_max_y,
|
| 203 |
+
int& delta_m, int& delta_s) {
|
| 204 |
+
delta_m = 0; delta_s = 0;
|
| 205 |
+
query_rect_indices_cache_kdtree.clear();
|
| 206 |
+
|
| 207 |
+
if(!fish_kdtree_root || r_min_x > r_max_x || r_min_y > r_max_y) { // Invalid rectangle
|
| 208 |
+
return;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
query_kdtree_rectangle(fish_kdtree_root, r_min_x, r_max_x, r_min_y, r_max_y, query_rect_indices_cache_kdtree);
|
| 212 |
+
|
| 213 |
+
for (int fish_struct_idx : query_rect_indices_cache_kdtree) {
|
| 214 |
+
if (all_fish_structs[fish_struct_idx].type == 1) delta_m++;
|
| 215 |
+
else delta_s++;
|
| 216 |
+
}
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
// Check intersection between two orthogonal segments p1s-p1e and p2s-p2e
|
| 220 |
+
bool segments_intersect(Point p1s, Point p1e, Point p2s, Point p2e) {
|
| 221 |
+
// Normalize segments (sort endpoints to simplify overlap checks)
|
| 222 |
+
if (p1s.x == p1e.x) { if (p1s.y > p1e.y) std::swap(p1s.y, p1e.y); } // Vertical, sort by y
|
| 223 |
+
else { if (p1s.x > p1e.x) std::swap(p1s.x, p1e.x); } // Horizontal, sort by x
|
| 224 |
+
if (p2s.x == p2e.x) { if (p2s.y > p2e.y) std::swap(p2s.y, p2e.y); }
|
| 225 |
+
else { if (p2s.x > p2e.x) std::swap(p2s.x, p2e.x); }
|
| 226 |
+
|
| 227 |
+
bool seg1_is_H = (p1s.y == p1e.y);
|
| 228 |
+
bool seg2_is_H = (p2s.y == p2e.y);
|
| 229 |
+
|
| 230 |
+
if (seg1_is_H == seg2_is_H) { // Both horizontal or both vertical
|
| 231 |
+
if (seg1_is_H) { // Both horizontal
|
| 232 |
+
// Check for y-alignment and x-overlap
|
| 233 |
+
return p1s.y == p2s.y && std::max(p1s.x, p2s.x) <= std::min(p1e.x, p2e.x);
|
| 234 |
+
} else { // Both vertical
|
| 235 |
+
// Check for x-alignment and y-overlap
|
| 236 |
+
return p1s.x == p2s.x && std::max(p1s.y, p2s.y) <= std::min(p1e.y, p2e.y);
|
| 237 |
+
}
|
| 238 |
+
} else { // One horizontal, one vertical (potential T-junction or cross)
|
| 239 |
+
Point h_s = seg1_is_H ? p1s : p2s; Point h_e = seg1_is_H ? p1e : p2e;
|
| 240 |
+
Point v_s = seg1_is_H ? p2s : p1s; Point v_e = seg1_is_H ? p2e : p1e;
|
| 241 |
+
// Check if intersection point (v_s.x, h_s.y) lies on both segments
|
| 242 |
+
return v_s.x >= h_s.x && v_s.x <= h_e.x && // x_intersect within horizontal segment's x-range
|
| 243 |
+
h_s.y >= v_s.y && h_s.y <= v_e.y; // y_intersect within vertical segment's y-range
|
| 244 |
+
}
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
bool check_self_intersection_full(const std::vector<Point>& poly) {
|
| 248 |
+
int M = poly.size();
|
| 249 |
+
if (M < 4) return false;
|
| 250 |
+
for (int i = 0; i < M; ++i) {
|
| 251 |
+
Point p1s = poly[i];
|
| 252 |
+
Point p1e = poly[(i + 1) % M];
|
| 253 |
+
for (int j = i + 2; j < M; ++j) {
|
| 254 |
+
// Skip checking adjacent edges.
|
| 255 |
+
// Edge i is (poly[i], poly[(i+1)%M]). Edge j is (poly[j], poly[(j+1)%M]).
|
| 256 |
+
// If i=0 and j=M-1, then edge i is (poly[0], poly[1]) and edge j is (poly[M-1], poly[0]). These are adjacent.
|
| 257 |
+
if (i == 0 && j == M - 1) continue;
|
| 258 |
+
|
| 259 |
+
Point p2s = poly[j];
|
| 260 |
+
Point p2e = poly[(j + 1) % M];
|
| 261 |
+
if (segments_intersect(p1s, p1e, p2s, p2e)) return true;
|
| 262 |
+
}
|
| 263 |
+
}
|
| 264 |
+
return false;
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
// Local self-intersection check: checks edges starting at critical_edge_start_indices_const against all others
|
| 268 |
+
bool has_self_intersection_locally(const std::vector<Point>& poly, const std::vector<int>& critical_edge_start_indices_const) {
|
| 269 |
+
int M = poly.size();
|
| 270 |
+
if (M < 4) return false;
|
| 271 |
+
|
| 272 |
+
std::vector<int> critical_indices = critical_edge_start_indices_const; // Make a copy to modify
|
| 273 |
+
if (critical_indices.empty()) {
|
| 274 |
+
return false;
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
std::sort(critical_indices.begin(), critical_indices.end());
|
| 278 |
+
critical_indices.erase(std::unique(critical_indices.begin(), critical_indices.end()), critical_indices.end());
|
| 279 |
+
|
| 280 |
+
for (int edge1_s_idx_val_orig : critical_indices) {
|
| 281 |
+
int edge1_s_idx_val = (edge1_s_idx_val_orig % M + M) % M; // Ensure positive modulo
|
| 282 |
+
// No need to check edge1_s_idx_val bounds, it will be in [0, M-1]
|
| 283 |
+
|
| 284 |
+
Point p1s = poly[edge1_s_idx_val];
|
| 285 |
+
Point p1e = poly[(edge1_s_idx_val + 1) % M];
|
| 286 |
+
|
| 287 |
+
for (int edge2_s_idx = 0; edge2_s_idx < M; ++edge2_s_idx) {
|
| 288 |
+
bool is_adj_or_same_to_p1s_p1e = (edge2_s_idx == edge1_s_idx_val || // Same edge
|
| 289 |
+
edge2_s_idx == (edge1_s_idx_val + 1) % M || // edge2 starts where edge1 ends
|
| 290 |
+
(edge2_s_idx + 1) % M == edge1_s_idx_val); // edge2 ends where edge1 starts
|
| 291 |
+
if (is_adj_or_same_to_p1s_p1e) continue;
|
| 292 |
+
|
| 293 |
+
Point p2s = poly[edge2_s_idx];
|
| 294 |
+
Point p2e = poly[(edge2_s_idx + 1) % M];
|
| 295 |
+
if (segments_intersect(p1s, p1e, p2s, p2e)) {
|
| 296 |
+
return true;
|
| 297 |
+
}
|
| 298 |
+
}
|
| 299 |
+
}
|
| 300 |
+
return false;
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
bool has_distinct_vertices_unordered(const std::vector<Point>& poly) {
|
| 305 |
+
if (poly.empty()) return true;
|
| 306 |
+
std::unordered_set<Point, PointHash> distinct_pts;
|
| 307 |
+
distinct_pts.reserve(poly.size()); // Pre-allocate for efficiency
|
| 308 |
+
for(const auto& p : poly) {
|
| 309 |
+
if (!distinct_pts.insert(p).second) return false; // Insertion failed, duplicate found
|
| 310 |
+
}
|
| 311 |
+
return true;
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
// Check basic structural validity of the polygon
|
| 315 |
+
bool is_polygon_structurally_sound(const std::vector<Point>& poly) {
|
| 316 |
+
int m = poly.size();
|
| 317 |
+
if (m != 0 && (m < 4 || m > MAX_VERTICES)) return false;
|
| 318 |
+
if (m == 0) return true;
|
| 319 |
+
|
| 320 |
+
if (calculate_perimeter(poly) > MAX_PERIMETER) return false;
|
| 321 |
+
|
| 322 |
+
for (size_t i = 0; i < m; ++i) {
|
| 323 |
+
const Point& p1 = poly[i];
|
| 324 |
+
const Point& p2 = poly[(i + 1) % m];
|
| 325 |
+
// Check coordinate bounds for p1
|
| 326 |
+
if (p1.x < 0 || p1.x > MAX_COORD_VAL || p1.y < 0 || p1.y > MAX_COORD_VAL) return false;
|
| 327 |
+
// p2 is poly[(i+1)%m]. This check is implicitly done for p2 when it becomes p1,
|
| 328 |
+
// except for the very last p2 which is poly[0]. poly[0] is checked as p1 in its iteration.
|
| 329 |
+
// The original code had an explicit check for poly[(i+1)%m] too, which is redundant but harmless.
|
| 330 |
+
// Let's keep it for safety/clarity.
|
| 331 |
+
if (poly[(i+1)%m].x < 0 || poly[(i+1)%m].x > MAX_COORD_VAL || poly[(i+1)%m].y < 0 || poly[(i+1)%m].y > MAX_COORD_VAL) return false;
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
// Check axis-parallel and non-zero length edges
|
| 335 |
+
if (p1.x != p2.x && p1.y != p2.y) return false; // Not axis-parallel
|
| 336 |
+
if (p1.x == p2.x && p1.y == p2.y) return false; // Zero-length edge (duplicate consecutive vertices)
|
| 337 |
+
}
|
| 338 |
+
return true;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
// Initial polygon generation using Kadane's algorithm on a coarse grid.
//
// Buckets all fish into a GRID_SIZE_KADANE x GRID_SIZE_KADANE grid of net
// scores (mackerel +1, sardine -1), finds the maximum-sum sub-rectangle with
// the 2D Kadane algorithm, and returns the corresponding axis-aligned
// rectangle as a 4-vertex polygon. Falls back to the best single cell, and
// finally to a default 1x1 polygon at the origin, when no positive-sum region
// exists or the rectangle would be degenerate.
std::vector<Point> create_initial_polygon_kadane() {
    const int GRID_SIZE_KADANE = 350; // Tunable parameter
    const int NUM_VALUES_KADANE = MAX_COORD_VAL + 1;
    // Ensure ACTUAL_CELL_DIM_KADANE is at least 1 (ceil division of the
    // coordinate range by the grid resolution)
    const int ACTUAL_CELL_DIM_KADANE = std::max(1, (NUM_VALUES_KADANE + GRID_SIZE_KADANE - 1) / GRID_SIZE_KADANE);

    // Accumulate per-cell net fish score.
    std::vector<std::vector<long long>> grid_scores(GRID_SIZE_KADANE, std::vector<long long>(GRID_SIZE_KADANE, 0));
    for (const auto& fish_s : all_fish_structs) {
        int r = fish_s.p.y / ACTUAL_CELL_DIM_KADANE;
        int c = fish_s.p.x / ACTUAL_CELL_DIM_KADANE;
        // Clamp to grid bounds (last cell may be partially out of range).
        r = std::min(r, GRID_SIZE_KADANE - 1); r = std::max(r,0);
        c = std::min(c, GRID_SIZE_KADANE - 1); c = std::max(c,0);
        grid_scores[r][c] += fish_s.type; // Mackerel +1, Sardine -1
    }

    long long max_so_far = -3e18; // Sufficiently small number
    int best_r1 = 0, best_c1 = 0, best_r2 = -1, best_c2 = -1;

    // 2D Kadane's algorithm: fix a column range [c1,c2], collapse it into a
    // 1D row array, then run 1D Kadane over the rows.
    for (int c1_idx = 0; c1_idx < GRID_SIZE_KADANE; ++c1_idx) {
        std::vector<long long> col_strip_sum(GRID_SIZE_KADANE, 0);
        for (int c2_idx = c1_idx; c2_idx < GRID_SIZE_KADANE; ++c2_idx) {
            for (int r_idx = 0; r_idx < GRID_SIZE_KADANE; ++r_idx) {
                col_strip_sum[r_idx] += grid_scores[r_idx][c2_idx];
            }

            // 1D Kadane's on col_strip_sum
            long long current_strip_val = 0;
            int current_r1_1d = 0;
            for (int r2_idx_1d = 0; r2_idx_1d < GRID_SIZE_KADANE; ++r2_idx_1d) {
                long long val_here = col_strip_sum[r2_idx_1d];
                if (current_strip_val > 0 && current_strip_val + val_here > 0) { // Extend if sum remains positive
                    current_strip_val += val_here;
                } else { // Start new subarray
                    current_strip_val = val_here;
                    current_r1_1d = r2_idx_1d;
                }

                if (current_strip_val > max_so_far) {
                    max_so_far = current_strip_val;
                    best_r1 = current_r1_1d;
                    best_r2 = r2_idx_1d;
                    best_c1 = c1_idx;
                    best_c2 = c2_idx;
                }
            }
        }
    }

    std::vector<Point> default_poly = {{0,0}, {1,0}, {1,1}, {0,1}}; // Minimal valid polygon

    // If no positive sum found, or issue, fall back to the best single cell.
    if (best_r2 == -1 || max_so_far <=0 ) {
        max_so_far = -3e18; // Reset search for single best cell
        bool found_cell = false;
        for(int r=0; r<GRID_SIZE_KADANE; ++r) for(int c=0; c<GRID_SIZE_KADANE; ++c) {
            if(grid_scores[r][c] > max_so_far) {
                max_so_far = grid_scores[r][c];
                best_r1 = r; best_r2 = r; // Single cell
                best_c1 = c; best_c2 = c;
                found_cell = true;
            }
        }
        if (!found_cell || max_so_far <=0) return default_poly; // Still no good cell, return default
    }

    // Convert grid cell indices to actual coordinates (inclusive cell spans).
    int x_start = best_c1 * ACTUAL_CELL_DIM_KADANE;
    int y_start = best_r1 * ACTUAL_CELL_DIM_KADANE;
    int x_end = (best_c2 + 1) * ACTUAL_CELL_DIM_KADANE -1;
    int y_end = (best_r2 + 1) * ACTUAL_CELL_DIM_KADANE -1;

    // Clamp coordinates to valid range
    x_start = std::max(0, std::min(MAX_COORD_VAL, x_start));
    y_start = std::max(0, std::min(MAX_COORD_VAL, y_start));
    x_end = std::max(x_start, std::min(MAX_COORD_VAL, x_end)); // Ensure x_end >= x_start
    y_end = std::max(y_start, std::min(MAX_COORD_VAL, y_end)); // Ensure y_end >= y_start

    // Ensure non-zero dimensions for the polygon, minimum 1x1 actual area.
    if (x_start == x_end) {
        if (x_start < MAX_COORD_VAL) x_end = x_start + 1;
        else if (x_start > 0) x_start = x_start -1; // Can't expand right, try expand left
        else return default_poly; // Single point at MAX_COORD_VAL, cannot form 1x1
    }
    if (y_start == y_end) {
        if (y_start < MAX_COORD_VAL) y_end = y_start + 1;
        else if (y_start > 0) y_start = y_start - 1;
        else return default_poly;
    }
    // After adjustment, if still degenerate, use default. This is rare.
    if (x_start == x_end || y_start == y_end) return default_poly;

    // Counter-clockwise rectangle over the chosen region.
    std::vector<Point> initial_poly = {
        {x_start, y_start}, {x_end, y_start}, {x_end, y_end}, {x_start, y_end}
    };
    return initial_poly;
}
|
| 440 |
+
|
| 441 |
+
// === SIMULATED ANNEALING ===
|
| 442 |
+
struct SAState {
|
| 443 |
+
std::vector<Point> poly;
|
| 444 |
+
int m_count;
|
| 445 |
+
int s_count;
|
| 446 |
+
|
| 447 |
+
SAState() : m_count(0), s_count(0) {}
|
| 448 |
+
|
| 449 |
+
long long get_objective_score() const {
|
| 450 |
+
return std::max(0LL, (long long)m_count - s_count + 1);
|
| 451 |
+
}
|
| 452 |
+
double get_raw_objective_score() const { // Used for SA acceptance probability
|
| 453 |
+
return (double)m_count - s_count;
|
| 454 |
+
}
|
| 455 |
+
};
|
| 456 |
+
|
| 457 |
+
// Calculates signed area * 2 of a polygon (shoelace formula)
|
| 458 |
+
long long polygon_signed_area_times_2(const std::vector<Point>& poly) {
|
| 459 |
+
if (poly.size() < 3) return 0;
|
| 460 |
+
long long area_sum = 0;
|
| 461 |
+
for (size_t i = 0; i < poly.size(); ++i) {
|
| 462 |
+
const Point& p1 = poly[i];
|
| 463 |
+
const Point& p2 = poly[(i + 1) % poly.size()];
|
| 464 |
+
area_sum += (long long)(p1.x - p2.x) * (p1.y + p2.y); // (x1-x2)(y1+y2) variant
|
| 465 |
+
}
|
| 466 |
+
return area_sum; // Positive for CCW, negative for CW
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
// Scratch buffer listing edge start-indices touched by the latest SA move;
// consumed by has_self_intersection_locally to avoid a full O(M^2) check.
std::vector<int> sa_critical_edge_indices_cache;

// Guide coordinates for SA moves: candidate values to snap edges/bulges to.
std::vector<int> static_x_guides;     // fixed guides derived from fish X positions
std::vector<int> static_y_guides;     // fixed guides derived from fish Y positions
std::vector<int> best_poly_x_guides;  // dynamic guides: X coords of the best polygon so far
std::vector<int> best_poly_y_guides;  // dynamic guides: Y coords of the best polygon so far
|
| 476 |
+
|
| 477 |
+
void update_best_poly_guides(const SAState& new_best_state) {
|
| 478 |
+
best_poly_x_guides.clear();
|
| 479 |
+
best_poly_y_guides.clear();
|
| 480 |
+
if (new_best_state.poly.empty()) return;
|
| 481 |
+
|
| 482 |
+
std::set<int> temp_x_set, temp_y_set;
|
| 483 |
+
for (const auto& p : new_best_state.poly) {
|
| 484 |
+
temp_x_set.insert(p.x);
|
| 485 |
+
temp_y_set.insert(p.y);
|
| 486 |
+
}
|
| 487 |
+
best_poly_x_guides.assign(temp_x_set.begin(), temp_x_set.end());
|
| 488 |
+
best_poly_y_guides.assign(temp_y_set.begin(), temp_y_set.end());
|
| 489 |
+
}
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
// Main simulated-annealing driver.
//
// Starts from the Kadane rectangle (or a default 1x1 square if that is
// invalid), then repeatedly perturbs the polygon with three move types —
// move-edge, add-bulge, simplify (remove a collinear vertex) — accepting
// candidates by the Metropolis criterion on the raw (unclamped) objective.
// Fish-count deltas are maintained incrementally per move; the sign of each
// delta depends on edge direction and polygon orientation (current_signed_area).
// At the end, the best state is re-validated with the full intersection check
// and printed to stdout ("vertex-count\n x y per line").
// NOTE(review): relies on globals (rng, global_timer, all_fish_structs,
// guide vectors, sa_critical_edge_indices_cache) defined elsewhere in the file.
void simulated_annealing_main() {
    SAState current_state;
    current_state.poly = create_initial_polygon_kadane();
    calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);

    std::vector<Point> default_tiny_poly = {{0,0}, {1,0}, {1,1}, {0,1}};

    // Ensure initial polygon is valid, otherwise use default
    bool current_poly_initial_valid = is_polygon_structurally_sound(current_state.poly) &&
                                      current_state.poly.size() >= 4 &&
                                      has_distinct_vertices_unordered(current_state.poly) &&
                                      !check_self_intersection_full(current_state.poly);

    if (!current_poly_initial_valid) {
        current_state.poly = default_tiny_poly;
        calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);
    }

    SAState best_state = current_state;
    update_best_poly_guides(best_state);

    // Prepare static guide coordinates from fish locations (each fish
    // contributes x-1, x, x+1 / y-1, y, y+1, clamped to the board).
    std::set<int> sx_set, sy_set;
    for(const auto& f_s : all_fish_structs) {
        sx_set.insert(f_s.p.x); sx_set.insert(std::max(0,f_s.p.x-1)); sx_set.insert(std::min(MAX_COORD_VAL, f_s.p.x+1));
        sy_set.insert(f_s.p.y); sy_set.insert(std::max(0,f_s.p.y-1)); sy_set.insert(std::min(MAX_COORD_VAL, f_s.p.y+1));
    }
    sx_set.insert(0); sx_set.insert(MAX_COORD_VAL); // Boundary guides
    sy_set.insert(0); sy_set.insert(MAX_COORD_VAL);

    static_x_guides.assign(sx_set.begin(), sx_set.end());
    static_y_guides.assign(sy_set.begin(), sy_set.end());

    double start_temp = 150.0;
    double end_temp = 0.01;

    long long current_signed_area = polygon_signed_area_times_2(current_state.poly);
    if (current_signed_area == 0 && current_state.poly.size() >=3) {
        current_signed_area = 1; // Avoid issues with zero area for sign logic
    }

    sa_critical_edge_indices_cache.reserve(10); // Max expected critical edges for current moves

    while (global_timer.elapsed() < ACTUAL_TIME_LIMIT_SECONDS) {
        double time_ratio = global_timer.elapsed() / ACTUAL_TIME_LIMIT_SECONDS;
        // Geometric cooling schedule from start_temp to end_temp.
        double temperature = start_temp * std::pow(end_temp / start_temp, time_ratio);
        // Fine-tune temperature near end or if it drops too fast
        if (temperature < end_temp && time_ratio < 0.95) temperature = end_temp;
        if (time_ratio > 0.95 && temperature > end_temp * 0.1) temperature = end_temp * 0.1; // Lower temp aggressively at the very end

        if (current_state.poly.size() < 4) { // Should not happen if logic is correct, but as a safeguard
            current_state.poly = default_tiny_poly;
            calculate_score_from_scratch(current_state.poly, current_state.m_count, current_state.s_count);
            current_signed_area = polygon_signed_area_times_2(current_state.poly);
            if (current_signed_area == 0 && current_state.poly.size() >=3) current_signed_area = 1;
        }

        SAState candidate_state = current_state;
        sa_critical_edge_indices_cache.clear();

        int move_type_roll = rng.next_int(100);
        // Base probabilities for moves (percent out of 100)
        int move_edge_prob = 48;
        int add_bulge_prob = 24;
        // Remaining probability for simplify polygon move

        long long current_poly_perimeter_cached = 0;
        bool check_limits = (candidate_state.poly.size() > 200 || candidate_state.poly.size() > MAX_VERTICES - 20);
        if (check_limits && candidate_state.poly.size() > 200) {
            // Only calculate perimeter if near limits and already large, it's somewhat expensive
            current_poly_perimeter_cached = calculate_perimeter(candidate_state.poly);
        }

        // Adjust move probabilities based on polygon size/perimeter: the closer
        // we are to the vertex/perimeter caps, the less we add vertices.
        if (candidate_state.poly.size() + 2 > MAX_VERTICES || (check_limits && current_poly_perimeter_cached > MAX_PERIMETER * 0.95)) { // If adding bulge would exceed max vertices
            move_edge_prob = 45; add_bulge_prob = 0; // Heavily restrict adding vertices
        } else if (candidate_state.poly.size() > 200 || (check_limits && current_poly_perimeter_cached > MAX_PERIMETER * 0.9)) {
            move_edge_prob = 40; add_bulge_prob = 15;
        } else if (candidate_state.poly.size() > 50) {
            move_edge_prob = 45; add_bulge_prob = 20;
        }

        bool move_made = false;

        // Probabilities for snapping to guide coordinates
        double prob_dynamic_guide_snap = 0.20 + 0.20 * time_ratio;
        double prob_static_guide_snap_if_not_dynamic = 0.75;

        if (move_type_roll < move_edge_prob && candidate_state.poly.size() >= 4 ) { // Move Edge
            int edge_idx = rng.next_int(candidate_state.poly.size());
            Point p1_orig = candidate_state.poly[edge_idx];
            Point p2_orig = candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()];

            int new_coord_val = -1;
            int cur_delta_m=0, cur_delta_s=0;
            bool coord_selected_successfully = false;

            // Determine which guides are relevant (X or Y)
            const std::vector<int>* relevant_dyn_guides = (p1_orig.x == p2_orig.x) ? &best_poly_x_guides : &best_poly_y_guides;
            const std::vector<int>* relevant_static_guides = (p1_orig.x == p2_orig.x) ? &static_x_guides : &static_y_guides;

            // Try snapping to dynamic (best poly) guides
            if (!relevant_dyn_guides->empty() && rng.next_double() < prob_dynamic_guide_snap) {
                new_coord_val = (*relevant_dyn_guides)[rng.next_int(relevant_dyn_guides->size())];
                coord_selected_successfully = true;
            }
            // If not, try snapping to static (fish) guides
            if (!coord_selected_successfully) {
                if (!relevant_static_guides->empty() && rng.next_double() < prob_static_guide_snap_if_not_dynamic) {
                    new_coord_val = (*relevant_static_guides)[rng.next_int(relevant_static_guides->size())];
                    coord_selected_successfully = true;
                }
            }
            // If still not selected, use random displacement
            if (!coord_selected_successfully) {
                double step_factor = std::max(0.1, 1.0 - time_ratio * 0.95); // Step size decreases over time
                int base_step_max = std::max(1, (int)( (MAX_COORD_VAL/150.0) * step_factor + 1 ) );
                int random_displacement = rng.next_int(-base_step_max, base_step_max);
                if (time_ratio > 0.75 && rng.next_double() < 0.7) { // Very small steps near end
                    random_displacement = rng.next_int(-2,2);
                }
                if (random_displacement == 0) random_displacement = (rng.next_double() < 0.5) ? -1:1;

                if (p1_orig.x == p2_orig.x) new_coord_val = p1_orig.x + random_displacement; // Vertical edge, move X
                else new_coord_val = p1_orig.y + random_displacement; // Horizontal edge, move Y
            }

            new_coord_val = std::max(0, std::min(MAX_COORD_VAL, new_coord_val)); // Clamp to bounds

            if (p1_orig.x == p2_orig.x) { // Vertical edge: (X_orig, Y_s) to (X_orig, Y_e)
                if (new_coord_val == p1_orig.x) {move_made = false; goto end_move_attempt_label;} // No change

                // Swept rectangle of columns strictly between old and new X.
                int query_min_x, query_max_x;
                if (new_coord_val > p1_orig.x) { // Moved right
                    query_min_x = p1_orig.x + 1;
                    query_max_x = new_coord_val;
                } else { // Moved left (new_coord_val < p1_orig.x)
                    query_min_x = new_coord_val;
                    query_max_x = p1_orig.x - 1;
                }

                calculate_score_delta_for_rectangle(
                    query_min_x, query_max_x,
                    std::min(p1_orig.y, p2_orig.y), std::max(p1_orig.y, p2_orig.y),
                    cur_delta_m, cur_delta_s);

                // Sign of the delta: +1 if the swept rectangle is being added
                // to the interior, -1 if removed.
                int sign = (new_coord_val > p1_orig.x) ? 1 : -1; // Moving right is positive X change
                if (p1_orig.y > p2_orig.y) sign *= -1; // Correct for edge Y-direction (p1_orig.y to p2_orig.y)
                if (current_signed_area < 0) sign *= -1; // Correct for CW polygon (area < 0)

                candidate_state.poly[edge_idx].x = new_coord_val;
                candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()].x = new_coord_val;
                candidate_state.m_count += sign * cur_delta_m;
                candidate_state.s_count += sign * cur_delta_s;
            } else { // Horizontal edge: (X_s, Y_orig) to (X_e, Y_orig)
                if (new_coord_val == p1_orig.y) {move_made = false; goto end_move_attempt_label;} // No change

                int query_min_y, query_max_y;
                if (new_coord_val > p1_orig.y) { // Moved up (Y increases)
                    query_min_y = p1_orig.y + 1;
                    query_max_y = new_coord_val;
                } else { // Moved down (Y decreases, new_coord_val < p1_orig.y)
                    query_min_y = new_coord_val;
                    query_max_y = p1_orig.y - 1;
                }

                calculate_score_delta_for_rectangle(
                    std::min(p1_orig.x, p2_orig.x), std::max(p1_orig.x, p2_orig.x),
                    query_min_y, query_max_y,
                    cur_delta_m, cur_delta_s);

                int sign = (new_coord_val < p1_orig.y) ? 1 : -1; // Moving "down" (Y decreases) means positive sign if it expands area
                if (p1_orig.x > p2_orig.x) sign *= -1; // Correct for edge X-direction (p1_orig.x to p2_orig.x)
                if (current_signed_area < 0) sign *= -1; // Correct for CW polygon

                candidate_state.poly[edge_idx].y = new_coord_val;
                candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()].y = new_coord_val;
                candidate_state.m_count += sign * cur_delta_m;
                candidate_state.s_count += sign * cur_delta_s;
            }
            // The moved edge plus its two neighbors are the only ones that can
            // newly intersect — record them for the local check.
            int M_cand = candidate_state.poly.size();
            sa_critical_edge_indices_cache.push_back((edge_idx - 1 + M_cand) % M_cand);
            sa_critical_edge_indices_cache.push_back(edge_idx);
            sa_critical_edge_indices_cache.push_back((edge_idx + 1) % M_cand);
            move_made = true;

        } else if (move_type_roll < move_edge_prob + add_bulge_prob && candidate_state.poly.size() + 2 <= MAX_VERTICES && candidate_state.poly.size() >=4) { // Add Bulge
            int edge_idx = rng.next_int(candidate_state.poly.size());
            Point p_s = candidate_state.poly[edge_idx]; // Start point of edge
            Point p_e = candidate_state.poly[(edge_idx + 1) % candidate_state.poly.size()]; // End point of edge

            int new_coord_val = -1;
            bool coord_selected_successfully = false;

            const std::vector<int>* relevant_dyn_guides = (p_s.x == p_e.x) ? &best_poly_x_guides : &best_poly_y_guides;
            const std::vector<int>* relevant_static_guides = (p_s.x == p_e.x) ? &static_x_guides : &static_y_guides;

            // Try snapping bulge coord
            if (!relevant_dyn_guides->empty() && rng.next_double() < prob_dynamic_guide_snap) {
                new_coord_val = (*relevant_dyn_guides)[rng.next_int(relevant_dyn_guides->size())];
                coord_selected_successfully = true;
            }
            if (!coord_selected_successfully) {
                if (!relevant_static_guides->empty() && rng.next_double() < prob_static_guide_snap_if_not_dynamic) {
                    new_coord_val = (*relevant_static_guides)[rng.next_int(relevant_static_guides->size())];
                    coord_selected_successfully = true;
                }
            }
            // If not snapped, random depth for bulge
            if (!coord_selected_successfully) {
                double depth_factor = std::max(0.1, 1.0 - time_ratio * 0.9);
                int base_depth_max = std::max(1, (int)( (MAX_COORD_VAL/300.0) * depth_factor + 1 ) );
                int random_abs_depth = rng.next_int(1, base_depth_max);
                if (time_ratio > 0.75 && rng.next_double() < 0.7) {
                    random_abs_depth = rng.next_int(1,2);
                }
                int bulge_dir_sign = (rng.next_double() < 0.5) ? 1 : -1; // Randomly outwards or inwards relative to edge line
                if (p_s.x == p_e.x) new_coord_val = p_s.x + bulge_dir_sign * random_abs_depth; // Vertical edge, bulge in X
                else new_coord_val = p_s.y + bulge_dir_sign * random_abs_depth; // Horizontal edge, bulge in Y
            }

            new_coord_val = std::max(0, std::min(MAX_COORD_VAL, new_coord_val));

            Point v1_mod, v2_mod; // New vertices for the bulge
            int cur_delta_m=0, cur_delta_s=0;

            if (p_s.x == p_e.x) { // Original edge is vertical
                if (new_coord_val == p_s.x) {move_made = false; goto end_move_attempt_label;} // Bulge is flat
                v1_mod = {new_coord_val, p_s.y}; v2_mod = {new_coord_val, p_e.y};
                // Rectangle for delta score is between X=p_s.x and X=new_coord_val, over Y-span of original edge
                calculate_score_delta_for_rectangle(
                    std::min(p_s.x, new_coord_val), std::max(p_s.x, new_coord_val),
                    std::min(p_s.y,p_e.y), std::max(p_s.y,p_e.y),
                    cur_delta_m, cur_delta_s);
                int sign = (new_coord_val > p_s.x) ? 1 : -1; // Bulge to the right of edge is positive X change
                if (p_s.y > p_e.y) sign *= -1; // Correct for edge Y-direction
                if (current_signed_area < 0) sign *= -1; // Correct for CW polygon
                candidate_state.m_count += sign * cur_delta_m;
                candidate_state.s_count += sign * cur_delta_s;
            } else { // Original edge is horizontal
                if (new_coord_val == p_s.y) {move_made = false; goto end_move_attempt_label;} // Bulge is flat
                v1_mod = {p_s.x, new_coord_val}; v2_mod = {p_e.x, new_coord_val};
                // Rectangle for delta score is between Y=p_s.y and Y=new_coord_val, over X-span of original edge
                calculate_score_delta_for_rectangle(
                    std::min(p_s.x,p_e.x), std::max(p_s.x,p_e.x),
                    std::min(p_s.y, new_coord_val), std::max(p_s.y, new_coord_val),
                    cur_delta_m, cur_delta_s);
                int sign = (new_coord_val < p_s.y) ? 1 : -1; // Bulge "downwards" (Y decreases) means positive sign if it expands area
                if (p_s.x > p_e.x) sign *= -1; // Correct for edge X-direction
                if (current_signed_area < 0) sign *= -1; // Correct for CW polygon
                candidate_state.m_count += sign * cur_delta_m;
                candidate_state.s_count += sign * cur_delta_s;
            }

            // Insert new vertices into polygon (after the edge's start vertex)
            auto insert_pos_iter = candidate_state.poly.begin() + (edge_idx + 1);
            insert_pos_iter = candidate_state.poly.insert(insert_pos_iter, v1_mod);
            candidate_state.poly.insert(insert_pos_iter + 1, v2_mod);

            // Mark affected edges/vertices as critical for local intersection check
            sa_critical_edge_indices_cache.push_back(edge_idx);
            sa_critical_edge_indices_cache.push_back(edge_idx + 1);
            sa_critical_edge_indices_cache.push_back(edge_idx + 2);
            move_made = true;

        } else if (candidate_state.poly.size() > 4) { // Simplify Polygon (remove collinear vertex)
            int R_start_idx = rng.next_int(candidate_state.poly.size()); // Random start for search
            bool simplified_this_turn = false;
            for(int k_offset=0; k_offset < candidate_state.poly.size() ; ++k_offset) {
                int current_poly_size_before_erase = candidate_state.poly.size();
                if (current_poly_size_before_erase <= 4) break; // Cannot simplify further

                int p1_idx = (R_start_idx + k_offset) % current_poly_size_before_erase;
                int p0_idx_old = (p1_idx - 1 + current_poly_size_before_erase) % current_poly_size_before_erase;
                int p2_idx_old = (p1_idx + 1) % current_poly_size_before_erase;

                const Point& p0 = candidate_state.poly[p0_idx_old];
                const Point& p1 = candidate_state.poly[p1_idx];
                const Point& p2 = candidate_state.poly[p2_idx_old];

                bool collinear_x = (p0.x == p1.x && p1.x == p2.x);
                bool collinear_y = (p0.y == p1.y && p1.y == p2.y);

                if (collinear_x || collinear_y) {
                    // p1 lies on the segment p0-p2; removing it does not change
                    // the polygon's shape or the fish counts.
                    candidate_state.poly.erase(candidate_state.poly.begin() + p1_idx);
                    simplified_this_turn = true;

                    int M_cand = candidate_state.poly.size();
                    int critical_vertex_idx_in_new_poly;
                    // Vertex p0 (at p0_idx_old) forms the new corner. Its index in new poly:
                    if (p1_idx == 0) { // If p1 was poly[0], p0 was poly[last]. p0 is now poly[new_last]
                        critical_vertex_idx_in_new_poly = M_cand -1;
                    } else { // Otherwise, p0's index p1_idx-1 is preserved.
                        critical_vertex_idx_in_new_poly = p1_idx - 1;
                    }

                    if (!candidate_state.poly.empty()) {
                        sa_critical_edge_indices_cache.push_back((critical_vertex_idx_in_new_poly - 1 + M_cand) % M_cand);
                        sa_critical_edge_indices_cache.push_back(critical_vertex_idx_in_new_poly);
                        sa_critical_edge_indices_cache.push_back((critical_vertex_idx_in_new_poly + 1) % M_cand);
                    }
                    break; // Simplified one vertex, enough for this turn
                }
            }
            if (!simplified_this_turn) {move_made = false; goto end_move_attempt_label;} // No simplification found/possible
            move_made = true;
        }

        end_move_attempt_label:; // Label for goto if a move is aborted (e.g. no change)
        if (!move_made) continue; // No valid move attempted or made

        // Validate candidate polygon
        if (!is_polygon_structurally_sound(candidate_state.poly) || candidate_state.poly.size() < 4 ||
            !has_distinct_vertices_unordered(candidate_state.poly)) {
            continue; // Invalid basic structure or duplicate vertices
        }

        if (has_self_intersection_locally(candidate_state.poly, sa_critical_edge_indices_cache)) {
            continue; // Self-intersection found
        }

        // Accept or reject candidate based on SA criteria (Metropolis rule on
        // the raw, unclamped objective).
        double candidate_raw_obj_score = candidate_state.get_raw_objective_score();
        double current_raw_obj_score = current_state.get_raw_objective_score();
        double score_diff = candidate_raw_obj_score - current_raw_obj_score;

        if (score_diff >= 0 || (temperature > 1e-9 && rng.next_double() < std::exp(score_diff / temperature))) {
            current_state = std::move(candidate_state); // Accept move
            current_signed_area = polygon_signed_area_times_2(current_state.poly); // Update signed area
            if (current_signed_area == 0 && !current_state.poly.empty() && current_state.poly.size() >=3) current_signed_area = 1; // Handle degenerate

            if (current_state.get_objective_score() > best_state.get_objective_score()) {
                best_state = current_state; // New best solution found
                update_best_poly_guides(best_state); // Update dynamic guides
            }
        }
    } // End SA loop

    // Final validation of the best found state
    bool needs_reset_to_default = false;
    if (!is_polygon_structurally_sound(best_state.poly) ||
        best_state.poly.size() < 4 ||
        !has_distinct_vertices_unordered(best_state.poly) ||
        check_self_intersection_full(best_state.poly) ) { // Full intersection check on best
        needs_reset_to_default = true;
    }

    if (needs_reset_to_default) { // If best state is invalid, revert to default
        best_state.poly = default_tiny_poly;
        calculate_score_from_scratch(best_state.poly, best_state.m_count, best_state.s_count);
    }

    // The score is max(0, M-S+1): a best score of 0 means M-S <= -1, while the
    // default tiny polygon (which typically contains no fish) scores at least 1.
    // If SA ended on a 0-score polygon, compare against the tiny square and
    // take whichever is objectively better.
    if (best_state.get_objective_score() == 0) {
        SAState temp_default_state; // Create a temporary default state to calculate its score
        temp_default_state.poly = default_tiny_poly;
        calculate_score_from_scratch(temp_default_state.poly, temp_default_state.m_count, temp_default_state.s_count);
        if (best_state.get_objective_score() < temp_default_state.get_objective_score()) {
            best_state = temp_default_state;
        }
    }

    // Output the best polygon: vertex count, then one "x y" pair per line.
    std::cout << best_state.poly.size() << "\n";
    for (const auto& p : best_state.poly) {
        std::cout << p.x << " " << p.y << "\n";
    }
}
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
// Program entry point: reads the instance from stdin, builds the fish KD-tree,
// runs the annealer (which prints the answer), and frees the tree.
// Input format: N_half, then N_half mackerel coordinates, then N_half sardine
// coordinates.
int main(int argc, char *argv[]) {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(NULL);

    // Allow overriding time limit via command line arg, for local testing
    if (argc > 1) {
        try {
            ACTUAL_TIME_LIMIT_SECONDS = std::stod(argv[1]);
        } catch (const std::exception& e) { /* keep default if parse fails */ }
    }
    ACTUAL_TIME_LIMIT_SECONDS -= TIME_LIMIT_SECONDS_SAFETY_MARGIN;
    if (ACTUAL_TIME_LIMIT_SECONDS < 0.2) ACTUAL_TIME_LIMIT_SECONDS = 0.2; // Minimum sensible time limit

    // Pre-size shared scratch buffers to avoid reallocation during the search.
    query_rect_indices_cache_kdtree.reserve(2 * 5000 + 500); // N_half max is 5000
    sa_critical_edge_indices_cache.reserve(10); // Small, for a few critical edges

    int N_half; // Number of mackerels (and sardines)
    std::cin >> N_half;

    all_fish_structs.resize(2 * N_half);
    std::vector<int> fish_indices_for_kdtree(2 * N_half);
    if (2 * N_half > 0) {
        std::iota(fish_indices_for_kdtree.begin(), fish_indices_for_kdtree.end(), 0);
    }

    // Read mackerels (type +1)
    for (int i = 0; i < N_half; ++i) {
        std::cin >> all_fish_structs[i].p.x >> all_fish_structs[i].p.y;
        all_fish_structs[i].type = 1;
    }
    // Read sardines (type -1)
    for (int i = 0; i < N_half; ++i) {
        std::cin >> all_fish_structs[N_half + i].p.x >> all_fish_structs[N_half + i].p.y;
        all_fish_structs[N_half + i].type = -1;
    }

    // Build KD-tree if there are fish
    if (!all_fish_structs.empty()) {
        fish_kdtree_root = build_kdtree(fish_indices_for_kdtree, 0, (int)all_fish_structs.size() - 1, 0);
    }

    simulated_annealing_main();

    // Clean up KD-tree memory
    if (fish_kdtree_root) delete_kdtree(fish_kdtree_root);

    return 0;
}
|
| 925 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale-bench-lite-problems/ahc046/best_program.cpp
ADDED
|
@@ -0,0 +1,1111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <queue>
|
| 6 |
+
#include <algorithm>
|
| 7 |
+
#include <tuple>
|
| 8 |
+
#include <array>
|
| 9 |
+
#include <chrono>
|
| 10 |
+
#include <random>
|
| 11 |
+
#include <cmath> // For std::exp, std::pow
|
| 12 |
+
#include <numeric> // For std::iota
|
| 13 |
+
#include <cstring> // For std::memcmp used in grid-bound caches
|
| 14 |
+
|
| 15 |
+
// Constants
const int N_GRID = 20;                          // board is N_GRID x N_GRID cells
const int M_TARGETS_INPUT = 40;                 // number of targets in the input
const int NUM_SEGMENTS = M_TARGETS_INPUT - 1;   // consecutive target pairs to traverse
const int INF_COST = 1e9;                       // sentinel meaning "unreachable"
const int MAX_TOTAL_TURNS = 2 * N_GRID * M_TARGETS_INPUT; // 2*20*40 = 1600

// Randomness: seeded from the steady clock so each run explores differently.
unsigned int RND_SEED = std::chrono::steady_clock::now().time_since_epoch().count();
std::mt19937 rng(RND_SEED);
|
| 25 |
+
|
| 26 |
+
// Coordinates
|
| 27 |
+
// A grid coordinate (row, column) with equality and lexicographic ordering
// (row-major: compare rows first, then columns).
struct Pos {
    int r, c;
    bool operator==(const Pos& other) const { return r == other.r && c == other.c; }
    bool operator!=(const Pos& other) const { return !(*this == other); }
    bool operator<(const Pos& other) const {
        return (r != other.r) ? (r < other.r) : (c < other.c);
    }
};
|
| 36 |
+
// Sentinel position used for "no position" / "no intermediate to avoid".
const Pos INVALID_POS = {-1, -1};

// Grid state
using Grid = std::array<std::array<bool, N_GRID>, N_GRID>; // true if block exists
|
| 40 |
+
|
| 41 |
+
// True iff p lies inside the N_GRID x N_GRID board.
bool is_valid_pos(Pos p) {
    const bool row_ok = (0 <= p.r) && (p.r < N_GRID);
    const bool col_ok = (0 <= p.c) && (p.c < N_GRID);
    return row_ok && col_ok;
}
|
| 44 |
+
|
| 45 |
+
// Reports whether p is impassable: out-of-bounds cells count as blocked,
// otherwise the grid's block flag decides.
bool is_blocked_pos(Pos p, const Grid& grid) {
    return !is_valid_pos(p) || grid[p.r][p.c];
}
|
| 49 |
+
|
| 50 |
+
// Flips the block flag at p; positions outside the board are ignored.
void toggle_block_pos(Pos p, Grid& grid) {
    if (!is_valid_pos(p)) return;
    grid[p.r][p.c] = !grid[p.r][p.c];
}
|
| 55 |
+
|
| 56 |
+
// Directions: row/column deltas, printable chars, and each direction's inverse.
const int DR[] = {-1, 1, 0, 0}; // U, D, L, R
const int DC[] = {0, 0, -1, 1};
const char DIR_CHARS[] = {'U', 'D', 'L', 'R'};
const int DIR_REV_IDX[] = {1, 0, 3, 2}; // U(0)<->D(1), L(2)<->R(3)

// Global BFS structures for optimization.
// A per-call generation stamp lets BFS reuse these arrays without clearing:
// a cell is "visited" only if its stamp equals the current generation id.
unsigned int g_bfs_generation_id = 0;
std::array<std::array<unsigned int, N_GRID>, N_GRID> g_bfs_cell_last_visited_generation;
std::array<std::array<int, N_GRID>, N_GRID> g_bfs_cell_dist;
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
// Walks parent links backwards from dest_pos to start_pos and returns the
// action string (action-kind char followed by direction char, per step)
// in forward order.
std::string reconstruct_path_from_came_from(Pos start_pos, Pos dest_pos,
        const std::array<std::array<std::pair<Pos, std::pair<char, char>>, N_GRID>, N_GRID>& came_from_data) {
    std::string backwards;
    for (Pos node = dest_pos; node != start_pos && is_valid_pos(node); ) {
        const auto& step = came_from_data[node.r][node.c];
        // Append direction then kind; the final reverse restores kind-then-direction order.
        backwards.push_back(step.second.second);
        backwards.push_back(step.second.first);
        node = step.first;
    }
    std::reverse(backwards.begin(), backwards.end());
    return backwards;
}
|
| 80 |
+
|
| 81 |
+
// Result of a single-destination BFS: turn count plus the optional action string.
struct BFSResult { int cost; std::string actions_str; };
|
| 82 |
+
|
| 83 |
+
/*
|
| 84 |
+
bfs:
|
| 85 |
+
- Purpose: shortest-path search over states with unit-cost edges for Move/Slide.
|
| 86 |
+
- Technique: Precompute nearest blocked cell in four directions for every cell
|
| 87 |
+
to get O(1) slide landings instead of O(N) scanning per slide.
|
| 88 |
+
- Intermediate avoid: when avoid_intermediate_target=true, disallow any slide
|
| 89 |
+
that would pass over that tile (except when it equals the destination).
|
| 90 |
+
- If build_action_str=true, store parent and reconstruct the action string.
|
| 91 |
+
*/
|
| 92 |
+
// Shortest-path BFS from start_pos to dest_pos over unit-cost Move/Slide edges.
// Optionally refuses to touch/cross intermediate_target_to_avoid (unless it is
// the destination itself), and optionally records parents to build the action
// string. Returns {INF_COST, ""} when the destination is unreachable.
BFSResult bfs(Pos start_pos, Pos dest_pos, const Grid& grid, Pos intermediate_target_to_avoid, bool avoid_intermediate_target, bool build_action_str) {
    g_bfs_generation_id++; // new generation: lazily invalidates visited/dist arrays

    // Precompute nearest blocked indices with caching per grid to avoid duplicate work within hot loops.
    // Cache key: raw bytes of Grid. Safe because Grid is a POD of bools in std::array.
    static Grid last_grid_for_bounds_bfs;
    static bool has_bounds_cache_bfs = false;
    static std::array<std::array<int, N_GRID>, N_GRID> LBc, RBc, UBc, DBc;

    bool reuse_bounds = has_bounds_cache_bfs && (std::memcmp(&grid, &last_grid_for_bounds_bfs, sizeof(Grid)) == 0);
    if (!reuse_bounds) {
        // LBc/RBc: nearest blocked column at-or-left / at-or-right of each cell
        // (sentinels -1 and N_GRID when none exists in that direction).
        for (int r = 0; r < N_GRID; ++r) {
            int last = -1;
            for (int c = 0; c < N_GRID; ++c) {
                if (grid[r][c]) last = c;
                LBc[r][c] = last;
            }
            last = N_GRID;
            for (int c = N_GRID - 1; c >= 0; --c) {
                if (grid[r][c]) last = c;
                RBc[r][c] = last;
            }
        }
        // UBc/DBc: same idea along columns (nearest blocked row above / below).
        for (int c = 0; c < N_GRID; ++c) {
            int last = -1;
            for (int r = 0; r < N_GRID; ++r) {
                if (grid[r][c]) last = r;
                UBc[r][c] = last;
            }
            last = N_GRID;
            for (int r = N_GRID - 1; r >= 0; --r) {
                if (grid[r][c]) last = r;
                DBc[r][c] = last;
            }
        }
        last_grid_for_bounds_bfs = grid;
        has_bounds_cache_bfs = true;
    }
    auto& LB = LBc; auto& RB = RBc; auto& UB = UBc; auto& DB = DBc;

    // Parent links for path reconstruction (only written when build_action_str).
    std::array<std::array<std::pair<Pos, std::pair<char, char>>, N_GRID>, N_GRID> came_from_local;
    std::queue<Pos> q;

    if (!is_valid_pos(start_pos)) return {INF_COST, ""};
    // Starting on the forbidden intermediate (and needing to leave it) is a dead end.
    if (avoid_intermediate_target && start_pos == intermediate_target_to_avoid && start_pos != dest_pos) {
        return {INF_COST, ""};
    }

    g_bfs_cell_last_visited_generation[start_pos.r][start_pos.c] = g_bfs_generation_id;
    g_bfs_cell_dist[start_pos.r][start_pos.c] = 0;
    q.push(start_pos);

    int min_dist_to_dest = (start_pos == dest_pos) ? 0 : INF_COST;

    while (!q.empty()) {
        Pos curr = q.front();
        q.pop();
        int d = g_bfs_cell_dist[curr.r][curr.c];

        if (curr == dest_pos) min_dist_to_dest = std::min(min_dist_to_dest, d);
        // Prune frontier nodes that can no longer beat the best known distance.
        if (min_dist_to_dest != INF_COST && d >= min_dist_to_dest && curr != dest_pos) continue;
        if (d + 1 > N_GRID * N_GRID) continue; // hard safety cap on path length

        // Moves: single step in each of the four directions.
        for (int i = 0; i < 4; ++i) {
            Pos next_p = {curr.r + DR[i], curr.c + DC[i]};
            if (is_blocked_pos(next_p, grid)) continue;
            if (avoid_intermediate_target && next_p == intermediate_target_to_avoid && next_p != dest_pos) continue;

            bool visited = (g_bfs_cell_last_visited_generation[next_p.r][next_p.c] == g_bfs_generation_id);
            if (!visited || g_bfs_cell_dist[next_p.r][next_p.c] > d + 1) {
                g_bfs_cell_last_visited_generation[next_p.r][next_p.c] = g_bfs_generation_id;
                g_bfs_cell_dist[next_p.r][next_p.c] = d + 1;
                if (build_action_str) came_from_local[next_p.r][next_p.c] = {curr, {'M', DIR_CHARS[i]}};
                q.push(next_p);
            }
        }

        // Slides: O(1) landing lookup via the precomputed nearest-block tables.
        for (int i = 0; i < 4; ++i) {
            Pos next_p = curr;
            if (i == 0) { // U
                int rland = UB[curr.r][curr.c] + 1;
                if (rland != curr.r) next_p = {rland, curr.c};
            } else if (i == 1) { // D
                int rland = DB[curr.r][curr.c] - 1;
                if (rland != curr.r) next_p = {rland, curr.c};
            } else if (i == 2) { // L
                int cland = LB[curr.r][curr.c] + 1;
                if (cland != curr.c) next_p = {curr.r, cland};
            } else { // R
                int cland = RB[curr.r][curr.c] - 1;
                if (cland != curr.c) next_p = {curr.r, cland};
            }
            if (next_p == curr) continue;

            // Avoid sliding over an intermediate target (unless it equals dest):
            // a slide is rejected if the forbidden tile lies strictly between
            // the start cell and the landing cell (inclusive of the landing).
            if (avoid_intermediate_target && intermediate_target_to_avoid != dest_pos) {
                if (i == 0) { // U
                    if (intermediate_target_to_avoid.c == curr.c &&
                        intermediate_target_to_avoid.r >= next_p.r &&
                        intermediate_target_to_avoid.r <= curr.r - 1) {
                        continue;
                    }
                } else if (i == 1) { // D
                    if (intermediate_target_to_avoid.c == curr.c &&
                        intermediate_target_to_avoid.r <= next_p.r &&
                        intermediate_target_to_avoid.r >= curr.r + 1) {
                        continue;
                    }
                } else if (i == 2) { // L
                    if (intermediate_target_to_avoid.r == curr.r &&
                        intermediate_target_to_avoid.c >= next_p.c &&
                        intermediate_target_to_avoid.c <= curr.c - 1) {
                        continue;
                    }
                } else { // R
                    if (intermediate_target_to_avoid.r == curr.r &&
                        intermediate_target_to_avoid.c <= next_p.c &&
                        intermediate_target_to_avoid.c >= curr.c + 1) {
                        continue;
                    }
                }
            }

            bool visited = (g_bfs_cell_last_visited_generation[next_p.r][next_p.c] == g_bfs_generation_id);
            if (!visited || g_bfs_cell_dist[next_p.r][next_p.c] > d + 1) {
                g_bfs_cell_last_visited_generation[next_p.r][next_p.c] = g_bfs_generation_id;
                g_bfs_cell_dist[next_p.r][next_p.c] = d + 1;
                if (build_action_str) came_from_local[next_p.r][next_p.c] = {curr, {'S', DIR_CHARS[i]}};
                q.push(next_p);
            }
        }
    }

    BFSResult res = {INF_COST, ""};
    if (is_valid_pos(dest_pos) && g_bfs_cell_last_visited_generation[dest_pos.r][dest_pos.c] == g_bfs_generation_id) {
        res.cost = g_bfs_cell_dist[dest_pos.r][dest_pos.c];
        if (build_action_str && res.cost != INF_COST) {
            res.actions_str = reconstruct_path_from_came_from(start_pos, dest_pos, came_from_local);
        }
    }
    return res;
}
|
| 236 |
+
|
| 237 |
+
/*
|
| 238 |
+
bfs_all:
|
| 239 |
+
- Purpose: multi-destination BFS from start_pos computing dists to all cells.
|
| 240 |
+
- Technique: Same O(1) slide landing precomputation as bfs() to accelerate
|
| 241 |
+
transitions. Always enforces avoidance of the given intermediate.
|
| 242 |
+
- If store_came_from=true, records parents to reconstruct any path if needed.
|
| 243 |
+
*/
|
| 244 |
+
// Multi-destination BFS from start_pos: fills dist_out with shortest distances
// to every reachable cell (INF_COST elsewhere). When strictly_avoid_intermediate
// is set, the search may never enter or slide across intermediate_target_to_avoid.
// Parents are recorded into came_from_out only when store_came_from is true.
void bfs_all(Pos start_pos, const Grid& grid,
             Pos intermediate_target_to_avoid, bool strictly_avoid_intermediate,
             std::array<std::array<int, N_GRID>, N_GRID>& dist_out,
             std::array<std::array<std::pair<Pos, std::pair<char, char>>, N_GRID>, N_GRID>& came_from_out,
             bool store_came_from) {
    g_bfs_generation_id++; // new generation: lazily invalidates visited/dist arrays
    std::queue<Pos> q;

    // Start from "everything unreachable"; reachable cells are filled at the end.
    for (int r_idx = 0; r_idx < N_GRID; ++r_idx) std::fill(dist_out[r_idx].begin(), dist_out[r_idx].end(), INF_COST);

    if (!is_valid_pos(start_pos)) return;
    if (strictly_avoid_intermediate && start_pos == intermediate_target_to_avoid) return;

    // Precompute nearest blocked indices with caching per grid (reduces cost when running multiple BFS on the same grid).
    static Grid last_grid_for_bounds_bfs_all;
    static bool has_bounds_cache_bfs_all = false;
    static std::array<std::array<int, N_GRID>, N_GRID> LBc_all, RBc_all, UBc_all, DBc_all;

    bool reuse_bounds = has_bounds_cache_bfs_all && (std::memcmp(&grid, &last_grid_for_bounds_bfs_all, sizeof(Grid)) == 0);
    if (!reuse_bounds) {
        // Row tables: nearest blocked column left/right (sentinels -1 / N_GRID).
        for (int r = 0; r < N_GRID; ++r) {
            int last = -1;
            for (int c = 0; c < N_GRID; ++c) {
                if (grid[r][c]) last = c;
                LBc_all[r][c] = last;
            }
            last = N_GRID;
            for (int c = N_GRID - 1; c >= 0; --c) {
                if (grid[r][c]) last = c;
                RBc_all[r][c] = last;
            }
        }
        // Column tables: nearest blocked row above/below.
        for (int c = 0; c < N_GRID; ++c) {
            int last = -1;
            for (int r = 0; r < N_GRID; ++r) {
                if (grid[r][c]) last = r;
                UBc_all[r][c] = last;
            }
            last = N_GRID;
            for (int r = N_GRID - 1; r >= 0; --r) {
                if (grid[r][c]) last = r;
                DBc_all[r][c] = last;
            }
        }
        last_grid_for_bounds_bfs_all = grid;
        has_bounds_cache_bfs_all = true;
    }
    auto& LB = LBc_all; auto& RB = RBc_all; auto& UB = UBc_all; auto& DB = DBc_all;

    g_bfs_cell_last_visited_generation[start_pos.r][start_pos.c] = g_bfs_generation_id;
    g_bfs_cell_dist[start_pos.r][start_pos.c] = 0;
    q.push(start_pos);

    while (!q.empty()) {
        Pos curr = q.front();
        q.pop();
        int d = g_bfs_cell_dist[curr.r][curr.c];

        if (d + 1 > N_GRID * N_GRID) continue; // hard safety cap on path length

        // Moves: single step in each of the four directions.
        for (int i = 0; i < 4; ++i) {
            Pos next_p = {curr.r + DR[i], curr.c + DC[i]};
            if (is_blocked_pos(next_p, grid)) continue;
            if (strictly_avoid_intermediate && next_p == intermediate_target_to_avoid) continue;

            bool visited = (g_bfs_cell_last_visited_generation[next_p.r][next_p.c] == g_bfs_generation_id);
            if (!visited || g_bfs_cell_dist[next_p.r][next_p.c] > d + 1) {
                g_bfs_cell_last_visited_generation[next_p.r][next_p.c] = g_bfs_generation_id;
                g_bfs_cell_dist[next_p.r][next_p.c] = d + 1;
                if (store_came_from) came_from_out[next_p.r][next_p.c] = {curr, {'M', DIR_CHARS[i]}};
                q.push(next_p);
            }
        }

        // Slides: O(1) landing lookup via the precomputed nearest-block tables.
        for (int i = 0; i < 4; ++i) {
            Pos next_p = curr;
            if (i == 0) { // U
                int rland = UB[curr.r][curr.c] + 1;
                if (rland != curr.r) next_p = {rland, curr.c};
            } else if (i == 1) { // D
                int rland = DB[curr.r][curr.c] - 1;
                if (rland != curr.r) next_p = {rland, curr.c};
            } else if (i == 2) { // L
                int cland = LB[curr.r][curr.c] + 1;
                if (cland != curr.c) next_p = {curr.r, cland};
            } else { // R
                int cland = RB[curr.r][curr.c] - 1;
                if (cland != curr.c) next_p = {curr.r, cland};
            }
            if (next_p == curr) continue;

            // Reject slides whose track passes over the forbidden intermediate.
            if (strictly_avoid_intermediate) {
                if (i == 0) { // U
                    if (intermediate_target_to_avoid.c == curr.c &&
                        intermediate_target_to_avoid.r >= next_p.r &&
                        intermediate_target_to_avoid.r <= curr.r - 1) {
                        continue;
                    }
                } else if (i == 1) { // D
                    if (intermediate_target_to_avoid.c == curr.c &&
                        intermediate_target_to_avoid.r <= next_p.r &&
                        intermediate_target_to_avoid.r >= curr.r + 1) {
                        continue;
                    }
                } else if (i == 2) { // L
                    if (intermediate_target_to_avoid.r == curr.r &&
                        intermediate_target_to_avoid.c >= next_p.c &&
                        intermediate_target_to_avoid.c <= curr.c - 1) {
                        continue;
                    }
                } else { // R
                    if (intermediate_target_to_avoid.r == curr.r &&
                        intermediate_target_to_avoid.c <= next_p.c &&
                        intermediate_target_to_avoid.c >= curr.c + 1) {
                        continue;
                    }
                }
            }

            bool visited = (g_bfs_cell_last_visited_generation[next_p.r][next_p.c] == g_bfs_generation_id);
            if (!visited || g_bfs_cell_dist[next_p.r][next_p.c] > d + 1) {
                g_bfs_cell_last_visited_generation[next_p.r][next_p.c] = g_bfs_generation_id;
                g_bfs_cell_dist[next_p.r][next_p.c] = d + 1;
                if (store_came_from) came_from_out[next_p.r][next_p.c] = {curr, {'S', DIR_CHARS[i]}};
                q.push(next_p);
            }
        }
    }

    // Copy distances for cells visited during this generation into dist_out.
    for (int r_idx = 0; r_idx < N_GRID; ++r_idx) {
        for (int c_idx = 0; c_idx < N_GRID; ++c_idx) {
            if (g_bfs_cell_last_visited_generation[r_idx][c_idx] == g_bfs_generation_id) {
                dist_out[r_idx][c_idx] = g_bfs_cell_dist[r_idx][c_idx];
            }
        }
    }
}
|
| 383 |
+
|
| 384 |
+
// NOTE(review): presumably filled during input parsing elsewhere — confirm.
Pos G_initial_pos;               // starting position
std::vector<Pos> G_targets_vec;  // ordered targets to visit

// Outcome of executing one segment plan: turn cost and optional action string.
struct SegmentExecResult { int turns = INF_COST; std::string actions_str; };
|
| 388 |
+
|
| 389 |
+
// Strategy 0: go straight to target_P with a plain BFS (no grid edits).
// Fills res and returns true on success; false if the target is blocked or
// unreachable. Leaves res untouched on failure.
bool apply_direct_path_strat(Pos cur_P, Pos target_P, const Grid& g, SegmentExecResult& res, bool build_action_str) {
    if (is_blocked_pos(target_P, g)) return false;

    const BFSResult path = bfs(cur_P, target_P, g, INVALID_POS, false, build_action_str);
    if (path.cost == INF_COST) return false;

    res.turns = path.cost;
    if (build_action_str) {
        res.actions_str = path.actions_str;
    } else {
        res.actions_str.clear();
    }
    return true;
}
|
| 397 |
+
|
| 398 |
+
// Strategy 1: the target tile itself is blocked. Walk to the cheapest free
// neighbor of target_P (never stepping on target_P), Alter toward it to clear
// the block, then Move onto it. On success mutates the grid (target unblocked),
// fills res (path cost + 1 alter + 1 move), and returns true.
bool apply_unblock_and_go_strat(Pos cur_P, Pos target_P, Grid& g , SegmentExecResult& res, bool build_action_str) {
    if (!is_blocked_pos(target_P, g)) return false; // only applies when target is blocked

    std::array<std::array<int, N_GRID>, N_GRID> dist_from_cur_P;
    std::array<std::array<std::pair<Pos, std::pair<char, char>>, N_GRID>, N_GRID> came_from_data;

    // Distances to all cells while strictly avoiding the (blocked) target tile.
    bfs_all(cur_P, g, target_P, true, dist_from_cur_P, came_from_data, build_action_str);

    Pos best_adj_P = INVALID_POS;
    int cost_to_best_adj_P = INF_COST;
    char alter_dir_char_to_unblock = ' ';

    // Pick the reachable free neighbor of target_P with the smallest distance.
    for (int i=0; i<4; ++i) {
        Pos adj_P = {target_P.r + DR[i], target_P.c + DC[i]};
        if (!is_valid_pos(adj_P) || is_blocked_pos(adj_P, g)) continue;

        if (dist_from_cur_P[adj_P.r][adj_P.c] < cost_to_best_adj_P) {
            cost_to_best_adj_P = dist_from_cur_P[adj_P.r][adj_P.c];
            best_adj_P = adj_P;
            // From adj_P, the target lies in the reverse of direction i.
            alter_dir_char_to_unblock = DIR_CHARS[DIR_REV_IDX[i]];
        }
    }

    if (best_adj_P == INVALID_POS || cost_to_best_adj_P == INF_COST) return false;

    // Path to the neighbor, plus one Alter (unblock) and one Move (step in).
    res.turns = cost_to_best_adj_P + 1 + 1;
    if (build_action_str) {
        res.actions_str = reconstruct_path_from_came_from(cur_P, best_adj_P, came_from_data);
        res.actions_str += 'A'; res.actions_str += alter_dir_char_to_unblock;
        res.actions_str += 'M'; res.actions_str += alter_dir_char_to_unblock;
    } else {
        res.actions_str.clear();
    }
    toggle_block_pos(target_P, g); // the Alter removed the block: reflect it in the grid
    return true;
}
|
| 434 |
+
|
| 435 |
+
// Slide strategies: finish the segment with a Slide that lands exactly on
// target_P (requires a block, or the wall, one tile past the target).
//  type 0: such a stopper already exists — walk to the tile just before the
//          target (avoiding the target) and slide once.
//  type 1: no stopper — first walk onto target_P, Alter to place a block one
//          tile beyond it, walk back to the pre-slide tile (avoiding the
//          target), then slide. Mutates the grid on success.
// Returns true and fills res on success; false otherwise.
bool apply_slide_strat(Pos cur_P, Pos target_P, Grid& g , SegmentExecResult& res, int slide_dir_idx, int type, bool build_action_str) {
    if (is_blocked_pos(target_P, g)) return false;

    int slide_dr = DR[slide_dir_idx], slide_dc = DC[slide_dir_idx];
    char slide_dir_char = DIR_CHARS[slide_dir_idx];

    // Cell the slide starts from, and the cell that must stop the slide.
    Pos slide_start_P = {target_P.r - slide_dr, target_P.c - slide_dc};
    Pos block_at_P = {target_P.r + slide_dr, target_P.c + slide_dc};

    if (!is_valid_pos(slide_start_P)) return false;
    if (slide_start_P == target_P) return false;

    if (type == 0) {
        // Need an existing stopper: either the board edge or a placed block.
        bool wall_exists_for_slide = !is_valid_pos(block_at_P) || is_blocked_pos(block_at_P, g);
        if (!wall_exists_for_slide) return false;

        // Reach the pre-slide tile without touching the target itself.
        BFSResult path_to_slide_start_P = bfs(cur_P, slide_start_P, g,
                                              target_P, true, build_action_str);
        if (path_to_slide_start_P.cost == INF_COST) return false;

        res.turns = path_to_slide_start_P.cost + 1; // +1 for the final slide
        if (build_action_str) {
            res.actions_str = path_to_slide_start_P.actions_str;
            res.actions_str += 'S'; res.actions_str += slide_dir_char;
        } else {
            res.actions_str.clear();
        }
        return true;

    } else if (type == 1) {
        // The stopper cell must exist on-board and be free (we will place it).
        if (!is_valid_pos(block_at_P)) return false;
        if (is_blocked_pos(block_at_P, g)) return false;

        // First leg: walk onto the target to place the stopper from there.
        BFSResult path_cur_to_target_P = bfs(cur_P, target_P, g, INVALID_POS, false, build_action_str);
        if (path_cur_to_target_P.cost == INF_COST) return false;

        Grid g_after_alter = g;
        toggle_block_pos(block_at_P, g_after_alter); // simulate placing the stopper
        char alter_dir_char_for_block = DIR_CHARS[slide_dir_idx];

        // Second leg: from the target back to the pre-slide tile, avoiding the target.
        BFSResult path_target_to_slide_start_P = bfs(target_P, slide_start_P, g_after_alter,
                                                     target_P, true, build_action_str);
        if (path_target_to_slide_start_P.cost == INF_COST) return false;

        // walk + alter + walk-back + slide
        res.turns = path_cur_to_target_P.cost + 1 + path_target_to_slide_start_P.cost + 1;
        if (build_action_str) {
            res.actions_str = path_cur_to_target_P.actions_str;
            res.actions_str += 'A'; res.actions_str += alter_dir_char_for_block;
            res.actions_str += path_target_to_slide_start_P.actions_str;
            res.actions_str += 'S'; res.actions_str += slide_dir_char;
        } else {
            res.actions_str.clear();
        }
        g = g_after_alter; // commit the placed stopper to the caller's grid
        return true;
    }
    return false; // unknown type
}
|
| 493 |
+
|
| 494 |
+
// Base-strategy codes: 1 direct path, 1 unblock-and-go, 4 type-0 slides (one
// per direction), and type-1 slides disabled.
const int NUM_BASE_STRATEGIES_DIRECT = 1;
const int NUM_BASE_STRATEGIES_UNBLOCK = 1;
const int NUM_BASE_STRATEGIES_SLIDE_TYPE0 = 4;
const int NUM_BASE_STRATEGIES_SLIDE_TYPE1 = 0; // prune type1 (place-after-reaching-target) slides to shrink search; dominated in practice
const int NUM_BASE_STRATEGIES = NUM_BASE_STRATEGIES_DIRECT + NUM_BASE_STRATEGIES_UNBLOCK +
                                NUM_BASE_STRATEGIES_SLIDE_TYPE0 + NUM_BASE_STRATEGIES_SLIDE_TYPE1; // 1+1+4+0 = 6
|
| 500 |
+
|
| 501 |
+
bool apply_base_strategy_internal(int base_code, Pos cur_P, Pos target_P, Grid& g, SegmentExecResult& res, bool build_action_str) {
|
| 502 |
+
if (base_code == 0) return apply_direct_path_strat(cur_P, target_P, g, res, build_action_str);
|
| 503 |
+
if (base_code == 1) return apply_unblock_and_go_strat(cur_P, target_P, g, res, build_action_str);
|
| 504 |
+
|
| 505 |
+
int type = -1, dir_idx = -1;
|
| 506 |
+
if (base_code >= 2 && base_code < 2 + NUM_BASE_STRATEGIES_SLIDE_TYPE0) {
|
| 507 |
+
type = 0; dir_idx = base_code - 2;
|
| 508 |
+
}
|
| 509 |
+
else if (base_code >= 2 + NUM_BASE_STRATEGIES_SLIDE_TYPE0 &&
|
| 510 |
+
base_code < 2 + NUM_BASE_STRATEGIES_SLIDE_TYPE0 + NUM_BASE_STRATEGIES_SLIDE_TYPE1) {
|
| 511 |
+
type = 1; dir_idx = base_code - (2 + NUM_BASE_STRATEGIES_SLIDE_TYPE0);
|
| 512 |
+
}
|
| 513 |
+
else return false;
|
| 514 |
+
|
| 515 |
+
return apply_slide_strat(cur_P, target_P, g, res, dir_idx, type, build_action_str);
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
// Post-alter options executed after the base strategy finishes at the target:
// option 0 = do nothing; options 1..4 = alter (toggle) the cell adjacent to the
// player in one of the four directions; the remaining 12 = move one step (4
// directions) then alter one of the 3 non-backward neighbours of the new cell.
const int NUM_POST_ALTER_OPTIONS_NONE = 1;
const int NUM_POST_ALTER_OPTIONS_ADJACENT = 4;
const int NUM_POST_ALTER_OPTIONS_MOVE_PLUS_ALTER = 12;
// Cumulative boundaries used when decoding a post-alter option code.
const int NUM_POST_ALTER_OPTIONS_CUMULATIVE_NONE = NUM_POST_ALTER_OPTIONS_NONE;
const int NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT = NUM_POST_ALTER_OPTIONS_CUMULATIVE_NONE + NUM_POST_ALTER_OPTIONS_ADJACENT;
const int NUM_POST_ALTER_OPTIONS = NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT + NUM_POST_ALTER_OPTIONS_MOVE_PLUS_ALTER;
// Combined code space searched per segment: base strategy x post-alter option.
const int TOTAL_STRATEGIES_PER_SEGMENT = NUM_BASE_STRATEGIES * NUM_POST_ALTER_OPTIONS; // 6 * 17 = 102
// How many random candidate codes the greedy-reoptimize SA move samples.
const int GREEDY_REOPTIMIZE_SUBSET_SIZE = 40;
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
// Execute a two-phase plan encoded by combined_code:
//   base = combined_code % NUM_BASE_STRATEGIES (0:direct, 1:unblock, 2..5:slide0 by dir),
//   post = combined_code / NUM_BASE_STRATEGIES (0:none, 1..4:adjacent alter, others: move+alter).
// Mutates player_pos_ref and the grid `g`; on any failure the grid is rolled
// back to its pre-call state and false is returned. `res` receives the segment
// turn cost and (when build_action_str) the action string.
bool apply_combined_strategy(int combined_code, Pos& player_pos_ref ,
                             Pos segment_target_P, Grid& g ,
                             SegmentExecResult& res , bool build_action_str) {
    res.turns = 0;
    res.actions_str.clear();

    int base_strategy_code = combined_code % NUM_BASE_STRATEGIES;
    int post_alter_option_code = combined_code / NUM_BASE_STRATEGIES;

    // Snapshots used to roll back grid mutations when any phase fails.
    Pos player_original_pos_at_segment_start = player_pos_ref;
    Grid g_original_at_segment_start = g;

    bool base_success = apply_base_strategy_internal(base_strategy_code, player_original_pos_at_segment_start, segment_target_P, g, res, build_action_str);

    if (!base_success) {
        g = g_original_at_segment_start;
        return false;
    }

    // Every base strategy in the current set ends with the player on the target.
    Pos player_pos_after_base = segment_target_P;

    if (post_alter_option_code == 0) {
        // No action
    } else if (post_alter_option_code < NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT) {
        // Adjacent alter: toggle the cell next to the player (1 extra turn).
        int alter_dir_idx = post_alter_option_code - NUM_POST_ALTER_OPTIONS_CUMULATIVE_NONE;
        Pos alter_on_P = {player_pos_after_base.r + DR[alter_dir_idx], player_pos_after_base.c + DC[alter_dir_idx]};

        if (!is_valid_pos(alter_on_P)) {
            g = g_original_at_segment_start;
            return false;
        }

        res.turns++;
        if (build_action_str) {
            res.actions_str += 'A';
            res.actions_str += DIR_CHARS[alter_dir_idx];
        }
        toggle_block_pos(alter_on_P, g);
    } else {
        // Move+alter: step in direction D1, then alter one of the 3
        // non-backward neighbours (D2) of the new cell (2 extra turns).
        int offset_code = post_alter_option_code - NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT;
        int D1_idx_move = offset_code / 3;
        int D2_choice_idx_alter = offset_code % 3;

        // Map the 0..2 choice onto the directions that are not the reverse of D1.
        int D2_idx_alter = -1;
        int current_choice_count = 0;
        for (int d_candidate = 0; d_candidate < 4; ++d_candidate) {
            if (d_candidate == DIR_REV_IDX[D1_idx_move]) continue;
            if (current_choice_count == D2_choice_idx_alter) {
                D2_idx_alter = d_candidate;
                break;
            }
            current_choice_count++;
        }

        Pos S1_moved_pos = {player_pos_after_base.r + DR[D1_idx_move], player_pos_after_base.c + DC[D1_idx_move]};
        if (!is_valid_pos(S1_moved_pos) || is_blocked_pos(S1_moved_pos, g)) {
            g = g_original_at_segment_start;
            return false;
        }

        Pos S2_target_of_alter = {S1_moved_pos.r + DR[D2_idx_alter], S1_moved_pos.c + DC[D2_idx_alter]};
        if (!is_valid_pos(S2_target_of_alter)) {
            g = g_original_at_segment_start;
            return false;
        }

        res.turns += 2;
        if (build_action_str) {
            res.actions_str += 'M'; res.actions_str += DIR_CHARS[D1_idx_move];
            res.actions_str += 'A'; res.actions_str += DIR_CHARS[D2_idx_alter];
        }
        toggle_block_pos(S2_target_of_alter, g);
        player_pos_after_base = S1_moved_pos;
    }

    player_pos_ref = player_pos_after_base;
    return true;
}
|
| 610 |
+
|
| 611 |
+
// Snapshot of the simulation state taken just before a segment is executed;
// lets evaluate_choices resume from the first changed segment instead of
// re-simulating everything from scratch.
struct PathCacheEntry { Pos player_pos_before_segment; Grid grid_before_segment; int turns_before_segment; };
// Result of simulating a full choice vector: total turn cost, optional action
// log, and whether every segment succeeded within budget.
struct FullEvalResult { int total_turns; std::string actions_log; bool possible; };
|
| 613 |
+
|
| 614 |
+
// Simulate executing `choices` (one combined strategy code per segment) from
// `initial_P` toward `targets`.
//   build_action_str        - also accumulate the action log (slower).
//   k_eval_start_idx        - first segment to (re-)simulate; earlier state is
//                             read from reference_path_cache when usable.
//   reference_path_cache    - per-segment pre-states of a previous evaluation.
//   path_cache_for_new_state- optional output cache filled with this run's
//                             per-segment pre-states (for future warm starts).
// Returns {INF_COST, "", false} as soon as any segment fails or the running
// total exceeds MAX_TOTAL_TURNS.
FullEvalResult evaluate_choices(const std::vector<int>& choices, Pos initial_P, const std::vector<Pos>& targets,
                                bool build_action_str, int k_eval_start_idx,
                                const std::vector<PathCacheEntry>* reference_path_cache,
                                std::vector<PathCacheEntry>* path_cache_for_new_state) {
    Grid current_grid_sim; Pos player_pos_sim; int total_turns_sim = 0;
    std::string total_actions_log_sim_segments_builder = "";

    // Decide between a cold start and a warm start from the reference cache.
    if (k_eval_start_idx == 0 || reference_path_cache == nullptr || reference_path_cache->empty() || (NUM_SEGMENTS > 0 && k_eval_start_idx >= static_cast<int>(reference_path_cache->size())) ) {
        // Cold start: empty grid, player at the initial position.
        for(int r=0; r<N_GRID; ++r) for(int c=0; c<N_GRID; ++c) current_grid_sim[r][c] = false;
        player_pos_sim = initial_P;
        total_turns_sim = 0;
        if (k_eval_start_idx != 0 && NUM_SEGMENTS > 0) k_eval_start_idx = 0; // cache unusable: restart from segment 0
    } else {
        // Warm start from the snapshot taken before segment k_eval_start_idx.
        const PathCacheEntry& prev_entry = (*reference_path_cache)[k_eval_start_idx];
        current_grid_sim = prev_entry.grid_before_segment;
        player_pos_sim = prev_entry.player_pos_before_segment;
        total_turns_sim = prev_entry.turns_before_segment;
        if (total_turns_sim == INF_COST) {
            // The reference run already failed before this segment.
            return {INF_COST, "", false};
        }
    }

    // Copy the unchanged prefix of the reference cache into the output cache.
    if (path_cache_for_new_state != nullptr && k_eval_start_idx > 0 && reference_path_cache != nullptr && !reference_path_cache->empty() &&
        static_cast<int>(path_cache_for_new_state->size()) >= k_eval_start_idx && static_cast<int>(reference_path_cache->size()) >= k_eval_start_idx) {
        std::copy(reference_path_cache->begin(), reference_path_cache->begin() + k_eval_start_idx, path_cache_for_new_state->begin());
    }

    for (int seg_idx = k_eval_start_idx; seg_idx < NUM_SEGMENTS; ++seg_idx) {
        // Record the pre-segment state for later warm starts.
        if (path_cache_for_new_state != nullptr && !path_cache_for_new_state->empty() && static_cast<int>(path_cache_for_new_state->size()) > seg_idx) {
            (*path_cache_for_new_state)[seg_idx].player_pos_before_segment = player_pos_sim;
            (*path_cache_for_new_state)[seg_idx].grid_before_segment = current_grid_sim;
            (*path_cache_for_new_state)[seg_idx].turns_before_segment = total_turns_sim;
        }

        Pos target_P_for_segment = targets[seg_idx];
        SegmentExecResult segment_res;

        bool success = apply_combined_strategy(choices[seg_idx], player_pos_sim, target_P_for_segment, current_grid_sim, segment_res, build_action_str);

        if (!success || segment_res.turns == INF_COST || total_turns_sim + segment_res.turns > MAX_TOTAL_TURNS) {
            // Mark all remaining cache entries unreachable before bailing out.
            if (path_cache_for_new_state != nullptr && !path_cache_for_new_state->empty()) {
                for(int fill_inf_idx = seg_idx; fill_inf_idx < NUM_SEGMENTS; ++fill_inf_idx) {
                    if (static_cast<int>(path_cache_for_new_state->size()) > fill_inf_idx)
                        (*path_cache_for_new_state)[fill_inf_idx].turns_before_segment = INF_COST;
                }
            }
            return {INF_COST, "", false};
        }

        if (build_action_str) total_actions_log_sim_segments_builder += segment_res.actions_str;
        total_turns_sim += segment_res.turns;
    }
    return {total_turns_sim, total_actions_log_sim_segments_builder, true};
}
|
| 668 |
+
|
| 669 |
+
// Wall-clock reference captured at static-initialization time (program start).
auto time_start = std::chrono::steady_clock::now();
// Milliseconds elapsed since time_start.
double get_elapsed_time_ms() { return std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - time_start).count(); }
// Soft deadline for the annealing loop; leaves headroom below the judge limit.
const double TIME_LIMIT_MS = 1950.0;
|
| 672 |
+
|
| 673 |
+
// Kinds of neighbourhood moves the simulated-annealing loop can apply.
enum class NeighborhoodOpType {
    RANDOM_MULTI_SEGMENT,             // randomize the codes of several segments
    FINE_TWEAK_SINGLE_SEGMENT,        // change only the base or post part of one segment's code
    GREEDY_REOPTIMIZE_SINGLE_SEGMENT  // sample candidate codes for one segment, keep the cheapest
};
|
| 678 |
+
|
| 679 |
+
int main() {
|
| 680 |
+
std::ios_base::sync_with_stdio(false); std::cin.tie(NULL);
|
| 681 |
+
|
| 682 |
+
int N_in_dummy, M_in_dummy; std::cin >> N_in_dummy >> M_in_dummy;
|
| 683 |
+
std::cin >> G_initial_pos.r >> G_initial_pos.c;
|
| 684 |
+
|
| 685 |
+
if (NUM_SEGMENTS > 0) {
|
| 686 |
+
G_targets_vec.resize(NUM_SEGMENTS);
|
| 687 |
+
for (int k=0; k < NUM_SEGMENTS; ++k) std::cin >> G_targets_vec[k].r >> G_targets_vec[k].c;
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
std::vector<int> current_sa_choices(NUM_SEGMENTS > 0 ? NUM_SEGMENTS : 0);
|
| 691 |
+
std::vector<int> best_sa_choices(NUM_SEGMENTS > 0 ? NUM_SEGMENTS : 0);
|
| 692 |
+
|
| 693 |
+
std::vector<PathCacheEntry> current_path_cache(NUM_SEGMENTS > 0 ? NUM_SEGMENTS : 0);
|
| 694 |
+
std::vector<PathCacheEntry> neighbor_path_cache(NUM_SEGMENTS > 0 ? NUM_SEGMENTS : 0);
|
| 695 |
+
|
| 696 |
+
int current_total_turns = INF_COST;
|
| 697 |
+
int best_total_turns = INF_COST;
|
| 698 |
+
std::string best_actions_log_str = "";
|
| 699 |
+
bool best_is_from_sa = false;
|
| 700 |
+
int initial_greedy_score_turns = INF_COST;
|
| 701 |
+
|
| 702 |
+
if (NUM_SEGMENTS == 0) {
|
| 703 |
+
// No actions
|
| 704 |
+
} else {
|
| 705 |
+
Grid greedy_grid_sim_build;
|
| 706 |
+
for(int r=0; r<N_GRID; ++r) for(int c=0; c<N_GRID; ++c) greedy_grid_sim_build[r][c] = false;
|
| 707 |
+
Pos player_pos_sim_build = G_initial_pos;
|
| 708 |
+
std::string greedy_actions_log_build_temp = "";
|
| 709 |
+
int greedy_total_turns_build_temp = 0;
|
| 710 |
+
bool possible_greedy = true;
|
| 711 |
+
|
| 712 |
+
for (int k = 0; k < NUM_SEGMENTS; ++k) {
|
| 713 |
+
current_path_cache[k].player_pos_before_segment = player_pos_sim_build;
|
| 714 |
+
current_path_cache[k].grid_before_segment = greedy_grid_sim_build;
|
| 715 |
+
current_path_cache[k].turns_before_segment = greedy_total_turns_build_temp;
|
| 716 |
+
|
| 717 |
+
Pos target_P_k = G_targets_vec[k];
|
| 718 |
+
int current_best_strategy_code_for_k = -1;
|
| 719 |
+
int current_min_turns_for_segment_k = INF_COST;
|
| 720 |
+
|
| 721 |
+
// Fast greedy enumeration using two BFS-all precomputations (no action string build).
|
| 722 |
+
// Docstring: Precompute distance maps once per segment to evaluate all base+post options
|
| 723 |
+
// without re-running BFS per candidate. We compute:
|
| 724 |
+
// - dist_free_k: distances allowing sliding over the target (used for direct path).
|
| 725 |
+
// - dist_avoid_k: distances strictly avoiding passing over the target (used for slides/unblock).
|
| 726 |
+
// Post-alter validity is checked with simple local geometry and grid checks.
|
| 727 |
+
std::array<std::array<int, N_GRID>, N_GRID> dist_free_k, dist_avoid_k;
|
| 728 |
+
std::array<std::array<std::pair<Pos, std::pair<char, char>>, N_GRID>, N_GRID> dummy_cf1, dummy_cf2;
|
| 729 |
+
bfs_all(player_pos_sim_build, greedy_grid_sim_build, INVALID_POS, false, dist_free_k, dummy_cf1, false);
|
| 730 |
+
bfs_all(player_pos_sim_build, greedy_grid_sim_build, target_P_k, true, dist_avoid_k, dummy_cf2, false);
|
| 731 |
+
|
| 732 |
+
auto consider_post = [&](int base_code_candidate, int base_cost) {
|
| 733 |
+
// Player ends at target after any base in our set (direct, unblock, slide0).
|
| 734 |
+
// Tie-break policy when total_cost is equal:
|
| 735 |
+
// - prefer smaller post_alter_option_code (fewer/lighter edits),
|
| 736 |
+
// - then prefer smaller base_code (direct over others).
|
| 737 |
+
for (int post_alter_option_code = 0; post_alter_option_code < NUM_POST_ALTER_OPTIONS; ++post_alter_option_code) {
|
| 738 |
+
int add_cost = 0;
|
| 739 |
+
bool ok = true;
|
| 740 |
+
|
| 741 |
+
if (post_alter_option_code == 0) {
|
| 742 |
+
add_cost = 0; // no-op
|
| 743 |
+
} else if (post_alter_option_code < NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT) {
|
| 744 |
+
int alter_dir_idx = post_alter_option_code - NUM_POST_ALTER_OPTIONS_CUMULATIVE_NONE;
|
| 745 |
+
Pos alter_on = {target_P_k.r + DR[alter_dir_idx], target_P_k.c + DC[alter_dir_idx]};
|
| 746 |
+
if (!is_valid_pos(alter_on)) ok = false;
|
| 747 |
+
add_cost = 1;
|
| 748 |
+
} else {
|
| 749 |
+
int offset_code = post_alter_option_code - NUM_POST_ALTER_OPTIONS_CUMULATIVE_ADJACENT;
|
| 750 |
+
int D1_idx_move = offset_code / 3;
|
| 751 |
+
int D2_choice_idx_alter = offset_code % 3;
|
| 752 |
+
|
| 753 |
+
int D2_idx_alter = -1, cc = 0;
|
| 754 |
+
for (int d_candidate = 0; d_candidate < 4; ++d_candidate) {
|
| 755 |
+
if (d_candidate == DIR_REV_IDX[D1_idx_move]) continue;
|
| 756 |
+
if (cc == D2_choice_idx_alter) { D2_idx_alter = d_candidate; break; }
|
| 757 |
+
++cc;
|
| 758 |
+
}
|
| 759 |
+
Pos S1 = {target_P_k.r + DR[D1_idx_move], target_P_k.c + DC[D1_idx_move]};
|
| 760 |
+
if (!is_valid_pos(S1) || is_blocked_pos(S1, greedy_grid_sim_build)) ok = false;
|
| 761 |
+
Pos S2 = {S1.r + DR[D2_idx_alter], S1.c + DC[D2_idx_alter]};
|
| 762 |
+
if (!is_valid_pos(S2)) ok = false;
|
| 763 |
+
add_cost = 2;
|
| 764 |
+
}
|
| 765 |
+
if (!ok) continue;
|
| 766 |
+
|
| 767 |
+
int total_cost = base_cost + add_cost;
|
| 768 |
+
int candidate_code = post_alter_option_code * NUM_BASE_STRATEGIES + base_code_candidate;
|
| 769 |
+
if (total_cost < current_min_turns_for_segment_k ||
|
| 770 |
+
(total_cost == current_min_turns_for_segment_k && (current_best_strategy_code_for_k == -1 || candidate_code < current_best_strategy_code_for_k))) {
|
| 771 |
+
current_min_turns_for_segment_k = total_cost;
|
| 772 |
+
current_best_strategy_code_for_k = candidate_code;
|
| 773 |
+
if (current_min_turns_for_segment_k == 1) return; // cannot beat 1
|
| 774 |
+
}
|
| 775 |
+
}
|
| 776 |
+
};
|
| 777 |
+
|
| 778 |
+
if (!is_blocked_pos(target_P_k, greedy_grid_sim_build)) {
|
| 779 |
+
// Direct
|
| 780 |
+
int ddir = dist_free_k[target_P_k.r][target_P_k.c];
|
| 781 |
+
if (ddir != INF_COST) consider_post(0, ddir);
|
| 782 |
+
|
| 783 |
+
// Slides type0 (need a wall past target; path to slide_start must avoid passing over target)
|
| 784 |
+
for (int dir = 0; dir < 4 && current_min_turns_for_segment_k != 1; ++dir) {
|
| 785 |
+
Pos slide_start = {target_P_k.r - DR[dir], target_P_k.c - DC[dir]};
|
| 786 |
+
Pos block_at = {target_P_k.r + DR[dir], target_P_k.c + DC[dir]};
|
| 787 |
+
bool wall_exists = !is_valid_pos(block_at) || greedy_grid_sim_build[block_at.r][block_at.c];
|
| 788 |
+
if (!is_valid_pos(slide_start) || !wall_exists) continue;
|
| 789 |
+
int ds = dist_avoid_k[slide_start.r][slide_start.c];
|
| 790 |
+
if (ds != INF_COST) consider_post(2 + dir, ds + 1);
|
| 791 |
+
}
|
| 792 |
+
} else {
|
| 793 |
+
// Unblock-and-go
|
| 794 |
+
int best_adj_cost = INF_COST;
|
| 795 |
+
for (int i = 0; i < 4; ++i) {
|
| 796 |
+
Pos adj = {target_P_k.r + DR[i], target_P_k.c + DC[i]};
|
| 797 |
+
if (!is_valid_pos(adj) || is_blocked_pos(adj, greedy_grid_sim_build)) continue;
|
| 798 |
+
best_adj_cost = std::min(best_adj_cost, dist_avoid_k[adj.r][adj.c]);
|
| 799 |
+
}
|
| 800 |
+
if (best_adj_cost != INF_COST) consider_post(1, best_adj_cost + 2);
|
| 801 |
+
}
|
| 802 |
+
|
| 803 |
+
if (current_best_strategy_code_for_k == -1 || greedy_total_turns_build_temp + current_min_turns_for_segment_k > MAX_TOTAL_TURNS) {
|
| 804 |
+
possible_greedy = false; break;
|
| 805 |
+
}
|
| 806 |
+
|
| 807 |
+
current_sa_choices[k] = current_best_strategy_code_for_k;
|
| 808 |
+
|
| 809 |
+
SegmentExecResult final_segment_res_for_k_build;
|
| 810 |
+
apply_combined_strategy(current_best_strategy_code_for_k,
|
| 811 |
+
player_pos_sim_build,
|
| 812 |
+
target_P_k,
|
| 813 |
+
greedy_grid_sim_build,
|
| 814 |
+
final_segment_res_for_k_build,
|
| 815 |
+
false);
|
| 816 |
+
greedy_total_turns_build_temp += final_segment_res_for_k_build.turns;
|
| 817 |
+
}
|
| 818 |
+
|
| 819 |
+
if(possible_greedy) {
|
| 820 |
+
current_total_turns = greedy_total_turns_build_temp;
|
| 821 |
+
best_total_turns = greedy_total_turns_build_temp;
|
| 822 |
+
initial_greedy_score_turns = greedy_total_turns_build_temp;
|
| 823 |
+
best_sa_choices = current_sa_choices;
|
| 824 |
+
best_actions_log_str.clear();
|
| 825 |
+
} else {
|
| 826 |
+
Grid fallback_grid_sim; for(int r=0; r<N_GRID; ++r) for(int c=0; c<N_GRID; ++c) fallback_grid_sim[r][c] = false;
|
| 827 |
+
Pos fallback_player_pos = G_initial_pos;
|
| 828 |
+
int fallback_total_turns = 0;
|
| 829 |
+
|
| 830 |
+
for(int k_fallback=0; k_fallback<NUM_SEGMENTS; ++k_fallback) {
|
| 831 |
+
current_path_cache[k_fallback].player_pos_before_segment = fallback_player_pos;
|
| 832 |
+
current_path_cache[k_fallback].grid_before_segment = fallback_grid_sim;
|
| 833 |
+
current_path_cache[k_fallback].turns_before_segment = fallback_total_turns;
|
| 834 |
+
|
| 835 |
+
Pos target_P_k_fallback = G_targets_vec[k_fallback];
|
| 836 |
+
int chosen_code_fallback = -1;
|
| 837 |
+
SegmentExecResult res_simple_direct, res_simple_unblock;
|
| 838 |
+
|
| 839 |
+
Grid temp_grid_direct = fallback_grid_sim; Pos temp_pos_direct = fallback_player_pos;
|
| 840 |
+
bool success_direct = apply_combined_strategy(0, temp_pos_direct, target_P_k_fallback, temp_grid_direct, res_simple_direct, false);
|
| 841 |
+
|
| 842 |
+
Grid temp_grid_unblock = fallback_grid_sim; Pos temp_pos_unblock = fallback_player_pos;
|
| 843 |
+
bool success_unblock = apply_combined_strategy(1, temp_pos_unblock, target_P_k_fallback, temp_grid_unblock, res_simple_unblock, false);
|
| 844 |
+
|
| 845 |
+
if (success_direct && (!success_unblock || res_simple_direct.turns <= res_simple_unblock.turns)) {
|
| 846 |
+
chosen_code_fallback = 0;
|
| 847 |
+
} else if (success_unblock) {
|
| 848 |
+
chosen_code_fallback = 1;
|
| 849 |
+
} else {
|
| 850 |
+
chosen_code_fallback = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng);
|
| 851 |
+
}
|
| 852 |
+
current_sa_choices[k_fallback] = chosen_code_fallback;
|
| 853 |
+
|
| 854 |
+
SegmentExecResult temp_res_chosen_fallback;
|
| 855 |
+
bool success_chosen_fb = apply_combined_strategy(chosen_code_fallback, fallback_player_pos, target_P_k_fallback, fallback_grid_sim, temp_res_chosen_fallback, false);
|
| 856 |
+
if (!success_chosen_fb || fallback_total_turns + temp_res_chosen_fallback.turns > MAX_TOTAL_TURNS) {
|
| 857 |
+
for(int fill_idx = k_fallback; fill_idx < NUM_SEGMENTS; ++fill_idx) {
|
| 858 |
+
if (static_cast<int>(current_path_cache.size()) > fill_idx)
|
| 859 |
+
current_path_cache[fill_idx].turns_before_segment = INF_COST;
|
| 860 |
+
}
|
| 861 |
+
break;
|
| 862 |
+
}
|
| 863 |
+
fallback_total_turns += temp_res_chosen_fallback.turns;
|
| 864 |
+
}
|
| 865 |
+
|
| 866 |
+
FullEvalResult fallback_eval = evaluate_choices(current_sa_choices, G_initial_pos, G_targets_vec, false, 0, nullptr, ¤t_path_cache);
|
| 867 |
+
if (fallback_eval.possible) {
|
| 868 |
+
current_total_turns = fallback_eval.total_turns;
|
| 869 |
+
if (current_total_turns < best_total_turns) {
|
| 870 |
+
best_total_turns = current_total_turns;
|
| 871 |
+
best_sa_choices = current_sa_choices;
|
| 872 |
+
best_is_from_sa = true;
|
| 873 |
+
}
|
| 874 |
+
} else { current_total_turns = INF_COST; }
|
| 875 |
+
|
| 876 |
+
if (current_total_turns == INF_COST) {
|
| 877 |
+
for(int k_rand_init=0; k_rand_init<NUM_SEGMENTS; ++k_rand_init) {
|
| 878 |
+
current_sa_choices[k_rand_init] = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng);
|
| 879 |
+
}
|
| 880 |
+
FullEvalResult random_init_eval = evaluate_choices(current_sa_choices, G_initial_pos, G_targets_vec, false, 0, nullptr, ¤t_path_cache);
|
| 881 |
+
if (random_init_eval.possible) {
|
| 882 |
+
current_total_turns = random_init_eval.total_turns;
|
| 883 |
+
if (current_total_turns < best_total_turns) {
|
| 884 |
+
best_total_turns = current_total_turns;
|
| 885 |
+
best_sa_choices = current_sa_choices;
|
| 886 |
+
best_is_from_sa = true;
|
| 887 |
+
}
|
| 888 |
+
}
|
| 889 |
+
}
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
double T_param_start = 20.0, T_param_end = 0.01;
|
| 893 |
+
std::vector<int> segment_indices_for_shuffle(NUM_SEGMENTS);
|
| 894 |
+
if (NUM_SEGMENTS > 0) std::iota(segment_indices_for_shuffle.begin(), segment_indices_for_shuffle.end(), 0);
|
| 895 |
+
|
| 896 |
+
int iterations_stuck_at_inf = 0;
|
| 897 |
+
const int MAX_STUCK_ITERATIONS_FOR_RANDOM_RESTART = 50;
|
| 898 |
+
|
| 899 |
+
while (get_elapsed_time_ms() < TIME_LIMIT_MS) {
|
| 900 |
+
if (current_total_turns == INF_COST) {
|
| 901 |
+
iterations_stuck_at_inf++;
|
| 902 |
+
if (iterations_stuck_at_inf > MAX_STUCK_ITERATIONS_FOR_RANDOM_RESTART) {
|
| 903 |
+
iterations_stuck_at_inf = 0;
|
| 904 |
+
for(int k_rand_init=0; k_rand_init<NUM_SEGMENTS; ++k_rand_init) {
|
| 905 |
+
current_sa_choices[k_rand_init] = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng);
|
| 906 |
+
}
|
| 907 |
+
FullEvalResult random_restart_eval = evaluate_choices(current_sa_choices, G_initial_pos, G_targets_vec, false, 0, nullptr, ¤t_path_cache);
|
| 908 |
+
if (random_restart_eval.possible) {
|
| 909 |
+
current_total_turns = random_restart_eval.total_turns;
|
| 910 |
+
if (current_total_turns < best_total_turns) {
|
| 911 |
+
best_total_turns = current_total_turns;
|
| 912 |
+
best_sa_choices = current_sa_choices;
|
| 913 |
+
best_is_from_sa = true;
|
| 914 |
+
}
|
| 915 |
+
}
|
| 916 |
+
}
|
| 917 |
+
} else {
|
| 918 |
+
iterations_stuck_at_inf = 0;
|
| 919 |
+
}
|
| 920 |
+
|
| 921 |
+
if (NUM_SEGMENTS == 0) break;
|
| 922 |
+
|
| 923 |
+
std::vector<int> neighbor_sa_choices_temp = current_sa_choices;
|
| 924 |
+
int k_eval_start_idx = NUM_SEGMENTS;
|
| 925 |
+
bool changed_anything_in_choices_vector = false;
|
| 926 |
+
|
| 927 |
+
double op_type_roll = std::uniform_real_distribution<>(0.0, 1.0)(rng);
|
| 928 |
+
NeighborhoodOpType current_op_type_local;
|
| 929 |
+
|
| 930 |
+
if (op_type_roll < 0.50) current_op_type_local = NeighborhoodOpType::RANDOM_MULTI_SEGMENT;
|
| 931 |
+
else if (op_type_roll < 0.85) current_op_type_local = NeighborhoodOpType::FINE_TWEAK_SINGLE_SEGMENT;
|
| 932 |
+
else current_op_type_local = NeighborhoodOpType::GREEDY_REOPTIMIZE_SINGLE_SEGMENT;
|
| 933 |
+
|
| 934 |
+
if (current_op_type_local == NeighborhoodOpType::RANDOM_MULTI_SEGMENT) {
|
| 935 |
+
int num_local_changes;
|
| 936 |
+
double r_nc_dist = std::uniform_real_distribution<>(0.0, 1.0)(rng);
|
| 937 |
+
int max_pert_base = std::max(1, NUM_SEGMENTS / 5);
|
| 938 |
+
|
| 939 |
+
if (r_nc_dist < 0.60) num_local_changes = 1;
|
| 940 |
+
else if (r_nc_dist < 0.85) num_local_changes = 2;
|
| 941 |
+
else if (r_nc_dist < 0.95) num_local_changes = 3;
|
| 942 |
+
else num_local_changes = std::min(NUM_SEGMENTS,
|
| 943 |
+
static_cast<int>(4 + std::uniform_int_distribution<>(0, std::max(0, max_pert_base - 4))(rng))
|
| 944 |
+
);
|
| 945 |
+
|
| 946 |
+
num_local_changes = std::min(num_local_changes, NUM_SEGMENTS);
|
| 947 |
+
num_local_changes = std::max(1, num_local_changes);
|
| 948 |
+
|
| 949 |
+
changed_anything_in_choices_vector = true;
|
| 950 |
+
double r_mt_dist = std::uniform_real_distribution<>(0.0, 1.0)(rng);
|
| 951 |
+
if (r_mt_dist < 0.80 || num_local_changes >= NUM_SEGMENTS ) {
|
| 952 |
+
std::shuffle(segment_indices_for_shuffle.begin(), segment_indices_for_shuffle.end(), rng);
|
| 953 |
+
int min_k_changed_val = NUM_SEGMENTS;
|
| 954 |
+
for (int i_change = 0; i_change < num_local_changes; ++i_change) {
|
| 955 |
+
int k_to_change = segment_indices_for_shuffle[i_change];
|
| 956 |
+
min_k_changed_val = std::min(min_k_changed_val, k_to_change);
|
| 957 |
+
|
| 958 |
+
int old_code = neighbor_sa_choices_temp[k_to_change];
|
| 959 |
+
int new_code = old_code;
|
| 960 |
+
if (TOTAL_STRATEGIES_PER_SEGMENT > 1) {
|
| 961 |
+
do { new_code = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng); } while (new_code == old_code);
|
| 962 |
+
} else { new_code = 0; }
|
| 963 |
+
neighbor_sa_choices_temp[k_to_change] = new_code;
|
| 964 |
+
}
|
| 965 |
+
k_eval_start_idx = min_k_changed_val;
|
| 966 |
+
} else {
|
| 967 |
+
int L = num_local_changes;
|
| 968 |
+
int k_start_block = std::uniform_int_distribution<>(0, NUM_SEGMENTS - L)(rng);
|
| 969 |
+
for (int i = 0; i < L; ++i) {
|
| 970 |
+
int k_to_change = k_start_block + i;
|
| 971 |
+
int old_code = neighbor_sa_choices_temp[k_to_change];
|
| 972 |
+
int new_code = old_code;
|
| 973 |
+
if (TOTAL_STRATEGIES_PER_SEGMENT > 1) {
|
| 974 |
+
do { new_code = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng); } while (new_code == old_code);
|
| 975 |
+
} else { new_code = 0; }
|
| 976 |
+
neighbor_sa_choices_temp[k_to_change] = new_code;
|
| 977 |
+
}
|
| 978 |
+
k_eval_start_idx = k_start_block;
|
| 979 |
+
}
|
| 980 |
+
} else if (current_op_type_local == NeighborhoodOpType::FINE_TWEAK_SINGLE_SEGMENT) {
|
| 981 |
+
changed_anything_in_choices_vector = true;
|
| 982 |
+
int k_to_change = std::uniform_int_distribution<>(0, NUM_SEGMENTS - 1)(rng);
|
| 983 |
+
k_eval_start_idx = k_to_change;
|
| 984 |
+
int current_strategy_code = neighbor_sa_choices_temp[k_to_change];
|
| 985 |
+
int base_code = current_strategy_code % NUM_BASE_STRATEGIES;
|
| 986 |
+
int post_alter_code = current_strategy_code / NUM_BASE_STRATEGIES;
|
| 987 |
+
|
| 988 |
+
double tweak_type_rand = std::uniform_real_distribution<>(0.0, 1.0)(rng);
|
| 989 |
+
if (tweak_type_rand < 0.5 && NUM_POST_ALTER_OPTIONS > 1) {
|
| 990 |
+
int new_post_alter_code = post_alter_code;
|
| 991 |
+
do { new_post_alter_code = std::uniform_int_distribution<>(0, NUM_POST_ALTER_OPTIONS - 1)(rng); } while (new_post_alter_code == post_alter_code);
|
| 992 |
+
neighbor_sa_choices_temp[k_to_change] = new_post_alter_code * NUM_BASE_STRATEGIES + base_code;
|
| 993 |
+
} else if (NUM_BASE_STRATEGIES > 1) {
|
| 994 |
+
int new_base_code = base_code;
|
| 995 |
+
do { new_base_code = std::uniform_int_distribution<>(0, NUM_BASE_STRATEGIES - 1)(rng); } while (new_base_code == base_code);
|
| 996 |
+
neighbor_sa_choices_temp[k_to_change] = post_alter_code * NUM_BASE_STRATEGIES + new_base_code;
|
| 997 |
+
} else {
|
| 998 |
+
if (TOTAL_STRATEGIES_PER_SEGMENT > 1) {
|
| 999 |
+
int new_code = current_strategy_code;
|
| 1000 |
+
do { new_code = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng); } while (new_code == current_strategy_code);
|
| 1001 |
+
neighbor_sa_choices_temp[k_to_change] = new_code;
|
| 1002 |
+
} else { changed_anything_in_choices_vector = false; }
|
| 1003 |
+
}
|
| 1004 |
+
if (neighbor_sa_choices_temp[k_to_change] == current_sa_choices[k_to_change]) {
|
| 1005 |
+
changed_anything_in_choices_vector = false;
|
| 1006 |
+
}
|
| 1007 |
+
|
| 1008 |
+
} else { // GREEDY_REOPTIMIZE_SINGLE_SEGMENT
|
| 1009 |
+
int k_to_reoptimize = std::uniform_int_distribution<>(0, NUM_SEGMENTS - 1)(rng);
|
| 1010 |
+
|
| 1011 |
+
if (current_total_turns == INF_COST || current_path_cache.empty() ||
|
| 1012 |
+
k_to_reoptimize >= static_cast<int>(current_path_cache.size()) ||
|
| 1013 |
+
current_path_cache[k_to_reoptimize].turns_before_segment == INF_COST) {
|
| 1014 |
+
changed_anything_in_choices_vector = false;
|
| 1015 |
+
} else {
|
| 1016 |
+
k_eval_start_idx = k_to_reoptimize;
|
| 1017 |
+
|
| 1018 |
+
Pos player_pos_before_k = current_path_cache[k_to_reoptimize].player_pos_before_segment;
|
| 1019 |
+
Grid grid_before_k = current_path_cache[k_to_reoptimize].grid_before_segment;
|
| 1020 |
+
Pos target_P_k = G_targets_vec[k_to_reoptimize];
|
| 1021 |
+
|
| 1022 |
+
int original_choice_for_k = current_sa_choices[k_to_reoptimize];
|
| 1023 |
+
int best_strategy_for_k = original_choice_for_k;
|
| 1024 |
+
SegmentExecResult best_res_for_k_eval;
|
| 1025 |
+
|
| 1026 |
+
Grid temp_grid_eval_current = grid_before_k; Pos temp_player_pos_eval_current = player_pos_before_k;
|
| 1027 |
+
bool current_choice_possible = apply_combined_strategy(original_choice_for_k, temp_player_pos_eval_current, target_P_k, temp_grid_eval_current, best_res_for_k_eval, false);
|
| 1028 |
+
if (!current_choice_possible) best_res_for_k_eval.turns = INF_COST;
|
| 1029 |
+
|
| 1030 |
+
for (int i = 0; i < GREEDY_REOPTIMIZE_SUBSET_SIZE; ++i) {
|
| 1031 |
+
int code_to_try = std::uniform_int_distribution<>(0, TOTAL_STRATEGIES_PER_SEGMENT - 1)(rng);
|
| 1032 |
+
if (code_to_try == original_choice_for_k && current_choice_possible) {
|
| 1033 |
+
continue;
|
| 1034 |
+
}
|
| 1035 |
+
|
| 1036 |
+
SegmentExecResult current_segment_res_eval;
|
| 1037 |
+
Grid temp_grid_iter_eval = grid_before_k;
|
| 1038 |
+
Pos temp_player_pos_iter_eval = player_pos_before_k;
|
| 1039 |
+
bool success = apply_combined_strategy(code_to_try, temp_player_pos_iter_eval, target_P_k, temp_grid_iter_eval, current_segment_res_eval, false);
|
| 1040 |
+
|
| 1041 |
+
if (success && current_segment_res_eval.turns < best_res_for_k_eval.turns) {
|
| 1042 |
+
best_res_for_k_eval.turns = current_segment_res_eval.turns;
|
| 1043 |
+
best_strategy_for_k = code_to_try;
|
| 1044 |
+
if (best_res_for_k_eval.turns <= 1) break;
|
| 1045 |
+
}
|
| 1046 |
+
}
|
| 1047 |
+
neighbor_sa_choices_temp[k_to_reoptimize] = best_strategy_for_k;
|
| 1048 |
+
if (best_strategy_for_k != original_choice_for_k) {
|
| 1049 |
+
changed_anything_in_choices_vector = true;
|
| 1050 |
+
}
|
| 1051 |
+
}
|
| 1052 |
+
}
|
| 1053 |
+
|
| 1054 |
+
if (!changed_anything_in_choices_vector) continue;
|
| 1055 |
+
|
| 1056 |
+
FullEvalResult neighbor_eval_res = evaluate_choices(neighbor_sa_choices_temp, G_initial_pos, G_targets_vec,
|
| 1057 |
+
false, k_eval_start_idx,
|
| 1058 |
+
¤t_path_cache, &neighbor_path_cache);
|
| 1059 |
+
|
| 1060 |
+
if (neighbor_eval_res.possible) {
|
| 1061 |
+
bool accepted = false;
|
| 1062 |
+
if (neighbor_eval_res.total_turns < current_total_turns) { accepted = true; }
|
| 1063 |
+
else if (current_total_turns != INF_COST) {
|
| 1064 |
+
double temperature = T_param_start;
|
| 1065 |
+
double progress = get_elapsed_time_ms() / TIME_LIMIT_MS;
|
| 1066 |
+
if (progress < 1.0 && progress >=0.0) { temperature = T_param_start * std::pow(T_param_end / T_param_start, progress); }
|
| 1067 |
+
else if (progress >= 1.0) { temperature = T_param_end; }
|
| 1068 |
+
temperature = std::max(temperature, T_param_end);
|
| 1069 |
+
if (temperature > 1e-9) {
|
| 1070 |
+
double delta_cost = static_cast<double>(neighbor_eval_res.total_turns - current_total_turns);
|
| 1071 |
+
if (std::exp(-delta_cost / temperature) > std::uniform_real_distribution<>(0.0, 1.0)(rng) ) { accepted = true; }
|
| 1072 |
+
}
|
| 1073 |
+
} else {
|
| 1074 |
+
accepted = true;
|
| 1075 |
+
}
|
| 1076 |
+
|
| 1077 |
+
if (accepted) {
|
| 1078 |
+
current_sa_choices.swap(neighbor_sa_choices_temp);
|
| 1079 |
+
current_total_turns = neighbor_eval_res.total_turns;
|
| 1080 |
+
if (!current_path_cache.empty() && !neighbor_path_cache.empty()) {
|
| 1081 |
+
current_path_cache.swap(neighbor_path_cache);
|
| 1082 |
+
}
|
| 1083 |
+
|
| 1084 |
+
if (current_total_turns < best_total_turns) {
|
| 1085 |
+
best_total_turns = current_total_turns;
|
| 1086 |
+
best_sa_choices = current_sa_choices;
|
| 1087 |
+
best_is_from_sa = true;
|
| 1088 |
+
}
|
| 1089 |
+
}
|
| 1090 |
+
}
|
| 1091 |
+
}
|
| 1092 |
+
|
| 1093 |
+
if (best_total_turns == INF_COST) {
|
| 1094 |
+
best_actions_log_str.clear();
|
| 1095 |
+
} else {
|
| 1096 |
+
FullEvalResult final_best_res = evaluate_choices(best_sa_choices, G_initial_pos, G_targets_vec, true, 0, nullptr, nullptr);
|
| 1097 |
+
if (final_best_res.possible) {
|
| 1098 |
+
best_actions_log_str = final_best_res.actions_log;
|
| 1099 |
+
} else {
|
| 1100 |
+
best_actions_log_str.clear();
|
| 1101 |
+
}
|
| 1102 |
+
}
|
| 1103 |
+
}
|
| 1104 |
+
|
| 1105 |
+
const std::string& final_actions_to_print = best_actions_log_str;
|
| 1106 |
+
for (size_t i = 0; i < final_actions_to_print.length(); i += 2) {
|
| 1107 |
+
std::cout << final_actions_to_print[i] << " " << final_actions_to_print[i+1] << "\n";
|
| 1108 |
+
}
|
| 1109 |
+
return 0;
|
| 1110 |
+
}
|
| 1111 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale_agent_best/ahc008.cpp
ADDED
|
@@ -0,0 +1,508 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <algorithm>
|
| 6 |
+
// #include <map>
|
| 7 |
+
// #include <set>
|
| 8 |
+
#include <queue>
|
| 9 |
+
#include <cmath>
|
| 10 |
+
#include <iomanip>
|
| 11 |
+
#include <limits>
|
| 12 |
+
|
| 13 |
+
// --- Constants ---
|
| 14 |
+
constexpr int GRID_SIZE = 30;
|
| 15 |
+
constexpr int NUM_TURNS = 300;
|
| 16 |
+
constexpr int INF = std::numeric_limits<int>::max();
|
| 17 |
+
|
| 18 |
+
// A 1-indexed grid coordinate (row r, column c) on the 30x30 board.
struct Point {
    int r, c;

    // Two points are equal when both coordinates match.
    bool operator==(const Point& rhs) const { return r == rhs.r && c == rhs.c; }
    bool operator!=(const Point& rhs) const { return r != rhs.r || c != rhs.c; }

    // Lexicographic order: by row first, then by column (used for
    // deterministic tie-breaking and for sort/unique on cell lists).
    bool operator<(const Point& rhs) const {
        return (r != rhs.r) ? (r < rhs.r) : (c < rhs.c);
    }
};
|
| 28 |
+
const Point INVALID_POINT = {-1, -1};
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
// Tunable parameters
|
| 32 |
+
constexpr int STAND_OUTSIDE_INNER_SAFE_PENALTY = 1000;
|
| 33 |
+
constexpr int ADJACENT_WALL_PRIORITY_BONUS = 0;
|
| 34 |
+
constexpr int NEAR_PET_PENALTY_POINTS_PER_PET = 0;
|
| 35 |
+
constexpr int NEAR_PET_RADIUS = 2;
|
| 36 |
+
constexpr int MAX_STUCK_TURNS = 10; // Slightly increased
|
| 37 |
+
|
| 38 |
+
// Directions: Up, Down, Left, Right (indices 0, 1, 2, 3)
|
| 39 |
+
const Point DIRS[4] = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}};
|
| 40 |
+
const char DIR_CHARS_BUILD[4] = {'u', 'd', 'l', 'r'};
|
| 41 |
+
const char DIR_CHARS_MOVE[4] = {'U', 'D', 'L', 'R'};
|
| 42 |
+
const char PET_MOVE_CHARS[4] = {'U', 'D', 'L', 'R'};
|
| 43 |
+
|
| 44 |
+
struct PetInfo {
|
| 45 |
+
Point pos;
|
| 46 |
+
int type;
|
| 47 |
+
int id;
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
enum class HumanObjective {
|
| 51 |
+
BUILDING_WALLS,
|
| 52 |
+
GOING_TO_SAFE_SPOT,
|
| 53 |
+
STAYING_IN_SAFE_SPOT,
|
| 54 |
+
REPOSITIONING_STUCK
|
| 55 |
+
// FLEEING_PET_IN_PEN removed, simplified objective setting
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
struct HumanInfo {
|
| 59 |
+
Point pos;
|
| 60 |
+
int id;
|
| 61 |
+
|
| 62 |
+
int strip_r_start;
|
| 63 |
+
int strip_r_end;
|
| 64 |
+
|
| 65 |
+
Point inner_safe_ul;
|
| 66 |
+
Point inner_safe_br;
|
| 67 |
+
Point final_stand_pos;
|
| 68 |
+
|
| 69 |
+
std::vector<Point> assigned_wall_cells;
|
| 70 |
+
HumanObjective objective;
|
| 71 |
+
int turns_stuck_building = 0;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
// --- Game Grid and State ---
|
| 75 |
+
bool is_impassable_grid_static[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 76 |
+
std::vector<PetInfo> pets_global_state;
|
| 77 |
+
std::vector<HumanInfo> humans_global_state;
|
| 78 |
+
int N_pets_global, M_humans_global;
|
| 79 |
+
|
| 80 |
+
Point bfs_parent_grid[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 81 |
+
bool bfs_visited_grid[GRID_SIZE + 1][GRID_SIZE + 1];
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
// --- Utility Functions ---
|
| 85 |
+
bool is_valid_coord(int val) {
|
| 86 |
+
return val >= 1 && val <= GRID_SIZE;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
// True when both coordinates of p are inside the board.
bool is_valid_point(Point p) {
    if (!is_valid_coord(p.r)) return false;
    return is_valid_coord(p.c);
}
|
| 92 |
+
|
| 93 |
+
// L1 (taxicab) distance between two on-board points.
// Returns INF when either point is invalid, so invalid points compare as
// "infinitely far" in distance-based scoring.
int manhattan_distance(Point p1, Point p2) {
    if (!is_valid_point(p1)) return INF;
    if (!is_valid_point(p2)) return INF;
    const int dr = p1.r - p2.r;
    const int dc = p1.c - p2.c;
    return std::abs(dr) + std::abs(dc);
}
|
| 97 |
+
|
| 98 |
+
// Counts how many of the four orthogonal neighbours of p are blocked,
// i.e. lie off the board or already hold a wall in the static grid.
// Used as a bonus term when scoring candidate wall cells.
int count_adjacent_walls_or_boundaries(Point p) {
    int count = 0;
    for (int i = 0; i < 4; ++i) {
        Point neighbor = {p.r + DIRS[i].r, p.c + DIRS[i].c};
        // Simplified from `!valid || (valid && impassable)`: thanks to
        // short-circuit evaluation the second validity test was redundant.
        if (!is_valid_point(neighbor) || is_impassable_grid_static[neighbor.r][neighbor.c]) {
            count++;
        }
    }
    return count;
}
|
| 108 |
+
|
| 109 |
+
// Checks whether a wall could legally be placed at wall_pos by the given
// human this turn: the cell must be on the board, currently empty, neither
// occupied by nor adjacent to any pet, and not occupied by another human.
bool can_theoretically_build_at(Point wall_pos, int builder_human_id) {
    if (!is_valid_point(wall_pos) || is_impassable_grid_static[wall_pos.r][wall_pos.c]) {
        return false;
    }

    // A wall may not go on a pet's cell nor on a cell orthogonally adjacent
    // to a pet; distance 0 or 1 covers both rules in one comparison.
    for (const auto& pet : pets_global_state) {
        if (manhattan_distance(wall_pos, pet.pos) <= 1) return false;
    }

    // No other human may stand on the target cell. The builder themself is
    // skipped: they stand next to the wall cell, never on it.
    for (const auto& human : humans_global_state) {
        if (human.id != builder_human_id && human.pos == wall_pos) return false;
    }
    return true;
}
|
| 124 |
+
|
| 125 |
+
// Returns the first move character ('U'/'D'/'L'/'R') of a shortest path
// from start_pos to target_pos, found by BFS over the static wall grid.
// Cells listed in current_turn_tentative_walls are treated as blocked too,
// because another human will turn them into walls this same turn.
// Returns '.' when start == target, start is invalid, or no path exists.
// Uses the shared bfs_visited_grid / bfs_parent_grid scratch arrays.
char get_bfs_move_char(Point start_pos, Point target_pos,
                       const std::vector<Point>& current_turn_tentative_walls) {
    if (start_pos == target_pos) return '.';

    std::queue<Point> q;
    q.push(start_pos);

    // Reset the shared BFS scratch state for this search.
    for (int r_bfs = 1; r_bfs <= GRID_SIZE; ++r_bfs) {
        for (int c_bfs = 1; c_bfs <= GRID_SIZE; ++c_bfs) {
            bfs_visited_grid[r_bfs][c_bfs] = false;
            bfs_parent_grid[r_bfs][c_bfs] = INVALID_POINT;
        }
    }
    if (!is_valid_point(start_pos)) return '.';
    bfs_visited_grid[start_pos.r][start_pos.c] = true;

    Point path_found_dest = INVALID_POINT;
    // Flag-based early exit; replaces the original `goto bfs_done_label`
    // with identical control flow (stop as soon as the target is reached).
    bool target_reached = false;

    while (!q.empty() && !target_reached) {
        Point curr = q.front();
        q.pop();

        for (int i_dir = 0; i_dir < 4 && !target_reached; ++i_dir) {
            Point next_p = {curr.r + DIRS[i_dir].r, curr.c + DIRS[i_dir].c};

            if (!is_valid_point(next_p) ||
                is_impassable_grid_static[next_p.r][next_p.c] ||
                bfs_visited_grid[next_p.r][next_p.c]) {
                continue;
            }

            // Walls being built by earlier humans this turn are impassable.
            bool is_tentative_wall_conflict = false;
            for (const auto& tw : current_turn_tentative_walls) {
                if (next_p == tw) {
                    is_tentative_wall_conflict = true;
                    break;
                }
            }
            if (is_tentative_wall_conflict) continue;

            bfs_visited_grid[next_p.r][next_p.c] = true;
            bfs_parent_grid[next_p.r][next_p.c] = curr;

            if (next_p == target_pos) {
                path_found_dest = next_p;
                target_reached = true;  // exit both loops; no need to enqueue
            } else {
                q.push(next_p);
            }
        }
    }

    if (path_found_dest.r == -1) return '.';  // target unreachable

    // Walk parent links back from the target until we reach the cell whose
    // parent is start_pos: that cell is the first step along the path.
    Point current_step_in_path = path_found_dest;
    while (!(bfs_parent_grid[current_step_in_path.r][current_step_in_path.c] == INVALID_POINT) &&
           !(bfs_parent_grid[current_step_in_path.r][current_step_in_path.c] == start_pos)) {
        current_step_in_path = bfs_parent_grid[current_step_in_path.r][current_step_in_path.c];
    }

    // Translate the first step into a move character.
    for (int i_dir = 0; i_dir < 4; ++i_dir) {
        if (start_pos.r + DIRS[i_dir].r == current_step_in_path.r &&
            start_pos.c + DIRS[i_dir].c == current_step_in_path.c) {
            return DIR_CHARS_MOVE[i_dir];
        }
    }
    return '.';
}
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
// Reads the initial interactive input (pet count/positions/types, then
// human count/positions) and partitions the board into one horizontal
// strip per human. For each human it precomputes:
//   - the strip's row range,
//   - an "inner safe" rectangle one cell inside the strip borders,
//   - a final standing position at the centre of that rectangle,
//   - the list of border cells that human is responsible for walling.
// Adjacent humans share border rows, so each shares half of each interior
// border row with its neighbour (left half / right half).
void initialize_game() {
    std::cin >> N_pets_global;
    pets_global_state.resize(N_pets_global);
    for (int i = 0; i < N_pets_global; ++i) {
        pets_global_state[i].id = i;
        std::cin >> pets_global_state[i].pos.r >> pets_global_state[i].pos.c >> pets_global_state[i].type;
    }

    std::cin >> M_humans_global;
    humans_global_state.resize(M_humans_global);

    // No walls exist at the start of the game.
    for(int r_grid=0; r_grid <= GRID_SIZE; ++r_grid) for(int c_grid=0; c_grid <= GRID_SIZE; ++c_grid) is_impassable_grid_static[r_grid][c_grid] = false;

    // Divide the 30 rows as evenly as possible; the first
    // (GRID_SIZE % M) humans get one extra row each.
    int base_strip_height = GRID_SIZE / M_humans_global;
    int remainder_heights = GRID_SIZE % M_humans_global;
    int current_r_start_coord = 1;

    for (int i = 0; i < M_humans_global; ++i) {
        HumanInfo& human = humans_global_state[i];
        human.id = i;
        std::cin >> human.pos.r >> human.pos.c;

        int strip_h_for_this_human = base_strip_height + (i < remainder_heights ? 1 : 0);
        human.strip_r_start = current_r_start_coord;
        human.strip_r_end = human.strip_r_start + strip_h_for_this_human - 1;
        human.strip_r_end = std::min(human.strip_r_end, GRID_SIZE);

        int actual_strip_h = human.strip_r_end - human.strip_r_start + 1;
        int actual_strip_w = GRID_SIZE;

        // Inner safe rectangle: shrink each side by one cell, but only if
        // the strip is at least 3 cells in that dimension.
        human.inner_safe_ul.r = human.strip_r_start + (actual_strip_h >= 3 ? 1 : 0);
        human.inner_safe_ul.c = 1 + (actual_strip_w >= 3 ? 1 : 0);
        human.inner_safe_br.r = human.strip_r_end - (actual_strip_h >= 3 ? 1 : 0);
        human.inner_safe_br.c = GRID_SIZE - (actual_strip_w >= 3 ? 1 : 0);

        // Degenerate (1-2 row) strips: collapse the rectangle rather than
        // letting it invert.
        if (human.inner_safe_ul.r > human.inner_safe_br.r) human.inner_safe_br.r = human.inner_safe_ul.r;
        if (human.inner_safe_ul.c > human.inner_safe_br.c) human.inner_safe_br.c = human.inner_safe_ul.c;

        // Final standing position: centre of the inner safe rectangle,
        // clamped to it, with a fallback if it somehow lands off-board.
        human.final_stand_pos = {
            human.inner_safe_ul.r + (human.inner_safe_br.r - human.inner_safe_ul.r) / 2,
            human.inner_safe_ul.c + (human.inner_safe_br.c - human.inner_safe_ul.c) / 2
        };
        human.final_stand_pos.r = std::max(human.inner_safe_ul.r, std::min(human.inner_safe_br.r, human.final_stand_pos.r));
        human.final_stand_pos.c = std::max(human.inner_safe_ul.c, std::min(human.inner_safe_br.c, human.final_stand_pos.c));
        if (!is_valid_point(human.final_stand_pos)) {
            human.final_stand_pos = {human.strip_r_start, 1};
        }

        human.assigned_wall_cells.clear();
        int r_s = human.strip_r_start;
        int r_e = human.strip_r_end;

        // Top border row: the first human owns all of it (it is the board's
        // top edge); interior humans own only its right half — the human
        // above owns the left half of the same shared row.
        if (i == 0) {
            for (int c_coord = 1; c_coord <= GRID_SIZE; ++c_coord) human.assigned_wall_cells.push_back({r_s, c_coord});
        } else {
            for (int c_coord = GRID_SIZE / 2 + 1; c_coord <= GRID_SIZE; ++c_coord) human.assigned_wall_cells.push_back({r_s, c_coord});
        }
        // Bottom border row: symmetric — the last human owns all of it,
        // interior humans own the left half.
        if (i == M_humans_global - 1) {
            for (int c_coord = 1; c_coord <= GRID_SIZE; ++c_coord) human.assigned_wall_cells.push_back({r_e, c_coord});
        } else {
            for (int c_coord = 1; c_coord <= GRID_SIZE / 2; ++c_coord) human.assigned_wall_cells.push_back({r_e, c_coord});
        }
        // Left and right column edges of the strip (interior rows only).
        for (int r_mid = r_s + 1; r_mid <= r_e - 1; ++r_mid) {
            human.assigned_wall_cells.push_back({r_mid, 1});
            human.assigned_wall_cells.push_back({r_mid, GRID_SIZE});
        }

        // Deduplicate (corner cells can be added twice) and keep the list
        // in deterministic sorted order.
        std::sort(human.assigned_wall_cells.begin(), human.assigned_wall_cells.end());
        human.assigned_wall_cells.erase(
            std::unique(human.assigned_wall_cells.begin(), human.assigned_wall_cells.end()),
            human.assigned_wall_cells.end()
        );
        current_r_start_coord = human.strip_r_end + 1;
    }
}
|
| 267 |
+
|
| 268 |
+
// Chooses one action character per human for the current turn.
// Returns a string of length M where each position holds:
//   '.'          — wait,
//   'u'/'d'/'l'/'r' — build a wall in that direction,
//   'U'/'D'/'L'/'R' — move one cell in that direction.
// Humans are processed in index order; earlier humans' tentative walls and
// move destinations constrain later humans' choices. A final pass cancels
// moves that collide with walls built this turn or with an earlier human's
// chosen destination.
std::string decide_human_actions() {
    std::string actions_str(M_humans_global, '.');
    // Walls that will be placed this turn (by humans already processed).
    std::vector<Point> tentative_walls_this_turn;
    // Destination cell of each human's chosen move, INVALID_POINT if none.
    std::vector<Point> tentative_move_targets_this_turn(M_humans_global, INVALID_POINT);

    for (int i = 0; i < M_humans_global; ++i) {
        HumanInfo& human = humans_global_state[i];

        // How many of this human's assigned border cells are still open.
        int unbuilt_walls_count = 0;
        for (const auto& wall_cell : human.assigned_wall_cells) {
            if (is_valid_point(wall_cell) && !is_impassable_grid_static[wall_cell.r][wall_cell.c]) {
                unbuilt_walls_count++;
            }
        }

        // Objective: build until all assigned walls exist, then head to
        // (and stay at) the precomputed final standing position.
        if (unbuilt_walls_count == 0) {
            human.objective = (human.pos == human.final_stand_pos) ?
                              HumanObjective::STAYING_IN_SAFE_SPOT :
                              HumanObjective::GOING_TO_SAFE_SPOT;
        } else {
            human.objective = HumanObjective::BUILDING_WALLS;
        }

        // If building has made no progress for too long, break the deadlock
        // by walking back toward the safe spot instead.
        if(human.objective == HumanObjective::BUILDING_WALLS && human.turns_stuck_building >= MAX_STUCK_TURNS) {
            human.objective = HumanObjective::REPOSITIONING_STUCK;
        }

        char chosen_action_for_human_i = '.';
        if (human.objective == HumanObjective::STAYING_IN_SAFE_SPOT) {
            chosen_action_for_human_i = '.';
        } else if (human.objective == HumanObjective::GOING_TO_SAFE_SPOT ||
                   human.objective == HumanObjective::REPOSITIONING_STUCK) {
            if(human.objective == HumanObjective::REPOSITIONING_STUCK) human.turns_stuck_building = 0;

            chosen_action_for_human_i = get_bfs_move_char(human.pos, human.final_stand_pos, tentative_walls_this_turn);

        } else if (human.objective == HumanObjective::BUILDING_WALLS) {
            // Score every (buildable wall cell, adjacent standing cell)
            // pair and pick the cheapest; ties broken lexicographically
            // for determinism.
            Point best_wall_target = INVALID_POINT;
            Point best_stand_point = INVALID_POINT;
            int min_eval_score = INF;

            for (const auto& wall_coord : human.assigned_wall_cells) {
                if (!is_valid_point(wall_coord) || is_impassable_grid_static[wall_coord.r][wall_coord.c]) continue;
                if (!can_theoretically_build_at(wall_coord, human.id)) continue;

                int adj_wall_bonus_val = count_adjacent_walls_or_boundaries(wall_coord) * ADJACENT_WALL_PRIORITY_BONUS;
                int current_near_pet_penalty = 0; // NEAR_PET_PENALTY_POINTS_PER_PET is 0

                // Try each of the four cells the human could stand on while
                // building this wall.
                for (int k_dir_idx = 0; k_dir_idx < 4; ++k_dir_idx) {
                    Point potential_stand_pos = {wall_coord.r + DIRS[k_dir_idx].r,
                                                 wall_coord.c + DIRS[k_dir_idx].c};

                    if (!is_valid_point(potential_stand_pos) || is_impassable_grid_static[potential_stand_pos.r][potential_stand_pos.c]) continue;

                    // Cannot stand where an earlier human builds this turn.
                    bool conflict_with_tentative_wall_build_spot = false;
                    for(const auto& tw : tentative_walls_this_turn) { if(potential_stand_pos == tw) { conflict_with_tentative_wall_build_spot = true; break; }}
                    if(conflict_with_tentative_wall_build_spot) continue;

                    // Cannot stand where an earlier human plans to move.
                    bool conflict_with_tentative_move_dest = false;
                    for(int j=0; j < i; ++j) {
                        if (tentative_move_targets_this_turn[j] == potential_stand_pos) { conflict_with_tentative_move_dest = true; break; }
                    }
                    if (conflict_with_tentative_move_dest) continue;

                    // Score = travel distance minus adjacency bonus, plus a
                    // large penalty for standing outside the safe interior
                    // (avoids getting walled out of the pen).
                    int current_dist_to_stand = manhattan_distance(human.pos, potential_stand_pos);
                    int current_eval_score = current_dist_to_stand - adj_wall_bonus_val + current_near_pet_penalty;

                    bool is_inside_inner_safe_region =
                        (potential_stand_pos.r >= human.inner_safe_ul.r &&
                         potential_stand_pos.r <= human.inner_safe_br.r &&
                         potential_stand_pos.c >= human.inner_safe_ul.c &&
                         potential_stand_pos.c <= human.inner_safe_br.c);

                    if (!is_inside_inner_safe_region) {
                        current_eval_score += STAND_OUTSIDE_INNER_SAFE_PENALTY;
                    }

                    if (current_eval_score < min_eval_score) {
                        min_eval_score = current_eval_score;
                        best_wall_target = wall_coord;
                        best_stand_point = potential_stand_pos;
                    } else if (current_eval_score == min_eval_score) {
                        // Deterministic tie-break: smallest wall cell, then
                        // smallest standing cell.
                        if (best_wall_target.r == -1 ||
                            wall_coord < best_wall_target ||
                            (wall_coord == best_wall_target && potential_stand_pos < best_stand_point)) {
                            best_wall_target = wall_coord;
                            best_stand_point = potential_stand_pos;
                        }
                    }
                }
            }

            if (best_wall_target.r != -1) {
                human.turns_stuck_building = 0;
                if (human.pos == best_stand_point) {
                    // Already in position: emit the build action toward the
                    // chosen wall cell.
                    for(int k_dir=0; k_dir<4; ++k_dir){
                        if(human.pos.r + DIRS[k_dir].r == best_wall_target.r &&
                           human.pos.c + DIRS[k_dir].c == best_wall_target.c){
                            chosen_action_for_human_i = DIR_CHARS_BUILD[k_dir];
                            break;
                        }
                    }
                } else {
                    // Otherwise take one BFS step toward the standing cell.
                    chosen_action_for_human_i = get_bfs_move_char(human.pos, best_stand_point, tentative_walls_this_turn);
                }
            } else {
                // No buildable target this turn: count it as a stuck turn
                // and drift toward the final standing position.
                if (unbuilt_walls_count > 0) {
                    human.turns_stuck_building++;
                }
                if (human.pos != human.final_stand_pos) {
                    chosen_action_for_human_i = get_bfs_move_char(human.pos, human.final_stand_pos, tentative_walls_this_turn);
                } else {
                    chosen_action_for_human_i = '.';
                }
            }
        }

        actions_str[i] = chosen_action_for_human_i;

        // Record this human's commitment so later humans see it.
        if (chosen_action_for_human_i != '.' && (chosen_action_for_human_i == 'u' || chosen_action_for_human_i == 'd' || chosen_action_for_human_i == 'l' || chosen_action_for_human_i == 'r')) {
            for(int k_dir=0; k_dir<4; ++k_dir) {
                if (chosen_action_for_human_i == DIR_CHARS_BUILD[k_dir]) {
                    Point built_wall_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(built_wall_pos)) {
                        tentative_walls_this_turn.push_back(built_wall_pos);
                    }
                    break;
                }
            }
        } else if (chosen_action_for_human_i != '.' && (chosen_action_for_human_i == 'U' || chosen_action_for_human_i == 'D' || chosen_action_for_human_i == 'L' || chosen_action_for_human_i == 'R')) {
            for(int k_dir=0; k_dir<4; ++k_dir) {
                if (chosen_action_for_human_i == DIR_CHARS_MOVE[k_dir]) {
                    Point target_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(target_pos)) {
                        tentative_move_targets_this_turn[i] = target_pos;
                    } else {
                        // Off-board move: downgrade to a wait.
                        actions_str[i] = '.';
                    }
                    break;
                }
            }
        }
    }

    // Final validation pass: cancel moves whose destination is a wall built
    // this turn or the destination of an earlier human's move.
    for (int i = 0; i < M_humans_global; ++i) {
        if (actions_str[i] != '.' && (actions_str[i] == 'U' || actions_str[i] == 'D' || actions_str[i] == 'L' || actions_str[i] == 'R')) {
            Point target_move_sq = tentative_move_targets_this_turn[i];
            if (target_move_sq.r == -1) {
                actions_str[i] = '.';
                continue;
            }

            bool conflict_with_wall = false;
            for (const auto& wall_being_built : tentative_walls_this_turn) {
                if (target_move_sq == wall_being_built) {
                    conflict_with_wall = true;
                    break;
                }
            }
            if (conflict_with_wall) {
                actions_str[i] = '.';
            } else {
                for (int j = 0; j < i; ++j) {
                    if (actions_str[j] != '.' && (actions_str[j] == 'U' || actions_str[j] == 'D' || actions_str[j] == 'L' || actions_str[j] == 'R') &&
                        tentative_move_targets_this_turn[j] == target_move_sq) {
                        actions_str[i] = '.';
                        break;
                    }
                }
            }
        }
    }
    return actions_str;
}
|
| 442 |
+
|
| 443 |
+
// Applies this turn's already-printed actions to the local world model and
// then reads the judge's pet movements from stdin.
// Order matters and mirrors the game rules: all walls are placed first,
// then human moves are applied (a move into a just-built wall is ignored),
// then pet moves are consumed.
void apply_actions_and_update_state(const std::string& actions_str_final) {
    // Pass 1: place walls for every build action ('u'/'d'/'l'/'r').
    for (int i = 0; i < M_humans_global; ++i) {
        char action = actions_str_final[i];
        HumanInfo& human = humans_global_state[i];
        if (action != '.' && (action == 'u' || action == 'd' || action == 'l' || action == 'r')) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if (action == DIR_CHARS_BUILD[k_dir]) {
                    Point wall_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(wall_pos) && !is_impassable_grid_static[wall_pos.r][wall_pos.c]) {
                        is_impassable_grid_static[wall_pos.r][wall_pos.c] = true;
                    }
                    break;
                }
            }
        }
    }

    // Pass 2: apply human moves ('U'/'D'/'L'/'R'); a move into an invalid
    // or impassable cell leaves the human where they are.
    for (int i = 0; i < M_humans_global; ++i) {
        char action = actions_str_final[i];
        HumanInfo& human = humans_global_state[i];
        if (action != '.' && (action == 'U' || action == 'D' || action == 'L' || action == 'R')) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if (action == DIR_CHARS_MOVE[k_dir]) {
                    Point next_pos = {human.pos.r + DIRS[k_dir].r, human.pos.c + DIRS[k_dir].c};
                    if (is_valid_point(next_pos) && !is_impassable_grid_static[next_pos.r][next_pos.c]) {
                        human.pos = next_pos;
                    }
                    break;
                }
            }
        }
    }

    // Pass 3: read each pet's move string from the judge ("." means the
    // pet stayed put; otherwise each character is one step).
    for (int i = 0; i < N_pets_global; ++i) {
        std::string pet_moves_str;
        std::cin >> pet_moves_str;
        if (pet_moves_str == ".") continue;

        for (char move_char : pet_moves_str) {
            for(int k_dir=0; k_dir<4; ++k_dir){
                if(move_char == PET_MOVE_CHARS[k_dir]){
                    pets_global_state[i].pos.r += DIRS[k_dir].r;
                    pets_global_state[i].pos.c += DIRS[k_dir].c;
                    break;
                }
            }
        }
    }
}
|
| 492 |
+
|
| 493 |
+
int main() {
|
| 494 |
+
std::ios_base::sync_with_stdio(false);
|
| 495 |
+
std::cin.tie(NULL);
|
| 496 |
+
|
| 497 |
+
initialize_game();
|
| 498 |
+
|
| 499 |
+
for (int turn_idx = 0; turn_idx < NUM_TURNS; ++turn_idx) {
|
| 500 |
+
std::string actions_to_perform = decide_human_actions();
|
| 501 |
+
std::cout << actions_to_perform << std::endl;
|
| 502 |
+
|
| 503 |
+
apply_actions_and_update_state(actions_to_perform);
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
return 0;
|
| 507 |
+
}
|
| 508 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale_agent_best/ahc011.cpp
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <array>
|
| 6 |
+
#include <algorithm>
|
| 7 |
+
#include <unordered_map>
|
| 8 |
+
#include <map> // For A* visited set
|
| 9 |
+
#include <iomanip>
|
| 10 |
+
#include <chrono>
|
| 11 |
+
#include <functional> // For std::hash
|
| 12 |
+
#include <cmath> // For std::round
|
| 13 |
+
#include <random> // For std::mt19937
|
| 14 |
+
#include <numeric> // For std::iota
|
| 15 |
+
#include <queue> // For A* search (priority_queue)
|
| 16 |
+
|
| 17 |
+
// Constants for tile connections
|
| 18 |
+
const int LEFT_MASK = 1;
|
| 19 |
+
const int UP_MASK = 2;
|
| 20 |
+
const int RIGHT_MASK = 4;
|
| 21 |
+
const int DOWN_MASK = 8;
|
| 22 |
+
|
| 23 |
+
// Max N value, actual N read from input
|
| 24 |
+
const int N_MAX_CONST = 10;
|
| 25 |
+
int N_actual; // Actual N for the current test case
|
| 26 |
+
int T_param; // Actual T for the current test case
|
| 27 |
+
|
| 28 |
+
const int DR_TILE_RELATIVE_TO_EMPTY[] = {-1, 1, 0, 0};
|
| 29 |
+
const int DC_TILE_RELATIVE_TO_EMPTY[] = {0, 0, -1, 1};
|
| 30 |
+
const char MOVE_CHARS[] = {'U', 'D', 'L', 'R'};
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
std::mt19937 zobrist_rng_engine(123456789);
|
| 34 |
+
std::uniform_int_distribution<uint64_t> distrib_uint64;
|
| 35 |
+
uint64_t zobrist_tile_keys[N_MAX_CONST][N_MAX_CONST][16];
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
void init_zobrist_keys() {
|
| 39 |
+
for (int i = 0; i < N_actual; ++i) {
|
| 40 |
+
for (int j = 0; j < N_actual; ++j) {
|
| 41 |
+
for (int k = 0; k < 16; ++k) {
|
| 42 |
+
zobrist_tile_keys[i][j][k] = distrib_uint64(zobrist_rng_engine);
|
| 43 |
+
}
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
// Decode a single lowercase hexadecimal digit ('0'-'9', 'a'-'f') into its
// numeric value 0-15. Input is assumed to be a valid hex digit; anything
// else yields an unspecified value (same contract as the original).
int hex_char_to_int(char c) {
    const bool is_decimal_digit = (c >= '0' && c <= '9');
    return is_decimal_digit ? (c - '0') : (c - 'a' + 10);
}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
// Sliding-puzzle board: an N_actual x N_actual grid of hex-digit tiles
// ('0' marks the single empty cell) plus an incrementally maintained
// Zobrist hash used as a cache/dedup key elsewhere in the program.
struct Board {
    std::array<std::array<char, N_MAX_CONST>, N_MAX_CONST> tiles;  // hex chars; only the top-left N_actual x N_actual is used
    int empty_r, empty_c;          // current location of the empty cell ('0')
    uint64_t zobrist_hash_value;   // XOR of zobrist_tile_keys[r][c][value] over all cells

    Board() : empty_r(0), empty_c(0), zobrist_hash_value(0) {}

    // Recompute the Zobrist hash from scratch; call once after the tiles
    // array has been filled (e.g. right after reading input).
    void calculate_initial_hash() {
        zobrist_hash_value = 0;
        for (int i = 0; i < N_actual; ++i) {
            for (int j = 0; j < N_actual; ++j) {
                zobrist_hash_value ^= zobrist_tile_keys[i][j][hex_char_to_int(tiles[i][j])];
            }
        }
    }

    // Incrementally update the hash for a tile/empty swap.
    // IMPORTANT: must be called AFTER `tiles` has been updated but BEFORE
    // empty_r/empty_c are moved — it reads the moved tile's value from its
    // NEW position (pos_empty_gets_tile_*). apply_move_char follows this
    // ordering exactly; keep it if you add other callers.
    void update_hash_after_move(int pos_tile_becomes_empty_r, int pos_tile_becomes_empty_c,
                                int pos_empty_gets_tile_r, int pos_empty_gets_tile_c) {
        int moved_tile_val_int = hex_char_to_int(tiles[pos_empty_gets_tile_r][pos_empty_gets_tile_c]);

        // Remove the moved tile from its old cell and the empty marker from its old cell.
        zobrist_hash_value ^= zobrist_tile_keys[pos_tile_becomes_empty_r][pos_tile_becomes_empty_c][moved_tile_val_int];
        zobrist_hash_value ^= zobrist_tile_keys[pos_empty_gets_tile_r][pos_empty_gets_tile_c][0];

        // Add the empty marker at the tile's old cell and the tile at the empty's old cell.
        zobrist_hash_value ^= zobrist_tile_keys[pos_tile_becomes_empty_r][pos_tile_becomes_empty_c][0];
        zobrist_hash_value ^= zobrist_tile_keys[pos_empty_gets_tile_r][pos_empty_gets_tile_c][moved_tile_val_int];
    }

    // Apply one move ('U'/'D'/'L'/'R' — the direction the TILE slides relative
    // to the empty cell, per MOVE_CHARS/DR/DC tables). Returns false and leaves
    // the board untouched if the character is unknown or the move leaves the grid.
    bool apply_move_char(char move_char) {
        int move_dir_idx = -1;
        for(int i=0; i<4; ++i) if(MOVE_CHARS[i] == move_char) move_dir_idx = i;

        if(move_dir_idx == -1) return false;

        int tile_to_move_r = empty_r + DR_TILE_RELATIVE_TO_EMPTY[move_dir_idx];
        int tile_to_move_c = empty_c + DC_TILE_RELATIVE_TO_EMPTY[move_dir_idx];

        if (tile_to_move_r < 0 || tile_to_move_r >= N_actual || tile_to_move_c < 0 || tile_to_move_c >= N_actual) {
            return false;
        }

        // Swap tile and empty cell in the grid first...
        char moved_tile_hex_val = tiles[tile_to_move_r][tile_to_move_c];
        tiles[empty_r][empty_c] = moved_tile_hex_val;
        tiles[tile_to_move_r][tile_to_move_c] = '0';

        // ...then fix up the hash (reads the post-swap grid; see note above)...
        update_hash_after_move(tile_to_move_r, tile_to_move_c, empty_r, empty_c);

        // ...and finally move the empty-cell cursor.
        empty_r = tile_to_move_r;
        empty_c = tile_to_move_c;
        return true;
    }
};
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
// Connectivity summary of a board, computed by calculate_scores and cached
// by Zobrist hash in s_value_cache_by_hash.
struct ScoreComponents {
    int max_tree_size;   // size of the largest component that is a tree (edges == nodes - 1); 0 if none
    int num_components;  // number of connected components over non-empty cells
};
|
| 111 |
+
std::unordered_map<uint64_t, ScoreComponents> s_value_cache_by_hash;
|
| 112 |
+
const size_t MAX_SCORE_CACHE_SIZE_CONST = 2000000;
|
| 113 |
+
|
| 114 |
+
// Disjoint-set union over the N x N grid cells, tracking per-set node and
// edge counts so callers can detect tree components (edges == nodes - 1).
// The caller seeds nodes_in_set per cell (1 for a tile, 0 for the empty
// cell) before adding edges; counts are only meaningful at set roots.
struct DSU {
    std::vector<int> parent;        // parent[i] == i marks a set root
    std::vector<int> nodes_in_set;  // node count, valid at roots; seeded externally
    std::vector<int> edges_in_set;  // edge count, valid at roots
    int N_sq_total_cells;           // total number of cells (current_N squared)

    // @param current_N  side length of the grid.
    DSU(int current_N) : N_sq_total_cells(current_N * current_N) {
        parent.resize(N_sq_total_cells);
        std::iota(parent.begin(), parent.end(), 0);
        nodes_in_set.assign(N_sq_total_cells, 0);
        edges_in_set.assign(N_sq_total_cells, 0);
    }

    // Root of i's set, with full path compression. Iterative to avoid deep
    // recursion on long parent chains (the original recursed).
    int find(int i) {
        int root = i;
        while (parent[root] != root) root = parent[root];
        while (parent[i] != root) {
            int next = parent[i];
            parent[i] = root;
            i = next;
        }
        return root;
    }

    // Merge the sets of i_idx and j_idx using union by size.
    // Robustness fix: the original assumed distinct roots and would double
    // nodes_in_set/edges_in_set if both arguments shared a set; now a
    // same-set call is a safe no-op (add_edge's guarded usage is unchanged).
    void unite(int i_idx, int j_idx) {
        int root_i = find(i_idx);
        int root_j = find(j_idx);
        if (root_i == root_j) return;

        if (nodes_in_set[root_i] < nodes_in_set[root_j]) std::swap(root_i, root_j);

        parent[root_j] = root_i;
        nodes_in_set[root_i] += nodes_in_set[root_j];
        edges_in_set[root_i] += edges_in_set[root_j];
    }

    // Record one undirected edge between u_idx and v_idx: merge their sets
    // if disjoint, then credit the edge to the (possibly new) root.
    void add_edge(int u_idx, int v_idx) {
        int root_u = find(u_idx);
        int root_v = find(v_idx);
        if (root_u != root_v) {
            unite(u_idx, v_idx);
            edges_in_set[find(u_idx)]++;
        } else {
            edges_in_set[root_u]++;
        }
    }
};
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
// Compute (largest tree-component size, component count) for a board.
// Two horizontally/vertically adjacent tiles are connected when their
// connection nibbles both carry the facing-direction bits (RIGHT/LEFT or
// DOWN/UP). Results are memoized by Zobrist hash; the cache is bounded by
// MAX_SCORE_CACHE_SIZE_CONST and never evicts (hash collisions would return
// a stale entry — accepted trade-off for speed).
ScoreComponents calculate_scores(const Board& board) {
    // Fast path: previously evaluated board.
    auto it_cache = s_value_cache_by_hash.find(board.zobrist_hash_value);
    if (it_cache != s_value_cache_by_hash.end()) {
        return it_cache->second;
    }

    DSU dsu(N_actual);

    // Seed node counts: every tile cell is a singleton set; the empty cell
    // gets 0 so it is excluded from component accounting below.
    for (int r = 0; r < N_actual; ++r) {
        for (int c = 0; c < N_actual; ++c) {
            int cell_idx = r * N_actual + c;
            if (board.tiles[r][c] != '0') {
                dsu.nodes_in_set[cell_idx] = 1;
            } else {
                dsu.nodes_in_set[cell_idx] = 0;
            }
        }
    }

    // Horizontal adjacencies: tile must open RIGHT and its neighbor LEFT.
    for (int r = 0; r < N_actual; ++r) {
        for (int c = 0; c < N_actual - 1; ++c) {
            int tile1_val = hex_char_to_int(board.tiles[r][c]);
            int tile2_val = hex_char_to_int(board.tiles[r][c+1]);
            if (tile1_val != 0 && tile2_val != 0) {
                if ((tile1_val & RIGHT_MASK) && (tile2_val & LEFT_MASK)) {
                    dsu.add_edge(r * N_actual + c, r * N_actual + (c + 1));
                }
            }
        }
    }
    // Vertical adjacencies: tile must open DOWN and its neighbor UP.
    for (int r = 0; r < N_actual - 1; ++r) {
        for (int c = 0; c < N_actual; ++c) {
            int tile1_val = hex_char_to_int(board.tiles[r][c]);
            int tile2_val = hex_char_to_int(board.tiles[r+1][c]);
            if (tile1_val != 0 && tile2_val != 0) {
                if ((tile1_val & DOWN_MASK) && (tile2_val & UP_MASK)) {
                    dsu.add_edge(r * N_actual + c, (r + 1) * N_actual + c);
                }
            }
        }
    }

    int max_tree_size = 0;
    int total_num_components = 0;

    // Walk set roots only (parent[i] == i); skip the empty cell's zero-node set.
    for (int i = 0; i < dsu.N_sq_total_cells; ++i) {
        if (dsu.parent[i] == i && dsu.nodes_in_set[i] > 0) {
            total_num_components++;
            // A connected component with edges == nodes - 1 is acyclic, i.e. a tree.
            if (dsu.edges_in_set[i] == dsu.nodes_in_set[i] - 1) {
                if (dsu.nodes_in_set[i] > max_tree_size) {
                    max_tree_size = dsu.nodes_in_set[i];
                }
            }
        }
    }

    ScoreComponents result = {max_tree_size, total_num_components};
    // Insert into the memo only while under the size cap (no eviction).
    if (s_value_cache_by_hash.size() < MAX_SCORE_CACHE_SIZE_CONST) {
        s_value_cache_by_hash[board.zobrist_hash_value] = result;
    }
    return result;
}
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
int TARGET_EMPTY_R_GLOBAL_FOR_A_STAR, TARGET_EMPTY_C_GLOBAL_FOR_A_STAR; // Used by A* heuristic
|
| 222 |
+
bool A_STAR_PHASE_WAS_RUN = false; // Flag to adjust beam score empty penalty
|
| 223 |
+
|
| 224 |
+
double calculate_beam_score(const ScoreComponents& scores, int K_total, const Board& current_board_state) {
|
| 225 |
+
int S = scores.max_tree_size;
|
| 226 |
+
|
| 227 |
+
const double FULL_TREE_BASE_SCORE = 1e18;
|
| 228 |
+
if (S == N_actual * N_actual - 1) {
|
| 229 |
+
return FULL_TREE_BASE_SCORE + (double)(T_param * 2 - K_total);
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
double W_S = 1e9;
|
| 233 |
+
double W_NC = W_S * 0.8; // Make W_NC very strong, almost as much as increasing S by 1.
|
| 234 |
+
double W_K = 1.0;
|
| 235 |
+
double W_empty_dist_penalty_main;
|
| 236 |
+
|
| 237 |
+
if (A_STAR_PHASE_WAS_RUN) { // A* moved empty to target initially
|
| 238 |
+
W_empty_dist_penalty_main = W_K * 0.5; // Very low penalty, allow free movement
|
| 239 |
+
} else { // Empty started at target, or A* failed (should not happen)
|
| 240 |
+
W_empty_dist_penalty_main = W_K * 10.0; // Moderate penalty
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
double score_val = (double)S * W_S;
|
| 244 |
+
if (scores.num_components > 1) {
|
| 245 |
+
score_val -= (double)(scores.num_components - 1) * W_NC;
|
| 246 |
+
} else if (scores.num_components == 0 && N_actual * N_actual - 1 > 0) {
|
| 247 |
+
score_val -= (double)(N_actual * N_actual -1) * W_NC;
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
// Bonus for being very close to a full tree and connected
|
| 251 |
+
if (S >= (N_actual * N_actual - 1) - 2 && scores.num_components == 1 && S < N_actual * N_actual - 1) {
|
| 252 |
+
score_val += W_S * 0.5; // Significant bonus to encourage the last step
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
score_val -= (double)K_total * W_K;
|
| 256 |
+
|
| 257 |
+
// Penalty for empty square relative to (N-1,N-1)
|
| 258 |
+
int dist_empty_to_corner = std::abs(current_board_state.empty_r - (N_actual - 1)) +
|
| 259 |
+
std::abs(current_board_state.empty_c - (N_actual - 1));
|
| 260 |
+
score_val -= dist_empty_to_corner * W_empty_dist_penalty_main;
|
| 261 |
+
|
| 262 |
+
return score_val;
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
double calculate_actual_score(int S, int K_total) {
|
| 266 |
+
if (N_actual * N_actual - 1 == 0) return 0;
|
| 267 |
+
if (S == N_actual * N_actual - 1) {
|
| 268 |
+
if (K_total > T_param) return 0;
|
| 269 |
+
return std::round(500000.0 * (2.0 - (double)K_total / T_param));
|
| 270 |
+
} else {
|
| 271 |
+
return std::round(500000.0 * (double)S / (N_actual * N_actual - 1.0));
|
| 272 |
+
}
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
// One link in the shared move-history tree appended to beam_history_storage;
// following parent_history_idx back to the root sentinel reconstructs a
// state's full move sequence (see reconstruct_beam_path).
struct BeamHistoryEntry {
    int parent_history_idx;  // index of the parent entry; -1 only on the root sentinel
    char move_char_taken;    // move ('U'/'D'/'L'/'R') that produced this entry
};
|
| 279 |
+
std::vector<BeamHistoryEntry> beam_history_storage;
|
| 280 |
+
const size_t MAX_BEAM_HISTORY_STORAGE_SIZE_CONST = 3000000;
|
| 281 |
+
|
| 282 |
+
// One beam-search node: a full board snapshot plus bookkeeping needed for
// scoring, deduplication, and path reconstruction.
struct BeamState {
    Board board;                  // board configuration at this node
    double beam_score_val;        // heuristic value from calculate_beam_score (higher is better)
    int k_beam_moves;             // moves made in the beam phase (excludes the A* prefix)
    int history_idx;              // index into beam_history_storage for this node's move chain
    int prev_move_direction_idx;  // last move's direction index (0-3), or -1 at the root;
                                  // used to prune immediate move reversals

    // Inverted comparison so std::sort orders candidates best-score-first.
    bool operator<(const BeamState& other) const {
        return beam_score_val > other.beam_score_val;
    }
};
|
| 293 |
+
|
| 294 |
+
std::chrono::steady_clock::time_point T_START_CHRONO_MAIN;
|
| 295 |
+
const int TIME_LIMIT_MS_SLACK_CONST = 400; // Universal slack
|
| 296 |
+
long long TIME_LIMIT_MS_EFFECTIVE_MAIN;
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
std::mt19937 rng_stochastic_selection_main;
|
| 300 |
+
std::unordered_map<uint64_t, int> min_K_to_reach_by_hash_main;
|
| 301 |
+
const size_t MAX_MIN_K_CACHE_SIZE_CONST = 2000000;
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
struct AStarEmptyState {
|
| 305 |
+
int r, c;
|
| 306 |
+
int g_cost;
|
| 307 |
+
std::string path;
|
| 308 |
+
|
| 309 |
+
bool operator>(const AStarEmptyState& other) const {
|
| 310 |
+
int h_cost_this = std::abs(r - TARGET_EMPTY_R_GLOBAL_FOR_A_STAR) + std::abs(c - TARGET_EMPTY_C_GLOBAL_FOR_A_STAR);
|
| 311 |
+
int h_cost_other = std::abs(other.r - TARGET_EMPTY_R_GLOBAL_FOR_A_STAR) + std::abs(other.c - TARGET_EMPTY_C_GLOBAL_FOR_A_STAR);
|
| 312 |
+
if (g_cost + h_cost_this != other.g_cost + h_cost_other) {
|
| 313 |
+
return g_cost + h_cost_this > other.g_cost + h_cost_other;
|
| 314 |
+
}
|
| 315 |
+
return g_cost > other.g_cost;
|
| 316 |
+
}
|
| 317 |
+
};
|
| 318 |
+
|
| 319 |
+
// A* search that drives the empty cell from its current position to
// (target_r, target_c), ignoring tile identities (any neighbor may slide).
// Returns the move string, or "" if no path is found within the depth cap.
// Side effect: writes the target into the TARGET_EMPTY_*_GLOBAL_FOR_A_STAR
// globals read by AStarEmptyState's comparator — not reentrant.
std::string find_path_for_empty(const Board& initial_board_state_for_A_star, int target_r, int target_c) {
    TARGET_EMPTY_R_GLOBAL_FOR_A_STAR = target_r;
    TARGET_EMPTY_C_GLOBAL_FOR_A_STAR = target_c;

    // Min-heap on f = g + Manhattan heuristic (see AStarEmptyState::operator>).
    std::priority_queue<AStarEmptyState, std::vector<AStarEmptyState>, std::greater<AStarEmptyState>> pq;
    // Best-known g per cell; initialized above any reachable cost so the
    // first visit always wins.
    std::vector<std::vector<int>> min_g_cost_grid(N_actual, std::vector<int>(N_actual, T_param + 1));

    pq.push({initial_board_state_for_A_star.empty_r, initial_board_state_for_A_star.empty_c, 0, ""});
    min_g_cost_grid[initial_board_state_for_A_star.empty_r][initial_board_state_for_A_star.empty_c] = 0;

    int A_star_max_depth = N_actual * N_actual * 2; // Allow more depth just in case

    while(!pq.empty()){
        AStarEmptyState current = pq.top();
        pq.pop();

        // Stale heap entry: a cheaper route to this cell was already expanded.
        if (current.g_cost > min_g_cost_grid[current.r][current.c]) {
            continue;
        }

        // Goal test on pop — with a consistent heuristic this path is optimal.
        if (current.r == target_r && current.c == target_c) {
            return current.path;
        }

        if (current.g_cost >= A_star_max_depth) continue;

        for (int move_idx = 0; move_idx < 4; ++move_idx) {
            // Cell of the tile that would slide into the empty cell; the
            // empty cell then occupies that tile's spot.
            int tile_that_moves_r = current.r + DR_TILE_RELATIVE_TO_EMPTY[move_idx];
            int tile_that_moves_c = current.c + DC_TILE_RELATIVE_TO_EMPTY[move_idx];

            if (tile_that_moves_r < 0 || tile_that_moves_r >= N_actual || tile_that_moves_c < 0 || tile_that_moves_c >= N_actual) {
                continue;
            }

            int next_empty_r = tile_that_moves_r;
            int next_empty_c = tile_that_moves_c;

            int next_g_cost = current.g_cost + 1;

            // Skip unless this strictly improves the best-known cost.
            if (min_g_cost_grid[next_empty_r][next_empty_c] <= next_g_cost) {
                continue;
            }
            min_g_cost_grid[next_empty_r][next_empty_c] = next_g_cost;
            pq.push({next_empty_r, next_empty_c, next_g_cost, current.path + MOVE_CHARS[move_idx]});
        }
    }
    return "";
}
|
| 367 |
+
|
| 368 |
+
std::string reconstruct_beam_path(int final_history_idx) {
|
| 369 |
+
std::string path_str = "";
|
| 370 |
+
int current_trace_hist_idx = final_history_idx;
|
| 371 |
+
while(current_trace_hist_idx > 0 &&
|
| 372 |
+
static_cast<size_t>(current_trace_hist_idx) < beam_history_storage.size() &&
|
| 373 |
+
beam_history_storage[current_trace_hist_idx].parent_history_idx != -1) {
|
| 374 |
+
path_str += beam_history_storage[current_trace_hist_idx].move_char_taken;
|
| 375 |
+
current_trace_hist_idx = beam_history_storage[current_trace_hist_idx].parent_history_idx;
|
| 376 |
+
}
|
| 377 |
+
std::reverse(path_str.begin(), path_str.end());
|
| 378 |
+
return path_str;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
// Solver driver. Pipeline:
//   1. Read N and the move budget T, then the hex tile grid.
//   2. A* phase: route the empty cell to the bottom-right corner.
//   3. Beam search over tile moves, scoring candidates with
//      calculate_beam_score, deduplicating by Zobrist hash, and tracking the
//      best contest score seen at ANY intermediate state.
//   4. Print the best move string found.
// Time budget: ~2950 ms minus TIME_LIMIT_MS_SLACK_CONST, checked every 10 iterations.
int main(int /*argc*/, char** /*argv*/) {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(NULL);

    // Wall-clock seed for the stochastic beam selection only; Zobrist keys
    // use a fixed seed elsewhere so hashes are reproducible.
    unsigned int random_seed_stochastic = std::chrono::steady_clock::now().time_since_epoch().count();
    rng_stochastic_selection_main.seed(random_seed_stochastic);

    T_START_CHRONO_MAIN = std::chrono::steady_clock::now();

    std::cin >> N_actual >> T_param;

    init_zobrist_keys();

    // Read the board; locate the empty cell ('0') while scanning.
    Board current_board_obj;
    for (int i = 0; i < N_actual; ++i) {
        std::string row_str;
        std::cin >> row_str;
        for (int j = 0; j < N_actual; ++j) {
            current_board_obj.tiles[i][j] = row_str[j];
            if (current_board_obj.tiles[i][j] == '0') {
                current_board_obj.empty_r = i;
                current_board_obj.empty_c = j;
            }
        }
    }
    current_board_obj.calculate_initial_hash();

    // --- A* phase: park the empty cell at the bottom-right corner. ---
    std::string initial_empty_moves_path = "";
    int target_empty_final_r = N_actual - 1;
    int target_empty_final_c = N_actual - 1;

    if (current_board_obj.empty_r != target_empty_final_r || current_board_obj.empty_c != target_empty_final_c) {
        initial_empty_moves_path = find_path_for_empty(current_board_obj, target_empty_final_r, target_empty_final_c);
        // Flag read by calculate_beam_score to soften the corner-distance penalty.
        A_STAR_PHASE_WAS_RUN = !initial_empty_moves_path.empty();
    }

    for (char move_char : initial_empty_moves_path) {
        current_board_obj.apply_move_char(move_char);
    }
    int K_initial_empty_moves = initial_empty_moves_path.length();

    // Adaptive time limit after A*
    auto time_after_astar = std::chrono::steady_clock::now();
    long long elapsed_astar_ms = std::chrono::duration_cast<std::chrono::milliseconds>(time_after_astar - T_START_CHRONO_MAIN).count();
    TIME_LIMIT_MS_EFFECTIVE_MAIN = 2950 - elapsed_astar_ms - TIME_LIMIT_MS_SLACK_CONST;

    // Pre-reserve the big shared structures to avoid rehash/realloc churn mid-search.
    beam_history_storage.reserve(MAX_BEAM_HISTORY_STORAGE_SIZE_CONST);
    s_value_cache_by_hash.reserve(MAX_SCORE_CACHE_SIZE_CONST);
    min_K_to_reach_by_hash_main.reserve(MAX_MIN_K_CACHE_SIZE_CONST);

    std::vector<BeamState> current_beam;

    ScoreComponents initial_scores_for_beam = calculate_scores(current_board_obj);
    double initial_beam_eval_score = calculate_beam_score(initial_scores_for_beam, K_initial_empty_moves, current_board_obj);

    // History root sentinel at index 0 (parent -1 terminates reconstruction).
    beam_history_storage.push_back({-1, ' '});
    current_beam.push_back({current_board_obj, initial_beam_eval_score, 0, 0, -1});

    // The answer may come from any intermediate state, not just the last beam.
    double overall_best_actual_score = calculate_actual_score(initial_scores_for_beam.max_tree_size, K_initial_empty_moves);
    std::string overall_best_path_str = initial_empty_moves_path;

    min_K_to_reach_by_hash_main[current_board_obj.zobrist_hash_value] = K_initial_empty_moves;

    // Beam parameters: width shrinks with N to keep per-iteration cost bounded.
    int beam_width;
    float elite_ratio = 0.2f; // Standard elite ratio
    int stochastic_sample_pool_factor = 3;

    if (N_actual <= 6) { beam_width = 1200;} // N=6 is small, can afford wider
    else if (N_actual == 7) { beam_width = 1000;}
    else if (N_actual == 8) { beam_width = 700;} // Reduced from 800 to save time slightly
    else if (N_actual == 9) { beam_width = 400;} // Reduced from 500
    else { beam_width = 250;} // N=10, reduced from 300

    std::vector<BeamState> candidates_pool;
    candidates_pool.reserve(beam_width * 4 + 10);

    std::vector<BeamState> next_beam_states_temp;
    next_beam_states_temp.reserve(beam_width + 10);

    std::vector<int> stochastic_selection_indices;
    stochastic_selection_indices.reserve(stochastic_sample_pool_factor * beam_width + 10);

    int k_iter_count_beam = 0;

    // --- Beam-search main loop: one iteration per additional tile move. ---
    for (int k_beam_iter = 0; K_initial_empty_moves + k_beam_iter < T_param; ++k_beam_iter) {
        k_iter_count_beam++;
        // Cheap periodic time check (every 10 iterations).
        if (k_iter_count_beam % 10 == 0) {
            if (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - T_START_CHRONO_MAIN).count() > TIME_LIMIT_MS_EFFECTIVE_MAIN + elapsed_astar_ms) {
                // Compare against total time budget, not just remaining for beam.
                // Total time used > total budget minus slack
                if (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - T_START_CHRONO_MAIN).count() > 2950 - TIME_LIMIT_MS_SLACK_CONST) {
                    break;
                }
            }
        }
        // Stop before the shared history vector can overflow its reservation.
        if (beam_history_storage.size() >= MAX_BEAM_HISTORY_STORAGE_SIZE_CONST - ( (size_t)beam_width * 4 + 100) ) {
            break;
        }

        candidates_pool.clear();

        // Expand every beam state by all four moves, undoing each move on the
        // SAME temp board (copy once per state, mutate/restore per move).
        for (const auto& current_state_in_beam : current_beam) {
            Board temp_board_for_moves = current_state_in_beam.board;

            int parent_k_beam = current_state_in_beam.k_beam_moves;
            int parent_history_idx = current_state_in_beam.history_idx;
            int prev_m_dir_idx = current_state_in_beam.prev_move_direction_idx;

            for (int move_dir_idx = 0; move_dir_idx < 4; ++move_dir_idx) {
                if (prev_m_dir_idx != -1) {
                    if ((prev_m_dir_idx ^ 1) == move_dir_idx) { // Check for U/D or L/R reversal using XOR trick
                        continue;
                    }
                }

                char current_move_char = MOVE_CHARS[move_dir_idx];
                // Snapshot for the manual undo below.
                int original_empty_r = temp_board_for_moves.empty_r;
                int original_empty_c = temp_board_for_moves.empty_c;
                uint64_t original_hash = temp_board_for_moves.zobrist_hash_value;

                int tile_to_move_r = original_empty_r + DR_TILE_RELATIVE_TO_EMPTY[move_dir_idx];
                int tile_to_move_c = original_empty_c + DC_TILE_RELATIVE_TO_EMPTY[move_dir_idx];

                if (tile_to_move_r < 0 || tile_to_move_r >= N_actual || tile_to_move_c < 0 || tile_to_move_c >= N_actual) {
                    continue;
                }

                // Apply the move in place (mirrors Board::apply_move_char).
                char moved_tile_hex_val = temp_board_for_moves.tiles[tile_to_move_r][tile_to_move_c];
                temp_board_for_moves.tiles[original_empty_r][original_empty_c] = moved_tile_hex_val;
                temp_board_for_moves.tiles[tile_to_move_r][tile_to_move_c] = '0';
                temp_board_for_moves.empty_r = tile_to_move_r;
                temp_board_for_moves.empty_c = tile_to_move_c;
                temp_board_for_moves.update_hash_after_move(tile_to_move_r, tile_to_move_c, original_empty_r, original_empty_c);

                int next_k_beam = parent_k_beam + 1;
                int next_K_total = K_initial_empty_moves + next_k_beam;

                // Dedup: prune if this position was already reached in <= moves.
                bool already_reached_better = false;
                auto it_map = min_K_to_reach_by_hash_main.find(temp_board_for_moves.zobrist_hash_value);
                if (it_map != min_K_to_reach_by_hash_main.end()) {
                    if (it_map->second <= next_K_total) {
                        already_reached_better = true;
                    } else {
                        it_map->second = next_K_total;
                    }
                } else {
                    if (min_K_to_reach_by_hash_main.size() < MAX_MIN_K_CACHE_SIZE_CONST) {
                        min_K_to_reach_by_hash_main[temp_board_for_moves.zobrist_hash_value] = next_K_total;
                    }
                }

                if(already_reached_better) {
                    // Undo the move and try the next direction.
                    temp_board_for_moves.tiles[tile_to_move_r][tile_to_move_c] = moved_tile_hex_val;
                    temp_board_for_moves.tiles[original_empty_r][original_empty_c] = '0';
                    temp_board_for_moves.empty_r = original_empty_r;
                    temp_board_for_moves.empty_c = original_empty_c;
                    temp_board_for_moves.zobrist_hash_value = original_hash;
                    continue;
                }

                ScoreComponents next_scores = calculate_scores(temp_board_for_moves);
                double next_beam_eval_score = calculate_beam_score(next_scores, next_K_total, temp_board_for_moves);

                beam_history_storage.push_back({parent_history_idx, current_move_char});
                int new_history_idx = beam_history_storage.size() - 1;

                candidates_pool.push_back({temp_board_for_moves, next_beam_eval_score, next_k_beam, new_history_idx, move_dir_idx});

                // Track the best ACTUAL (contest) score over every expanded state.
                double current_actual_score_val = calculate_actual_score(next_scores.max_tree_size, next_K_total);
                if (current_actual_score_val > overall_best_actual_score) {
                    overall_best_actual_score = current_actual_score_val;
                    overall_best_path_str = initial_empty_moves_path + reconstruct_beam_path(new_history_idx);
                } else if (current_actual_score_val == overall_best_actual_score) {
                    // Prefer shorter paths for same score
                    if ((initial_empty_moves_path + reconstruct_beam_path(new_history_idx)).length() < overall_best_path_str.length()){
                        overall_best_path_str = initial_empty_moves_path + reconstruct_beam_path(new_history_idx);
                    }
                }

                // Undo the move before the next direction.
                temp_board_for_moves.tiles[tile_to_move_r][tile_to_move_c] = moved_tile_hex_val;
                temp_board_for_moves.tiles[original_empty_r][original_empty_c] = '0';
                temp_board_for_moves.empty_r = original_empty_r;
                temp_board_for_moves.empty_c = original_empty_c;
                temp_board_for_moves.zobrist_hash_value = original_hash;
            }
        }

        if (candidates_pool.empty()) break;

        // Best-score-first (BeamState::operator< is inverted).
        std::sort(candidates_pool.begin(), candidates_pool.end());

        // Selection: take the top elite_ratio fraction deterministically...
        next_beam_states_temp.clear();
        int num_elites = std::min(static_cast<int>(candidates_pool.size()), static_cast<int>(beam_width * elite_ratio));
        num_elites = std::max(0, num_elites);

        for(int i=0; i < num_elites && i < static_cast<int>(candidates_pool.size()); ++i) {
            next_beam_states_temp.push_back(candidates_pool[i]);
        }

        // ...then fill the rest of the beam with a uniform sample from the
        // next stochastic_sample_pool_factor * beam_width candidates.
        if (next_beam_states_temp.size() < static_cast<size_t>(beam_width) && candidates_pool.size() > static_cast<size_t>(num_elites)) {
            stochastic_selection_indices.clear();
            int pool_start_idx = num_elites;
            int pool_end_idx = std::min(static_cast<int>(candidates_pool.size()), num_elites + stochastic_sample_pool_factor * beam_width);

            for(int i = pool_start_idx; i < pool_end_idx; ++i) {
                stochastic_selection_indices.push_back(i);
            }
            if (!stochastic_selection_indices.empty()){
                std::shuffle(stochastic_selection_indices.begin(), stochastic_selection_indices.end(), rng_stochastic_selection_main);
            }

            for(size_t i=0; i < stochastic_selection_indices.size() && next_beam_states_temp.size() < static_cast<size_t>(beam_width); ++i) {
                next_beam_states_temp.push_back(candidates_pool[stochastic_selection_indices[i]]);
            }
        }

        current_beam = next_beam_states_temp;
        if (current_beam.empty()) break;
    }

    std::cout << overall_best_path_str << std::endl;

    return 0;
}
|
| 607 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale_agent_best/ahc015.cpp
ADDED
|
@@ -0,0 +1,491 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <array>
|
| 6 |
+
#include <numeric>
|
| 7 |
+
#include <algorithm>
|
| 8 |
+
#include <cmath>
|
| 9 |
+
#include <limits>
|
| 10 |
+
#include <chrono> // For seeding RNG
|
| 11 |
+
// #include <iomanip> // For debugging output
|
| 12 |
+
|
| 13 |
+
// Constants
|
| 14 |
+
const int GRID_SIZE = 10;
|
| 15 |
+
const int NUM_TURNS = 100;
|
| 16 |
+
const int NUM_FLAVORS = 3; // Flavors are 1, 2, 3
|
| 17 |
+
|
| 18 |
+
// Directions: F, B, L, R (Up, Down, Left, Right on typical grid with (0,0) top-left)
|
| 19 |
+
const int DR[] = {-1, 1, 0, 0};
|
| 20 |
+
const int DC[] = {0, 0, -1, 1};
|
| 21 |
+
const char DIR_CHARS[] = {'F', 'B', 'L', 'R'};
|
| 22 |
+
const int NUM_DIRECTIONS = 4;
|
| 23 |
+
|
| 24 |
+
// Global data initialized once
|
| 25 |
+
std::array<int, NUM_TURNS> G_FLAVOR_SEQUENCE;
|
| 26 |
+
std::array<int, NUM_FLAVORS + 1> G_flavor_total_counts;
|
| 27 |
+
std::array<std::pair<int, int>, NUM_FLAVORS + 1> G_target_col_ranges;
|
| 28 |
+
std::array<bool, NUM_FLAVORS + 1> G_flavor_active;
|
| 29 |
+
|
| 30 |
+
// Lookahead parameters
|
| 31 |
+
const int MAX_LOOKAHEAD_DEPTH = 2;
|
| 32 |
+
// Final Iteration: Reverted to sample counts from Iteration 2, which scored highest.
|
| 33 |
+
static constexpr std::array<int, MAX_LOOKAHEAD_DEPTH> NUM_SAMPLES_CONFIG = {23, 9};
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
// Minimal xorshift64 pseudo-random generator (Marsaglia shifts 13, 7, 17).
// Seeded from the steady clock, so sequences differ per run; note the state
// must never be zero for the generator to progress (clock seeding makes
// that effectively impossible in practice).
struct XorshiftRNG {
    uint64_t x;  // 64-bit generator state
    XorshiftRNG() : x(std::chrono::steady_clock::now().time_since_epoch().count()) {}

    // Advance the state one xorshift64 step and return it.
    uint64_t next() {
        uint64_t s = x;
        s ^= s << 13;
        s ^= s >> 7;
        s ^= s << 17;
        x = s;
        return s;
    }

    // Uniform integer in [min_val, max_val]. A degenerate or inverted range
    // returns min_val without consuming randomness. Uses modulo reduction,
    // so there is a tiny bias for ranges that do not divide 2^64.
    int uniform_int(int min_val, int max_val) {
        if (min_val >= max_val) return min_val;
        const uint64_t span = static_cast<uint64_t>(max_val) - min_val + 1;
        return min_val + static_cast<int>(next() % span);
    }
};
|
| 54 |
+
XorshiftRNG rng;
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
// One placed candy: its grid cell and flavor (presumably 1..NUM_FLAVORS per
// the constants above — 0 marks an empty board cell).
struct Candy {
    int r, c, flavor;
};
|
| 60 |
+
|
| 61 |
+
// Board state plus a flat list of placed candies, with tilt simulation and the
// tuned heuristic evaluation driving the lookahead search.
struct GameState {
    std::array<std::array<int, GRID_SIZE>, GRID_SIZE> board;  // 0 = empty, else flavor id
    std::vector<Candy> candies_list;                          // one entry per occupied cell, kept in sync with board
    int turn_num_1_indexed;                                   // 1-based index of the turn being processed

    GameState() : turn_num_1_indexed(0) {
        for (int i = 0; i < GRID_SIZE; ++i) {
            board[i].fill(0);
        }
        candies_list.reserve(NUM_TURNS);
    }

    GameState(const GameState& other) = default;
    GameState& operator=(const GameState& other) = default;
    GameState(GameState&& other) noexcept = default;
    GameState& operator=(GameState&& other) noexcept = default;

    // Places a candy on cell (r, c) — assumed empty — and records it.
    void place_candy(int r, int c, int flavor) {
        board[r][c] = flavor;
        candies_list.push_back({r, c, flavor});
    }

    // Returns the p-th empty cell (1-indexed) in row-major scan order,
    // or {-1, -1} if fewer than p cells are empty.
    std::pair<int, int> find_pth_empty_cell(int p_1_indexed) const {
        int count = 0;
        for (int r_idx = 0; r_idx < GRID_SIZE; ++r_idx) {
            for (int c_idx = 0; c_idx < GRID_SIZE; ++c_idx) {
                if (board[r_idx][c_idx] == 0) {
                    count++;
                    if (count == p_1_indexed) {
                        return {r_idx, c_idx};
                    }
                }
            }
        }
        return {-1, -1};
    }

    // Empty-cell count derived from the candy list (O(1), no board scan).
    int count_empty_cells() const {
        return GRID_SIZE * GRID_SIZE - static_cast<int>(candies_list.size());
    }

    // Slides all candies in the given direction (stable compaction per row or
    // column), then rebuilds candies_list from the board.
    void apply_tilt(int dir_idx) {
        if (dir_idx == 0) { // F (Up)
            for (int c = 0; c < GRID_SIZE; ++c) {
                int current_write_r = 0;
                for (int r = 0; r < GRID_SIZE; ++r) {
                    if (board[r][c] != 0) {
                        if (r != current_write_r) {
                            board[current_write_r][c] = board[r][c];
                            board[r][c] = 0;
                        }
                        current_write_r++;
                    }
                }
            }
        } else if (dir_idx == 1) { // B (Down)
            for (int c = 0; c < GRID_SIZE; ++c) {
                int current_write_r = GRID_SIZE - 1;
                for (int r = GRID_SIZE - 1; r >= 0; --r) {
                    if (board[r][c] != 0) {
                        if (r != current_write_r) {
                            board[current_write_r][c] = board[r][c];
                            board[r][c] = 0;
                        }
                        current_write_r--;
                    }
                }
            }
        } else if (dir_idx == 2) { // L (Left)
            for (int r = 0; r < GRID_SIZE; ++r) {
                int current_write_c = 0;
                for (int c = 0; c < GRID_SIZE; ++c) {
                    if (board[r][c] != 0) {
                        if (c != current_write_c) {
                            board[r][current_write_c] = board[r][c];
                            board[r][c] = 0;
                        }
                        current_write_c++;
                    }
                }
            }
        } else { // R (Right, dir_idx == 3)
            for (int r = 0; r < GRID_SIZE; ++r) {
                int current_write_c = GRID_SIZE - 1;
                for (int c = GRID_SIZE - 1; c >= 0; --c) {
                    if (board[r][c] != 0) {
                        if (c != current_write_c) {
                            board[r][current_write_c] = board[r][c];
                            board[r][c] = 0;
                        }
                        current_write_c--;
                    }
                }
            }
        }
        rebuild_candies_list_from_board();
    }

    // Re-derives candies_list by scanning the board (used after a tilt, which
    // moves cells without updating the list incrementally).
    void rebuild_candies_list_from_board() {
        candies_list.clear();
        for (int r_idx = 0; r_idx < GRID_SIZE; ++r_idx) {
            for (int c_idx = 0; c_idx < GRID_SIZE; ++c_idx) {
                if (board[r_idx][c_idx] != 0) {
                    candies_list.push_back({r_idx, c_idx, board[r_idx][c_idx]});
                }
            }
        }
    }

    // Sum over all same-flavor connected components (4-neighborhood) of the
    // squared component size. Rewards large connected groups quadratically.
    // Uses an array-backed BFS queue to avoid heap allocation.
    long long calculate_sum_sq_comp_size() const {
        long long total_sq_sum = 0;
        std::array<std::array<bool, GRID_SIZE>, GRID_SIZE> visited;
        for (int i = 0; i < GRID_SIZE; ++i) visited[i].fill(false);

        std::array<std::pair<int, int>, GRID_SIZE * GRID_SIZE> q_arr;

        for (int r_start = 0; r_start < GRID_SIZE; ++r_start) {
            for (int c_start = 0; c_start < GRID_SIZE; ++c_start) {
                if (board[r_start][c_start] != 0 && !visited[r_start][c_start]) {
                    int current_flavor = board[r_start][c_start];
                    long long current_comp_size = 0;

                    q_arr[0] = {r_start, c_start};
                    visited[r_start][c_start] = true;
                    int head = 0;
                    int tail = 1;

                    while(head < tail){
                        current_comp_size++;
                        const std::pair<int,int>& curr_cell = q_arr[head];
                        const int curr_r = curr_cell.first;
                        const int curr_c = curr_cell.second;
                        head++;

                        for (int i = 0; i < NUM_DIRECTIONS; ++i) {
                            int nr = curr_r + DR[i];
                            int nc = curr_c + DC[i];
                            if (nr >= 0 && nr < GRID_SIZE && nc >= 0 && nc < GRID_SIZE &&
                                !visited[nr][nc] && board[nr][nc] == current_flavor) {
                                visited[nr][nc] = true;
                                q_arr[tail++] = {nr, nc};
                            }
                        }
                    }
                    total_sq_sum += current_comp_size * current_comp_size;
                }
            }
        }
        return total_sq_sum;
    }

    // Sum of Manhattan distances from each candy to its flavor's center of
    // mass. Lower is better (flavors clustered tightly). Singleton flavors
    // contribute nothing (counts[fl] > 1 guard).
    double calculate_distance_penalty_CoM() const {
        if (candies_list.empty()) return 0.0;

        std::array<double, NUM_FLAVORS + 1> sum_r; sum_r.fill(0.0);
        std::array<double, NUM_FLAVORS + 1> sum_c; sum_c.fill(0.0);
        std::array<int, NUM_FLAVORS + 1> counts; counts.fill(0);

        for (const auto& candy : candies_list) {
            counts[candy.flavor]++;
            sum_r[candy.flavor] += candy.r;
            sum_c[candy.flavor] += candy.c;
        }

        std::array<std::pair<double, double>, NUM_FLAVORS + 1> com_coords;
        for (int fl = 1; fl <= NUM_FLAVORS; ++fl) {
            if (counts[fl] > 0) {
                com_coords[fl] = {sum_r[fl] / counts[fl], sum_c[fl] / counts[fl]};
            }
        }

        double total_manhattan_dist_penalty = 0;
        for (const auto& candy : candies_list) {
            if (counts[candy.flavor] > 1) {
                const auto& com = com_coords[candy.flavor];
                total_manhattan_dist_penalty += std::abs(static_cast<double>(candy.r) - com.first) +
                                                std::abs(static_cast<double>(candy.c) - com.second);
            }
        }
        return total_manhattan_dist_penalty;
    }

    // Horizontal distance of each candy from its flavor's assigned column
    // strip (G_target_col_ranges). Zero for candies already inside the strip;
    // flavors without a valid strip (min > max) are skipped.
    double calculate_region_penalty() const {
        if (candies_list.empty()) return 0.0;
        double penalty = 0.0;
        for (const auto& candy : candies_list) {
            if (!G_flavor_active[candy.flavor]) continue;

            const auto& range = G_target_col_ranges[candy.flavor];
            int min_target_c = range.first;
            int max_target_c = range.second;

            if (min_target_c > max_target_c) continue;

            if (candy.c < min_target_c) {
                penalty += (min_target_c - candy.c);
            } else if (candy.c > max_target_c) {
                penalty += (candy.c - max_target_c);
            }
        }
        return penalty;
    }

    // Small bonus for candies that sit in their target strip AND touch the
    // board's top/bottom edge, or the left/right edge when their strip reaches
    // that side. Encourages packing strips against the border.
    double calculate_edge_bonus() const {
        double bonus_val = 0.0;
        const double PER_CANDY_BONUS_FACTOR = 0.5;

        for (const auto& candy : candies_list) {
            if (!G_flavor_active[candy.flavor]) continue;

            const auto& range = G_target_col_ranges[candy.flavor];
            int min_target_c = range.first;
            int max_target_c = range.second;

            if (min_target_c > max_target_c) continue;

            bool in_correct_strip = (candy.c >= min_target_c && candy.c <= max_target_c);

            if (in_correct_strip) {
                if (candy.r == 0 || candy.r == GRID_SIZE - 1) {
                    bonus_val += PER_CANDY_BONUS_FACTOR;
                }
                if ((candy.c == 0 && min_target_c == 0) ||
                    (candy.c == GRID_SIZE - 1 && max_target_c == GRID_SIZE - 1)) {
                    bonus_val += PER_CANDY_BONUS_FACTOR;
                }
            }
        }
        return bonus_val;
    }

    // Heuristic score: weighted combination of connectivity (rewarded),
    // center-of-mass spread and strip deviation (penalized), and edge bonus.
    // Weights are linear in the turn number — connectivity matters more late,
    // clustering/strip guidance more early. Constants are hand-tuned.
    double evaluate() const {
        if (candies_list.empty() && turn_num_1_indexed == 0) return 0.0;

        long long sum_sq_comp = calculate_sum_sq_comp_size();
        double dist_penalty_com = calculate_distance_penalty_CoM();
        double region_penalty_val = calculate_region_penalty();
        double edge_bonus_val = calculate_edge_bonus();

        double current_turn_double = static_cast<double>(turn_num_1_indexed);

        // Coefficients from Iteration 2 (best scoring), with small tweak to C
        double A_coeff_conn = 15.0 + 1.1 * current_turn_double;
        double B_coeff_com_base = std::max(0.0, 170.0 - 1.7 * current_turn_double);
        // Final iteration tweak for C_coeff_region_penalty_direct:
        double C_coeff_region_penalty_direct = std::max(2.0, 27.0 - 0.17 * current_turn_double);
        double D_coeff_edge_bonus = 5.0 + 0.2 * current_turn_double;

        return A_coeff_conn * sum_sq_comp
               - B_coeff_com_base * dist_penalty_com
               - C_coeff_region_penalty_direct * region_penalty_val
               + D_coeff_edge_bonus * edge_bonus_val;
    }
};
|
| 315 |
+
|
| 316 |
+
// Forward declaration
|
| 317 |
+
double eval_lookahead(const GameState& state_after_tilt, int turn_T_of_candy_just_processed, int depth_remaining);
|
| 318 |
+
|
| 319 |
+
char decide_tilt_direction_logic(const GameState& current_gs_after_placement) {
|
| 320 |
+
double best_overall_eval = std::numeric_limits<double>::lowest();
|
| 321 |
+
int best_dir_idx = 0;
|
| 322 |
+
|
| 323 |
+
int turn_T_for_lookahead_base = current_gs_after_placement.turn_num_1_indexed;
|
| 324 |
+
|
| 325 |
+
for (int i = 0; i < NUM_DIRECTIONS; ++i) {
|
| 326 |
+
GameState gs_after_tilt_T = current_gs_after_placement;
|
| 327 |
+
gs_after_tilt_T.apply_tilt(i);
|
| 328 |
+
|
| 329 |
+
double current_tilt_eval_for_dir_i = eval_lookahead(gs_after_tilt_T, turn_T_for_lookahead_base, MAX_LOOKAHEAD_DEPTH);
|
| 330 |
+
|
| 331 |
+
if (current_tilt_eval_for_dir_i > best_overall_eval) {
|
| 332 |
+
best_overall_eval = current_tilt_eval_for_dir_i;
|
| 333 |
+
best_dir_idx = i;
|
| 334 |
+
}
|
| 335 |
+
}
|
| 336 |
+
return DIR_CHARS[best_dir_idx];
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
// Expected-value lookahead. Given the state right after the tilt of turn T,
// samples placements for the next candy; for each sampled placement it takes
// the best of the four follow-up tilts (recursing one level shallower) and
// returns the average over samples. Depth exhaustion, end of game, or a full
// board all fall back to the static heuristic.
double eval_lookahead(const GameState& state_after_tilt, int turn_T_of_candy_just_processed, int depth_remaining) {
    if (depth_remaining == 0 || turn_T_of_candy_just_processed == NUM_TURNS) {
        return state_after_tilt.evaluate();
    }

    int num_empty = state_after_tilt.count_empty_cells();
    if (num_empty == 0) {
        return state_after_tilt.evaluate();
    }

    // G_FLAVOR_SEQUENCE is 0-indexed, so entry T is the candy for turn T+1.
    int next_candy_flavor = G_FLAVOR_SEQUENCE[turn_T_of_candy_just_processed];
    int sample_count_param_idx = MAX_LOOKAHEAD_DEPTH - depth_remaining;
    int sample_count_this_depth = NUM_SAMPLES_CONFIG[sample_count_param_idx];
    int actual_num_samples = std::min(sample_count_this_depth, num_empty);

    if (actual_num_samples == 0) {
        return state_after_tilt.evaluate();
    }

    double sum_over_sampled_placements = 0.0;
    for (int s = 0; s < actual_num_samples; ++s) {
        int p_val_1_indexed_sample;
        if (actual_num_samples == num_empty) {
            // Few empty cells left: enumerate every placement exactly once.
            p_val_1_indexed_sample = s + 1;
        } else {
            // Otherwise sample uniformly with replacement (duplicates possible).
            p_val_1_indexed_sample = rng.uniform_int(1, num_empty);
        }

        GameState S_after_placement = state_after_tilt;
        // p <= num_empty always holds here, so the lookup cannot return {-1,-1}.
        std::pair<int, int> candy_loc = S_after_placement.find_pth_empty_cell(p_val_1_indexed_sample);
        S_after_placement.place_candy(candy_loc.first, candy_loc.second, next_candy_flavor);
        S_after_placement.turn_num_1_indexed = turn_T_of_candy_just_processed + 1;

        // Opponent-free max over our own next tilt choice.
        double max_eval_for_this_placement = std::numeric_limits<double>::lowest();
        for (int dir_idx_next_tilt = 0; dir_idx_next_tilt < NUM_DIRECTIONS; ++dir_idx_next_tilt) {
            GameState S_after_next_tilt = S_after_placement;
            S_after_next_tilt.apply_tilt(dir_idx_next_tilt);
            double val = eval_lookahead(S_after_next_tilt, S_after_placement.turn_num_1_indexed, depth_remaining - 1);
            if (val > max_eval_for_this_placement) {
                max_eval_for_this_placement = val;
            }
        }
        sum_over_sampled_placements += max_eval_for_this_placement;
    }

    return sum_over_sampled_placements / actual_num_samples;
}
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
// Reads the flavor sequence from stdin, tallies per-flavor counts, and assigns
// each active flavor a contiguous vertical strip of columns whose width is
// proportional to its candy count (floor division, leftover columns handed out
// round-robin by priority: higher count first, lower id breaking ties).
void initialize_global_data() {
    G_flavor_total_counts.fill(0);
    for (int t = 0; t < NUM_TURNS; ++t) {
        std::cin >> G_FLAVOR_SEQUENCE[t];
        G_flavor_total_counts[G_FLAVOR_SEQUENCE[t]]++;
    }

    G_flavor_active.fill(false);
    // (count, flavor_id) pairs for the flavors that actually appear.
    std::vector<std::pair<int, int>> sorter_flavor_count_id;
    for (int fl = 1; fl <= NUM_FLAVORS; ++fl) {
        if (G_flavor_total_counts[fl] > 0) {
            G_flavor_active[fl] = true;
            sorter_flavor_count_id.push_back({G_flavor_total_counts[fl], fl});
        }
    }
    // Priority order: descending count, then ascending flavor id (deterministic).
    std::sort(sorter_flavor_count_id.begin(), sorter_flavor_count_id.end(),
              [](const std::pair<int, int>& a, const std::pair<int, int>& b) {
                  if (a.first != b.first) {
                      return a.first > b.first;
                  }
                  return a.second < b.second;
              });

    std::vector<int> active_flavor_ids_sorted_by_priority;
    for(const auto& p : sorter_flavor_count_id) {
        active_flavor_ids_sorted_by_priority.push_back(p.second);
    }

    std::vector<int> assigned_widths(NUM_FLAVORS + 1, 0);
    int total_assigned_width_sum = 0;

    if (!active_flavor_ids_sorted_by_priority.empty()) {
        double total_candies_for_proportion = 0;
        for(int fl_id : active_flavor_ids_sorted_by_priority) {
            total_candies_for_proportion += G_flavor_total_counts[fl_id];
        }
        if (total_candies_for_proportion == 0) total_candies_for_proportion = 1;

        // Base widths: floor of the proportional share of GRID_SIZE columns.
        for (int fl_id : active_flavor_ids_sorted_by_priority) {
            assigned_widths[fl_id] = static_cast<int>(std::floor(
                static_cast<double>(GRID_SIZE) * G_flavor_total_counts[fl_id] / total_candies_for_proportion
            ));
            total_assigned_width_sum += assigned_widths[fl_id];
        }

        // Distribute the flooring remainder one column at a time, cycling
        // through flavors in priority order.
        int remaining_width_to_assign = GRID_SIZE - total_assigned_width_sum;
        for (int i = 0; i < remaining_width_to_assign; ++i) {
            assigned_widths[active_flavor_ids_sorted_by_priority[i % active_flavor_ids_sorted_by_priority.size()]]++;
        }
    }

    // Lay the strips out left to right in priority order. Zero-width flavors
    // get the degenerate range {c, c-1} at the current cursor.
    int current_col_start = 0;
    for (int fl_id_in_sorted_order : active_flavor_ids_sorted_by_priority) {
        if (assigned_widths[fl_id_in_sorted_order] > 0) {
            G_target_col_ranges[fl_id_in_sorted_order] = {current_col_start, current_col_start + assigned_widths[fl_id_in_sorted_order] - 1};
            current_col_start += assigned_widths[fl_id_in_sorted_order];
        } else {
            G_target_col_ranges[fl_id_in_sorted_order] = {current_col_start, current_col_start - 1};
        }
    }

    // Inactive flavors get an explicit empty range.
    for (int fl = 1; fl <= NUM_FLAVORS; ++fl) {
        if (!G_flavor_active[fl]) {
            G_target_col_ranges[fl] = {0, -1};
        }
    }
}
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
int main() {
|
| 459 |
+
std::ios_base::sync_with_stdio(false);
|
| 460 |
+
std::cin.tie(NULL);
|
| 461 |
+
|
| 462 |
+
initialize_global_data();
|
| 463 |
+
|
| 464 |
+
GameState current_gs;
|
| 465 |
+
for (int t_0_indexed = 0; t_0_indexed < NUM_TURNS; ++t_0_indexed) {
|
| 466 |
+
current_gs.turn_num_1_indexed = t_0_indexed + 1;
|
| 467 |
+
|
| 468 |
+
int p_val_1_indexed;
|
| 469 |
+
std::cin >> p_val_1_indexed;
|
| 470 |
+
|
| 471 |
+
std::pair<int, int> candy_loc = current_gs.find_pth_empty_cell(p_val_1_indexed);
|
| 472 |
+
|
| 473 |
+
current_gs.place_candy(candy_loc.first, candy_loc.second, G_FLAVOR_SEQUENCE[t_0_indexed]);
|
| 474 |
+
|
| 475 |
+
char chosen_dir_char = decide_tilt_direction_logic(current_gs);
|
| 476 |
+
|
| 477 |
+
std::cout << chosen_dir_char << std::endl;
|
| 478 |
+
|
| 479 |
+
int dir_idx_to_apply = 0;
|
| 480 |
+
for(int k=0; k<NUM_DIRECTIONS; ++k) {
|
| 481 |
+
if(DIR_CHARS[k] == chosen_dir_char) {
|
| 482 |
+
dir_idx_to_apply = k;
|
| 483 |
+
break;
|
| 484 |
+
}
|
| 485 |
+
}
|
| 486 |
+
current_gs.apply_tilt(dir_idx_to_apply);
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
return 0;
|
| 490 |
+
}
|
| 491 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale_agent_best/ahc024.cpp
ADDED
|
@@ -0,0 +1,481 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#pragma GCC optimize("O3,unroll-loops")
|
| 3 |
+
|
| 4 |
+
#include <iostream>
|
| 5 |
+
#include <vector>
|
| 6 |
+
#include <map> // For temp_adj_deltas_map_global
|
| 7 |
+
#include <queue>
|
| 8 |
+
#include <algorithm> // For std::min, std::max, std::sort, std::unique, std::shuffle
|
| 9 |
+
#include <random> // For XorShift and std::shuffle
|
| 10 |
+
#include <chrono>
|
| 11 |
+
#include <utility> // For std::pair
|
| 12 |
+
#include <cmath> // For std::exp, std::pow
|
| 13 |
+
#include <climits> // For UINT_MAX
|
| 14 |
+
|
| 15 |
+
// --- Globals ---
const int N_FIXED = 50;   // grid is 50x50
const int M_FIXED = 100;  // Max ward ID, problem states M=100
// Working grid mutated by the search, and the best grid found so far
// (best = highest count of 0-cells, tracked in best_score_val).
std::vector<std::vector<int>> current_grid_state(N_FIXED, std::vector<int>(N_FIXED));
std::vector<std::vector<int>> best_grid_state(N_FIXED, std::vector<int>(N_FIXED));
int best_score_val = -1; // Stores count of 0-cells for the best state
|
| 22 |
+
|
| 23 |
+
// xorshift128 PRNG (Marsaglia), seeded from std::random_device. Also models
// UniformRandomBitGenerator (result_type/min/max/operator()) so it can be
// handed to std::shuffle directly.
struct XorShift {
    unsigned int x, y, z, w;

    XorShift() {
        // Seed all four state words from the OS entropy source.
        std::random_device rd;
        x = rd();
        y = rd();
        z = rd();
        w = rd();
        // An all-zero state is a fixed point of xorshift; make sure we avoid it.
        if ((x | y | z | w) == 0) w = 1;
    }

    // One xorshift128 step; returns the new value of w.
    unsigned int next_uint() {
        unsigned int t = x ^ (x << 11);
        t ^= t >> 8;
        x = y;
        y = z;
        z = w;
        w = (w ^ (w >> 19)) ^ t;
        return w;
    }

    // Uniform double in [0, 1).
    double next_double() {
        return (double)next_uint() / ((double)UINT_MAX + 1.0);
    }

    // Uniform int in [0, exclusive_max_val); non-positive bounds yield 0.
    int next_int(int exclusive_max_val) {
        return exclusive_max_val <= 0 ? 0 : (int)(next_uint() % exclusive_max_val);
    }

    // UniformRandomBitGenerator interface for std::shuffle.
    using result_type = unsigned int;
    static constexpr unsigned int min() { return 0; }
    static constexpr unsigned int max() { return UINT_MAX; }
    unsigned int operator()() { return next_uint(); }
};
XorShift rnd_gen; // Global instance
|
| 58 |
+
// Wall-clock reference captured at program start (static initialization time).
auto G_START_TIME = std::chrono::high_resolution_clock::now();

// Milliseconds elapsed since G_START_TIME, as a double.
double time_elapsed_ms() {
    using ms_t = std::chrono::duration<double, std::milli>;
    const auto now = std::chrono::high_resolution_clock::now();
    return ms_t(now - G_START_TIME).count();
}
|
| 64 |
+
|
| 65 |
+
struct AdjacencyInfo {
|
| 66 |
+
bool matrix[M_FIXED + 1][M_FIXED + 1];
|
| 67 |
+
AdjacencyInfo() {
|
| 68 |
+
for (int i = 0; i <= M_FIXED; ++i) for (int j = 0; j <= M_FIXED; ++j) matrix[i][j] = false;
|
| 69 |
+
}
|
| 70 |
+
void set_adj(int c1, int c2) {
|
| 71 |
+
if (c1 == c2) return;
|
| 72 |
+
matrix[std::min(c1, c2)][std::max(c1, c2)] = true;
|
| 73 |
+
}
|
| 74 |
+
bool is_adj(int c1, int c2) const {
|
| 75 |
+
if (c1 == c2) return false;
|
| 76 |
+
return matrix[std::min(c1, c2)][std::max(c1, c2)];
|
| 77 |
+
}
|
| 78 |
+
};
|
| 79 |
+
AdjacencyInfo required_adjacencies;
|
| 80 |
+
bool ward_has_any_req_adj[M_FIXED + 1];
|
| 81 |
+
|
| 82 |
+
struct BorderEdgeTracker {
|
| 83 |
+
int counts_arr[M_FIXED + 1][M_FIXED + 1];
|
| 84 |
+
BorderEdgeTracker() { clear(); }
|
| 85 |
+
void add_edge(int c1, int c2) {
|
| 86 |
+
if (c1 == c2) return;
|
| 87 |
+
counts_arr[std::min(c1, c2)][std::max(c1, c2)]++;
|
| 88 |
+
}
|
| 89 |
+
void remove_edge(int c1, int c2) {
|
| 90 |
+
if (c1 == c2) return;
|
| 91 |
+
counts_arr[std::min(c1, c2)][std::max(c1, c2)]--;
|
| 92 |
+
}
|
| 93 |
+
int get_count(int c1, int c2) const {
|
| 94 |
+
if (c1 == c2) return 0;
|
| 95 |
+
return counts_arr[std::min(c1, c2)][std::max(c1, c2)];
|
| 96 |
+
}
|
| 97 |
+
void clear() {
|
| 98 |
+
for (int i = 0; i <= M_FIXED; ++i) for (int j = 0; j <= M_FIXED; ++j) counts_arr[i][j] = 0;
|
| 99 |
+
}
|
| 100 |
+
};
|
| 101 |
+
BorderEdgeTracker current_border_edges_tracker;
|
| 102 |
+
|
| 103 |
+
// Per-color list of cell coordinates; together with pos_in_color_list this
// supports O(1) swap-removal of a cell from its color bucket.
std::vector<std::vector<std::pair<int, int>>> cells_by_color(M_FIXED + 1);
// pos_in_color_list[r][c]: index of (r, c) inside cells_by_color[color of (r,c)].
std::vector<std::vector<int>> pos_in_color_list(N_FIXED, std::vector<int>(N_FIXED));

// Generation-counter "visited" grid: a cell counts as visited in the current
// BFS iff its marker equals current_visit_marker, avoiding a full clear per BFS.
unsigned int visited_marker_grid[N_FIXED][N_FIXED];
unsigned int current_visit_marker = 0;

// Shared BFS queue, reused across calls to avoid repeated allocation.
std::queue<std::pair<int, int>> q_bfs_global;

// 4-neighborhood offsets: up, down, left, right.
const int DR[] = {-1, 1, 0, 0};
const int DC[] = {0, 0, -1, 1};
|
| 113 |
+
|
| 114 |
+
// True iff (r, c) lies inside the N_FIXED x N_FIXED grid.
inline bool is_cell_on_grid(int r, int c) {
    return 0 <= r && r < N_FIXED && 0 <= c && c < N_FIXED;
}
|
| 115 |
+
|
| 116 |
+
void increment_bfs_marker() {
|
| 117 |
+
current_visit_marker++;
|
| 118 |
+
if (current_visit_marker == 0) {
|
| 119 |
+
for (int i = 0; i < N_FIXED; ++i) {
|
| 120 |
+
for (int j = 0; j < N_FIXED; ++j) {
|
| 121 |
+
visited_marker_grid[i][j] = 0;
|
| 122 |
+
}
|
| 123 |
+
}
|
| 124 |
+
current_visit_marker = 1;
|
| 125 |
+
}
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
// Empties the shared BFS queue by swapping it with a fresh temporary
// (std::queue has no clear()).
void clear_global_bfs_queue() {
    std::queue<std::pair<int, int>>().swap(q_bfs_global);
}
|
| 132 |
+
|
| 133 |
+
// Appends (r, c) to its color's cell list and records the position so the
// cell can later be removed in O(1).
void add_cell_to_color_ds(int r, int c, int color) {
    auto& bucket = cells_by_color[color];
    bucket.emplace_back(r, c);
    pos_in_color_list[r][c] = bucket.size() - 1;
}
|
| 137 |
+
|
| 138 |
+
// O(1) swap-remove: the list's last cell overwrites (r, c)'s slot, that
// cell's recorded position is updated, then the tail is popped. List order is
// not preserved (which the callers do not rely on — lookups go through
// pos_in_color_list).
void remove_cell_from_color_ds(int r, int c, int color) {
    auto& bucket = cells_by_color[color];
    const int slot = pos_in_color_list[r][c];
    const std::pair<int, int> moved = bucket.back();

    bucket[slot] = moved;
    pos_in_color_list[moved.first][moved.second] = slot;

    bucket.pop_back();
}
|
| 147 |
+
|
| 148 |
+
// Builds every global from the initial grid: copies it into
// current_grid_state, fills the per-color cell lists, derives the required
// adjacency relation (color 0 represents the outside; border cells are
// adjacent to it), and seeds the live border-edge counts for the same grid.
void initialize_all_data_structures(const std::vector<std::vector<int>>& initial_grid) {
    required_adjacencies = AdjacencyInfo();
    current_border_edges_tracker.clear();
    for(int i=0; i <= M_FIXED; ++i) cells_by_color[i].clear();

    for (int i = 0; i < N_FIXED; ++i) {
        for (int j = 0; j < N_FIXED; ++j) {
            current_grid_state[i][j] = initial_grid[i][j];
            add_cell_to_color_ds(i, j, initial_grid[i][j]);
        }
    }

    for (int i = 0; i < N_FIXED; ++i) {
        for (int j = 0; j < N_FIXED; ++j) {
            int initial_color_val = initial_grid[i][j];
            // Required adjacencies: any border cell must stay adjacent to the
            // outside (color 0); right/down neighbors cover each interior pair once.
            if (i == 0 || i == N_FIXED - 1 || j == 0 || j == N_FIXED - 1) {
                required_adjacencies.set_adj(0, initial_color_val);
            }
            if (j + 1 < N_FIXED && initial_color_val != initial_grid[i][j+1]) {
                required_adjacencies.set_adj(initial_color_val, initial_grid[i][j+1]);
            }
            if (i + 1 < N_FIXED && initial_color_val != initial_grid[i+1][j]) {
                required_adjacencies.set_adj(initial_color_val, initial_grid[i+1][j]);
            }

            int current_color_val = current_grid_state[i][j];
            // Live border-edge counts. Each of the four border sides is counted
            // separately (a corner cell adds two edges to color 0) — this matches
            // the per-direction delta accounting used when a cell is recolored,
            // where an off-grid neighbor is treated as color 0 for each direction.
            if (i == 0) current_border_edges_tracker.add_edge(0, current_color_val);
            if (i == N_FIXED - 1) current_border_edges_tracker.add_edge(0, current_color_val);
            if (j == 0) current_border_edges_tracker.add_edge(0, current_color_val);
            if (j == N_FIXED - 1) current_border_edges_tracker.add_edge(0, current_color_val);

            if (j + 1 < N_FIXED && current_color_val != current_grid_state[i][j+1]) {
                current_border_edges_tracker.add_edge(current_color_val, current_grid_state[i][j+1]);
            }
            if (i + 1 < N_FIXED && current_color_val != current_grid_state[i+1][j]) {
                current_border_edges_tracker.add_edge(current_color_val, current_grid_state[i+1][j]);
            }
        }
    }

    // Mark colors that participate in at least one required adjacency; such a
    // color may not be erased from the grid entirely.
    for (int c1 = 0; c1 <= M_FIXED; ++c1) {
        ward_has_any_req_adj[c1] = false;
        for (int c2 = 0; c2 <= M_FIXED; ++c2) {
            if (c1 == c2) continue;
            if (required_adjacencies.is_adj(c1, c2)) {
                ward_has_any_req_adj[c1] = true;
                break;
            }
        }
    }

    // The initial grid is the first "best" solution; score = number of 0-cells.
    best_grid_state = current_grid_state;
    best_score_val = cells_by_color[0].size();
}
|
| 202 |
+
|
| 203 |
+
// BFS from one arbitrary cell of target_color over 4-connected same-color
// neighbors; the region is connected iff the BFS reaches every cell of that
// color. An empty region counts as connected. Uses the shared marker grid
// and queue (no per-call allocation).
bool check_region_connectivity_bfs(int target_color) {
    const auto& cells_of_target_color = cells_by_color[target_color];
    if (cells_of_target_color.empty()) return true;

    increment_bfs_marker();
    clear_global_bfs_queue();

    q_bfs_global.push(cells_of_target_color[0]);
    visited_marker_grid[cells_of_target_color[0].first][cells_of_target_color[0].second] = current_visit_marker;

    int count_visited_cells = 0;
    while (!q_bfs_global.empty()) {
        std::pair<int, int> curr = q_bfs_global.front();
        q_bfs_global.pop();
        count_visited_cells++;

        for (int k = 0; k < 4; ++k) {
            int nr = curr.first + DR[k];
            int nc = curr.second + DC[k];
            if (is_cell_on_grid(nr, nc) &&
                current_grid_state[nr][nc] == target_color &&
                visited_marker_grid[nr][nc] != current_visit_marker) {
                visited_marker_grid[nr][nc] = current_visit_marker;
                q_bfs_global.push({nr, nc});
            }
        }
    }
    // Connected iff the component containing cell 0 covers every listed cell.
    return count_visited_cells == cells_of_target_color.size();
}
|
| 232 |
+
|
| 233 |
+
// Color 0 is the "outside" ward, so its cells must all be connected to the
// grid boundary: BFS is seeded from every 0-cell on the boundary, and the
// region is valid iff every 0-cell gets reached. No 0-cells at all is valid;
// 0-cells that exist only in the interior (no boundary seed) are invalid.
bool check_region_0_connectivity_full() {
    const auto& cells_c0 = cells_by_color[0];
    if (cells_c0.empty()) {
        return true;
    }

    increment_bfs_marker();
    clear_global_bfs_queue();

    // Seed the BFS with every boundary 0-cell (multi-source BFS).
    bool any_boundary_zero_cell_found = false;
    for (const auto& cell_coord : cells_c0) {
        int r = cell_coord.first;
        int c = cell_coord.second;
        if (r == 0 || r == N_FIXED - 1 || c == 0 || c == N_FIXED - 1) {
            if (visited_marker_grid[r][c] != current_visit_marker) {
                q_bfs_global.push(cell_coord);
                visited_marker_grid[r][c] = current_visit_marker;
            }
            any_boundary_zero_cell_found = true;
        }
    }

    // 0-cells exist but none touch the boundary: region 0 is cut off.
    if (!any_boundary_zero_cell_found) {
        return false;
    }

    while (!q_bfs_global.empty()) {
        std::pair<int, int> curr = q_bfs_global.front();
        q_bfs_global.pop();

        for (int k_dir = 0; k_dir < 4; ++k_dir) {
            int nr = curr.first + DR[k_dir];
            int nc = curr.second + DC[k_dir];
            if (is_cell_on_grid(nr, nc) &&
                current_grid_state[nr][nc] == 0 &&
                visited_marker_grid[nr][nc] != current_visit_marker) {
                visited_marker_grid[nr][nc] = current_visit_marker;
                q_bfs_global.push({nr, nc});
            }
        }
    }

    // Every 0-cell must have been reached from the boundary.
    for (const auto& cell_coord : cells_c0) {
        if (visited_marker_grid[cell_coord.first][cell_coord.second] != current_visit_marker) {
            return false;
        }
    }
    return true;
}
|
| 282 |
+
|
| 283 |
+
// Scratch map reused per recoloring attempt: (color pair) -> net change in
// that pair's border-edge count caused by the candidate cell change.
std::map<std::pair<int, int>, int> temp_adj_deltas_map_global;
|
| 284 |
+
|
| 285 |
+
// Tentatively recolors cell (r, c) from old_color to new_color, updates all
// incremental data structures, and validates every solution invariant
// (required border adjacencies, non-empty required wards, region
// connectivity, connectivity of region 0). If any check fails, every
// mutation is rolled back and false is returned; otherwise the change is
// kept and true is returned.
bool attempt_change_cell_color_and_validate(int r, int c, int old_color, int new_color) {
    // Apply the recoloring to the grid and the per-color cell sets.
    current_grid_state[r][c] = new_color;
    remove_cell_from_color_ds(r, c, old_color);
    add_cell_to_color_ds(r, c, new_color);

    // Compute, per unordered color pair, the net change in border-edge count
    // caused by this single-cell recoloring. Off-grid neighbors are treated
    // as color 0. The deltas are kept in a global scratch map so the caller
    // (and the rollback path below) can undo them.
    temp_adj_deltas_map_global.clear();
    for (int k_adj=0; k_adj<4; ++k_adj) {
        int nr = r + DR[k_adj];
        int nc = c + DC[k_adj];
        int neighbor_actual_color = is_cell_on_grid(nr,nc) ? current_grid_state[nr][nc] : 0;

        // Edge between old_color and the neighbor disappears...
        if (old_color != neighbor_actual_color) {
            temp_adj_deltas_map_global[{std::min(old_color, neighbor_actual_color), std::max(old_color, neighbor_actual_color)}]--;
        }
        // ...and an edge between new_color and the neighbor appears.
        if (new_color != neighbor_actual_color) {
            temp_adj_deltas_map_global[{std::min(new_color, neighbor_actual_color), std::max(new_color, neighbor_actual_color)}]++;
        }
    }
    // Apply the accumulated deltas to the border-edge multiset tracker.
    for(const auto& entry : temp_adj_deltas_map_global) {
        int c1 = entry.first.first; int c2 = entry.first.second; int delta = entry.second;
        if (delta > 0) for(int i=0; i<delta; ++i) current_border_edges_tracker.add_edge(c1,c2);
        else for(int i=0; i<-delta; ++i) current_border_edges_tracker.remove_edge(c1,c2);
    }

    bool is_change_valid = true;

    // Check 1: every color pair whose edge count changed must still match the
    // required adjacency relation exactly (edge present iff adjacency required).
    for(const auto& entry : temp_adj_deltas_map_global) {
        int c1 = entry.first.first; int c2 = entry.first.second;
        bool has_edge_now = current_border_edges_tracker.get_count(c1, c2) > 0;
        bool needs_edge = required_adjacencies.is_adj(c1, c2);
        if (has_edge_now != needs_edge) {
            is_change_valid = false; break;
        }
    }

    // Check 2: a non-zero ward that participates in required adjacencies must
    // not become empty.
    if (is_change_valid && old_color != 0 && cells_by_color[old_color].empty() && ward_has_any_req_adj[old_color]) {
        is_change_valid = false;
    }

    // Check 3: the shrunken old-color region (if still non-empty) must remain connected.
    if (is_change_valid && old_color != 0 && !cells_by_color[old_color].empty()) {
        if (!check_region_connectivity_bfs(old_color)) is_change_valid = false;
    }

    // Check 4: the grown new-color region must remain connected.
    if (is_change_valid && new_color != 0) {
        if (!check_region_connectivity_bfs(new_color)) is_change_valid = false;
    }

    // Check 5: when region 0 gained or lost a cell, re-verify its full
    // (boundary-anchored) connectivity; an empty region 0 is acceptable only
    // if it has no required adjacencies.
    if (is_change_valid && (old_color == 0 || new_color == 0)) {
        if (!cells_by_color[0].empty()) {
            if (!check_region_0_connectivity_full()) is_change_valid = false;
        } else {
            if (ward_has_any_req_adj[0]) {
                is_change_valid = false;
            }
        }
    }

    // Rollback: restore the grid cell, the per-color sets, and the border-edge
    // counts by replaying the recorded deltas in reverse.
    if (!is_change_valid) {
        current_grid_state[r][c] = old_color;
        remove_cell_from_color_ds(r, c, new_color);
        add_cell_to_color_ds(r, c, old_color);

        for(const auto& entry : temp_adj_deltas_map_global) {
            int c1_ = entry.first.first; int c2_ = entry.first.second; int delta = entry.second;
            if (delta > 0) for(int i=0; i<delta; ++i) current_border_edges_tracker.remove_edge(c1_,c2_);
            else for(int i=0; i<-delta; ++i) current_border_edges_tracker.add_edge(c1_,c2_);
        }
        return false;
    }
    return true;
}
|
| 356 |
+
|
| 357 |
+
// Main solver: reads the N_FIXED x N_FIXED grid, runs a short greedy pass
// that tries to turn cells into color 0 (the score metric is the number of
// 0-cells), then runs simulated annealing until the time budget is spent,
// and finally prints the best grid found.
void solve_main_logic() {
    std::vector<std::vector<int>> initial_grid_from_input(N_FIXED, std::vector<int>(N_FIXED));
    for (int i = 0; i < N_FIXED; ++i) for (int j = 0; j < N_FIXED; ++j) std::cin >> initial_grid_from_input[i][j];

    initialize_all_data_structures(initial_grid_from_input);

    // --- Greedy pass: visit cells in random order and zero them where the
    // --- validated change keeps all invariants. Hard-capped at 300 ms.
    const double GREEDY_PASS_BUDGET_MS = 300.0;
    double greedy_pass_start_abs_time = time_elapsed_ms();

    std::vector<std::pair<int,int>> all_cells_shuffled;
    all_cells_shuffled.reserve(N_FIXED * N_FIXED);
    for(int r_idx=0; r_idx<N_FIXED; ++r_idx) for(int c_idx=0; c_idx<N_FIXED; ++c_idx) all_cells_shuffled.push_back({r_idx,c_idx});

    std::shuffle(all_cells_shuffled.begin(), all_cells_shuffled.end(), rnd_gen);
    for (const auto& cell_coords : all_cells_shuffled) {
        if (time_elapsed_ms() - greedy_pass_start_abs_time > GREEDY_PASS_BUDGET_MS) break;

        int r = cell_coords.first; int c = cell_coords.second;
        int original_color = current_grid_state[r][c];
        if (original_color == 0) continue;  // already zero, nothing to gain

        // A successful validated change is kept permanently in the greedy pass.
        if (attempt_change_cell_color_and_validate(r, c, original_color, 0)) {
            int current_zeros_count = cells_by_color[0].size();
            if (current_zeros_count > best_score_val) {
                best_score_val = current_zeros_count;
                best_grid_state = current_grid_state;
            }
        }
    }

    // --- Simulated annealing: exponential cooling from start to end
    // --- temperature over the remaining time budget.
    double sa_start_temp = 2.0;
    double sa_end_temp = 0.01;
    const double TOTAL_COMPUTATION_TIME_MS = 1950.0;

    double sa_start_abs_time = time_elapsed_ms();
    double sa_total_duration_ms = TOTAL_COMPUTATION_TIME_MS - sa_start_abs_time;
    if (sa_total_duration_ms <= 0) sa_total_duration_ms = 1.0;  // avoid division by <= 0

    int iter_count = 0;
    while(true) {
        iter_count++;
        // Time check only every 256 iterations to keep the clock call cheap.
        if(iter_count % 256 == 0) {
            if (time_elapsed_ms() >= TOTAL_COMPUTATION_TIME_MS) break;
        }

        double time_spent_in_sa = time_elapsed_ms() - sa_start_abs_time;
        double progress_ratio = (sa_total_duration_ms > 1e-9) ? (time_spent_in_sa / sa_total_duration_ms) : 1.0;
        progress_ratio = std::min(progress_ratio, 1.0);

        // Geometric (exponential) cooling schedule, floored at the end temperature.
        double current_temperature = sa_start_temp * std::pow(sa_end_temp / sa_start_temp, progress_ratio);
        current_temperature = std::max(current_temperature, sa_end_temp);

        // Pick a random cell and propose one of: color 0, or a neighbor's
        // color (off-grid neighbors propose 0 as well).
        int r_coord = rnd_gen.next_int(N_FIXED);
        int c_coord = rnd_gen.next_int(N_FIXED);
        int original_color_at_cell = current_grid_state[r_coord][c_coord];

        int candidate_new_colors[5];
        int num_candidate_options = 0;
        candidate_new_colors[num_candidate_options++] = 0;
        for(int k_neighbor_idx=0; k_neighbor_idx<4; ++k_neighbor_idx) {
            int nr = r_coord + DR[k_neighbor_idx];
            int nc = c_coord + DC[k_neighbor_idx];
            if (is_cell_on_grid(nr,nc)) {
                candidate_new_colors[num_candidate_options++] = current_grid_state[nr][nc];
            } else {
                candidate_new_colors[num_candidate_options++] = 0;
            }
        }
        int new_proposed_color = candidate_new_colors[rnd_gen.next_int(num_candidate_options)];

        if (original_color_at_cell == new_proposed_color) continue;  // no-op move

        // Score delta: +1 when a cell becomes 0, -1 when a 0-cell is lost.
        int delta_in_score_metric = 0;
        if (new_proposed_color == 0 && original_color_at_cell != 0) delta_in_score_metric = 1;
        else if (new_proposed_color != 0 && original_color_at_cell == 0) delta_in_score_metric = -1;

        if (attempt_change_cell_color_and_validate(r_coord, c_coord, original_color_at_cell, new_proposed_color)) {
            bool accept_this_move = false;
            if (delta_in_score_metric >= 0) {
                // Never-worse moves are always accepted; track the best state.
                accept_this_move = true;
                if (cells_by_color[0].size() > best_score_val) {
                    best_score_val = cells_by_color[0].size();
                    best_grid_state = current_grid_state;
                }
            } else {
                // Worsening moves are accepted with the Metropolis probability
                // exp(delta / T).
                if (current_temperature > 1e-9 && rnd_gen.next_double() < std::exp((double)delta_in_score_metric / current_temperature)) {
                    accept_this_move = true;
                } else {
                    accept_this_move = false;
                }
            }

            // Rejected move: undo the grid/set change and replay the border
            // edge deltas (left in temp_adj_deltas_map_global by the
            // attempt call) in reverse.
            if (!accept_this_move) {
                current_grid_state[r_coord][c_coord] = original_color_at_cell;
                remove_cell_from_color_ds(r_coord, c_coord, new_proposed_color);
                add_cell_to_color_ds(r_coord, c_coord, original_color_at_cell);

                for(const auto& entry : temp_adj_deltas_map_global) {
                    int c1_ = entry.first.first; int c2_ = entry.first.second; int delta = entry.second;
                    if (delta > 0) for(int i=0; i<delta; ++i) current_border_edges_tracker.remove_edge(c1_,c2_);
                    else for(int i=0; i<-delta; ++i) current_border_edges_tracker.add_edge(c1_,c2_);
                }
            }
        }
    }

    // Output the best grid found, space-separated, one row per line.
    for (int i = 0; i < N_FIXED; ++i) {
        for (int j = 0; j < N_FIXED; ++j) {
            std::cout << best_grid_state[i][j] << (j == N_FIXED - 1 ? "" : " ");
        }
        std::cout << std::endl;
    }
}
|
| 470 |
+
|
| 471 |
+
int main() {
|
| 472 |
+
std::ios_base::sync_with_stdio(false); std::cin.tie(NULL);
|
| 473 |
+
G_START_TIME = std::chrono::high_resolution_clock::now();
|
| 474 |
+
|
| 475 |
+
int n_in_dummy, m_in_dummy;
|
| 476 |
+
std::cin >> n_in_dummy >> m_in_dummy;
|
| 477 |
+
|
| 478 |
+
solve_main_logic();
|
| 479 |
+
return 0;
|
| 480 |
+
}
|
| 481 |
+
# EVOLVE-BLOCK-END
|
benchmarks/ale_bench/ale_agent_best/ahc025.cpp
ADDED
|
@@ -0,0 +1,628 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# EVOLVE-BLOCK-START
|
| 2 |
+
#include <iostream>
|
| 3 |
+
#include <vector>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <numeric>
|
| 6 |
+
#include <algorithm>
|
| 7 |
+
#include <iomanip>
|
| 8 |
+
#include <cmath>
|
| 9 |
+
#include <set>
|
| 10 |
+
#include <map>
|
| 11 |
+
#include <chrono>
|
| 12 |
+
#include <random>
|
| 13 |
+
|
| 14 |
+
// Timer: wall-clock anchor for the whole run; time_limit_ms bounds total runtime.
std::chrono::steady_clock::time_point program_start_time;
std::chrono::milliseconds time_limit_ms(1850);

// Global problem parameters and query counter/cache.
// N items, D groups, Q total query budget (read from stdin in main).
int N_items_global, D_groups_global, Q_total_global;
// Number of interactive queries issued so far; must never exceed Q_total_global.
int queries_made = 0;

// Cache of 1-vs-1 comparison results, keyed by (min_idx, max_idx); the stored
// char is oriented as "min_idx vs max_idx".
std::map<std::pair<int, int>, char> comparison_results_cache_1v1;
// Cache of 1-vs-2 comparison results: LHS item index -> (sorted RHS index pair)
// -> result char for "LHS vs (RHS pair)".
std::map<int, std::map<std::pair<int, int>, char>> comparison_results_cache_1v2_specific;

// Shared PRNG, seeded from the clock in main.
std::mt19937 rng_engine;
|
| 27 |
+
// Function to perform a query via standard I/O
|
| 28 |
+
// Emits one interactive query on stdout in the judge's format
// ("|L| |R| l1 ... r1 ...") and reads back the verdict character
// ('<', '>', or '='). Increments the global query counter as a side effect.
char perform_query_actual(const std::vector<int>& L_items, const std::vector<int>& R_items) {
    ++queries_made;
    // Debug: #c assignments_array[0] ... assignments_array[N-1]
    // std::cout << "# Query " << queries_made << std::endl;

    std::cout << L_items.size() << " " << R_items.size();
    auto emit_indices = [](const std::vector<int>& items) {
        for (int idx : items) {
            std::cout << " " << idx;
        }
    };
    emit_indices(L_items);
    emit_indices(R_items);
    std::cout << std::endl;  // endl also flushes, so the judge sees the query

    char verdict;
    std::cin >> verdict;
    return verdict;
}
|
| 45 |
+
|
| 46 |
+
// Compares the weights of two single items via a (cached) interactive query.
// Returns '<', '>', or '=' oriented as "item_idx1 vs item_idx2". Falls back
// to '=' once the query budget is exhausted.
char compare_single_items(int item_idx1, int item_idx2) {
    if (item_idx1 == item_idx2) return '=';  // same item: trivially equal, no query

    // Cache key is always the (smaller, larger) index pair so each unordered
    // pair costs at most one real query.
    std::pair<int, int> query_pair_key = {std::min(item_idx1, item_idx2), std::max(item_idx1, item_idx2)};

    auto it = comparison_results_cache_1v1.find(query_pair_key);
    if (it != comparison_results_cache_1v1.end()) {
        char cached_res = it->second;
        // Cached result is oriented (min vs max); flip it when the caller
        // asked in the opposite orientation.
        if (item_idx1 == query_pair_key.first) return cached_res;
        return (cached_res == '<' ? '>' : (cached_res == '>' ? '<' : '='));
    }

    // Budget exhausted: best-effort answer, intentionally not cached.
    if (queries_made >= Q_total_global) {
        return '=';
    }

    char res_direct = perform_query_actual({item_idx1}, {item_idx2});

    // Normalize the stored result to (min vs max) orientation before caching.
    if (item_idx1 < item_idx2) {
        comparison_results_cache_1v1[query_pair_key] = res_direct;
    } else {
        char reversed_res = (res_direct == '<' ? '>' : (res_direct == '>' ? '<' : '='));
        comparison_results_cache_1v1[query_pair_key] = reversed_res;
    }
    return res_direct;
}
|
| 72 |
+
|
| 73 |
+
// Compares one item against the combined weight of two other items via a
// (cached) interactive query with L = {item_curr}, R = {item_prev, item_s_aux}.
// Returns '<', '>', or '=' for "item_curr vs (item_prev + item_s_aux)";
// falls back to '=' once the query budget is exhausted.
char compare_1v2_items_specific(int item_curr, int item_prev, int item_s_aux) {
    // Assuming item_curr, item_prev, item_s_aux are distinct indices as per problem context
    // L = {item_curr}, R = {item_prev, item_s_aux}
    // L and R must be disjoint, already true. Each set non-empty.
    // Items within R must be distinct (item_prev != item_s_aux). This is handled by caller logic in X_j estimation.

    // Normalize the RHS pair to (min, max) so both orderings share one cache slot.
    std::pair<int, int> R_pair_key = {std::min(item_prev, item_s_aux), std::max(item_prev, item_s_aux)};

    auto it_LHS = comparison_results_cache_1v2_specific.find(item_curr);
    if (it_LHS != comparison_results_cache_1v2_specific.end()) {
        auto it_RHS = it_LHS->second.find(R_pair_key);
        if (it_RHS != it_LHS->second.end()) {
            return it_RHS->second;
        }
    }

    // Budget exhausted: best-effort answer, intentionally not cached.
    if (queries_made >= Q_total_global) {
        return '=';
    }

    char res_direct = perform_query_actual({item_curr}, {item_prev, item_s_aux});
    comparison_results_cache_1v2_specific[item_curr][R_pair_key] = res_direct;
    return res_direct;
}
|
| 97 |
+
|
| 98 |
+
void merge_for_sort(std::vector<int>& items_to_sort, int left, int mid, int right) {
|
| 99 |
+
int n1 = mid - left + 1;
|
| 100 |
+
int n2 = right - mid;
|
| 101 |
+
std::vector<int> L_half(n1), R_half(n2);
|
| 102 |
+
for (int i = 0; i < n1; i++) L_half[i] = items_to_sort[left + i];
|
| 103 |
+
for (int j = 0; j < n2; j++) R_half[j] = items_to_sort[mid + 1 + j];
|
| 104 |
+
|
| 105 |
+
int i = 0, j = 0, k = left;
|
| 106 |
+
while (i < n1 && j < n2) {
|
| 107 |
+
char cmp_res = compare_single_items(L_half[i], R_half[j]);
|
| 108 |
+
if (cmp_res == '<' || cmp_res == '=') {
|
| 109 |
+
items_to_sort[k++] = L_half[i++];
|
| 110 |
+
} else {
|
| 111 |
+
items_to_sort[k++] = R_half[j++];
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
while (i < n1) items_to_sort[k++] = L_half[i++];
|
| 115 |
+
while (j < n2) items_to_sort[k++] = R_half[j++];
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
void merge_sort_items(std::vector<int>& items_to_sort, int left, int right) {
|
| 119 |
+
if (left < right) {
|
| 120 |
+
int mid = left + (right - left) / 2;
|
| 121 |
+
merge_sort_items(items_to_sort, left, mid);
|
| 122 |
+
merge_sort_items(items_to_sort, mid + 1, right);
|
| 123 |
+
merge_for_sort(items_to_sort, left, mid, right);
|
| 124 |
+
}
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
// Nominal weight assigned to the first reference pivot; all other item
// weights are estimated relative to this scale.
long long BASE_WEIGHT = 100000;
|
| 128 |
+
|
| 129 |
+
// Base-2 logarithm clamped at zero: any value at or below 1.0 maps to 0.0,
// so the result is always non-negative.
double estimate_log2(double val) {
    return (val > 1.0) ? std::log2(val) : 0.0;
}
|
| 133 |
+
|
| 134 |
+
// Estimates the number of interactive queries the pivot-based scheme needs
// for N_val items and k_pivots_val pivots:
//   - sorting the pivots pairwise:          ~ k * log2(k)
//   - refining pivot j's weight by binary
//     search over the j-1 earlier pivots:   sum over j in [2, k) of log2(j-1)
//   - placing each non-pivot item among
//     the sorted pivots:                    (N - k) * log2(k)
// Returns the total rounded up; 0 for no pivots, and N-1 for the
// single-pivot scheme (one comparison per remaining item).
int calculate_estimated_query_cost(int N_val, int k_pivots_val) {
    if (k_pivots_val <= 0) return 0;
    if (k_pivots_val == 1) {
        return (N_val > 1) ? (N_val - 1) : 0;
    }

    double cost = 0;
    cost += static_cast<double>(k_pivots_val) * estimate_log2(static_cast<double>(k_pivots_val));
    for (int j = 2; j < k_pivots_val; ++j) {
        // j >= 2 here, so j - 1 >= 1 always; the former `if (j-1 > 0)` guard
        // was dead code and has been removed.
        cost += estimate_log2(static_cast<double>(j - 1));
    }
    cost += static_cast<double>(N_val - k_pivots_val) * estimate_log2(static_cast<double>(k_pivots_val));
    return static_cast<int>(std::ceil(cost));
}
|
| 148 |
+
|
| 149 |
+
// Population variance of the D_val group totals, reconstructed from the sum
// of squared group totals and the overall total weight. Returns a huge
// sentinel (1e18) for a non-positive group count, and clamps small negative
// results (floating-point round-off) to zero.
double calculate_variance_from_sums(double sum_sq_group_totals, double total_weight_double, int D_val) {
    if (D_val <= 0) return 1e18;

    const double mean = total_weight_double / D_val;
    const double raw_variance = sum_sq_group_totals / D_val - mean * mean;
    return raw_variance > 0.0 ? raw_variance : 0.0;
}
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
int main() {
|
| 158 |
+
std::ios_base::sync_with_stdio(false);
|
| 159 |
+
std::cin.tie(NULL);
|
| 160 |
+
|
| 161 |
+
program_start_time = std::chrono::steady_clock::now();
|
| 162 |
+
uint64_t random_seed = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch()).count();
|
| 163 |
+
rng_engine.seed(random_seed);
|
| 164 |
+
|
| 165 |
+
std::cin >> N_items_global >> D_groups_global >> Q_total_global;
|
| 166 |
+
|
| 167 |
+
std::vector<long long> estimated_weights(N_items_global);
|
| 168 |
+
|
| 169 |
+
int k_pivots_chosen = (N_items_global > 0) ? 1 : 0;
|
| 170 |
+
if (N_items_global > 1) {
|
| 171 |
+
for (int cur_k_val = N_items_global; cur_k_val >= 1; --cur_k_val) {
|
| 172 |
+
if (calculate_estimated_query_cost(N_items_global, cur_k_val) <= Q_total_global) {
|
| 173 |
+
k_pivots_chosen = cur_k_val;
|
| 174 |
+
break;
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
k_pivots_chosen = std::min(k_pivots_chosen, N_items_global);
|
| 179 |
+
if (N_items_global == 0) k_pivots_chosen = 0;
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
std::vector<int> pivot_item_indices(k_pivots_chosen);
|
| 183 |
+
if (k_pivots_chosen > 0) {
|
| 184 |
+
std::vector<int> all_item_indices_temp(N_items_global);
|
| 185 |
+
std::iota(all_item_indices_temp.begin(), all_item_indices_temp.end(), 0);
|
| 186 |
+
std::shuffle(all_item_indices_temp.begin(), all_item_indices_temp.end(), rng_engine);
|
| 187 |
+
for (int i = 0; i < k_pivots_chosen; ++i) pivot_item_indices[i] = all_item_indices_temp[i];
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
std::vector<int> sorted_pivot_item_indices = pivot_item_indices;
|
| 191 |
+
|
| 192 |
+
// Factors from previous attempt (more aggressive & symmetric):
|
| 193 |
+
const int FACTOR_GT_NUM = 200;
|
| 194 |
+
const int FACTOR_LT_NUM = 50;
|
| 195 |
+
const int FACTOR_XJ_FALLBACK_NUM = 100;
|
| 196 |
+
|
| 197 |
+
if (k_pivots_chosen == 0) {
|
| 198 |
+
for (int i = 0; i < N_items_global; ++i) estimated_weights[i] = BASE_WEIGHT;
|
| 199 |
+
} else if (k_pivots_chosen == 1) {
|
| 200 |
+
estimated_weights[pivot_item_indices[0]] = BASE_WEIGHT;
|
| 201 |
+
for (int i = 0; i < N_items_global; ++i) {
|
| 202 |
+
if (i == pivot_item_indices[0]) continue;
|
| 203 |
+
char res = compare_single_items(i, pivot_item_indices[0]);
|
| 204 |
+
if (res == '=') estimated_weights[i] = BASE_WEIGHT;
|
| 205 |
+
else if (res == '<') estimated_weights[i] = std::max(1LL, BASE_WEIGHT * FACTOR_LT_NUM / 100);
|
| 206 |
+
else estimated_weights[i] = std::max(1LL, BASE_WEIGHT * FACTOR_GT_NUM / 100);
|
| 207 |
+
}
|
| 208 |
+
} else { // k_pivots_chosen >= 2
|
| 209 |
+
merge_sort_items(sorted_pivot_item_indices, 0, k_pivots_chosen - 1);
|
| 210 |
+
|
| 211 |
+
int p0_idx = sorted_pivot_item_indices[0];
|
| 212 |
+
estimated_weights[p0_idx] = BASE_WEIGHT;
|
| 213 |
+
|
| 214 |
+
int p1_idx = sorted_pivot_item_indices[1];
|
| 215 |
+
char res_p1_vs_p0 = compare_single_items(p1_idx, p0_idx);
|
| 216 |
+
|
| 217 |
+
if (res_p1_vs_p0 == '=') {
|
| 218 |
+
estimated_weights[p1_idx] = estimated_weights[p0_idx];
|
| 219 |
+
} else if (res_p1_vs_p0 == '<') {
|
| 220 |
+
estimated_weights[p1_idx] = std::max(1LL, estimated_weights[p0_idx] * FACTOR_LT_NUM / 100);
|
| 221 |
+
} else {
|
| 222 |
+
estimated_weights[p1_idx] = std::max(1LL, estimated_weights[p0_idx] * FACTOR_GT_NUM / 100);
|
| 223 |
+
}
|
| 224 |
+
// Ensure monotonicity and strictness if comparison was strict
|
| 225 |
+
if (estimated_weights[p1_idx] < estimated_weights[p0_idx]) {
|
| 226 |
+
estimated_weights[p1_idx] = estimated_weights[p0_idx];
|
| 227 |
+
}
|
| 228 |
+
if (res_p1_vs_p0 == '>' && estimated_weights[p1_idx] == estimated_weights[p0_idx]) {
|
| 229 |
+
estimated_weights[p1_idx] = estimated_weights[p0_idx] + 1;
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
const long long MAX_XJ_INITIAL_HIGH_BOUND = BASE_WEIGHT * (1LL * N_items_global / std::max(1, D_groups_global) + 10); // Increased +5 to +10 for safety margin
|
| 233 |
+
|
| 234 |
+
for (int j = 2; j < k_pivots_chosen; ++j) {
|
| 235 |
+
int current_pivot_idx = sorted_pivot_item_indices[j];
|
| 236 |
+
int prev_pivot_idx = sorted_pivot_item_indices[j-1];
|
| 237 |
+
|
| 238 |
+
char res_curr_vs_prev = compare_single_items(current_pivot_idx, prev_pivot_idx);
|
| 239 |
+
if (res_curr_vs_prev == '=') {
|
| 240 |
+
estimated_weights[current_pivot_idx] = estimated_weights[prev_pivot_idx];
|
| 241 |
+
} else if (res_curr_vs_prev == '<') {
|
| 242 |
+
estimated_weights[current_pivot_idx] = std::max(1LL, estimated_weights[prev_pivot_idx] * FACTOR_LT_NUM / 100);
|
| 243 |
+
} else {
|
| 244 |
+
long long X_low_bound_val = 1;
|
| 245 |
+
long long X_high_bound_val = MAX_XJ_INITIAL_HIGH_BOUND;
|
| 246 |
+
bool x_low_modified = false;
|
| 247 |
+
bool x_high_modified = false;
|
| 248 |
+
|
| 249 |
+
int s_search_low_arr_idx = 0, s_search_high_arr_idx = j - 2;
|
| 250 |
+
|
| 251 |
+
int num_s_candidates = (s_search_high_arr_idx - s_search_low_arr_idx + 1);
|
| 252 |
+
int queries_for_this_Xj = 0;
|
| 253 |
+
if (num_s_candidates > 0) {
|
| 254 |
+
queries_for_this_Xj = static_cast<int>(std::ceil(estimate_log2(static_cast<double>(num_s_candidates))));
|
| 255 |
+
if (num_s_candidates == 1) queries_for_this_Xj = 1;
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
for(int bs_iter = 0; bs_iter < queries_for_this_Xj && queries_made < Q_total_global; ++bs_iter) {
|
| 259 |
+
if (s_search_low_arr_idx > s_search_high_arr_idx) break;
|
| 260 |
+
int s_mid_arr_idx = s_search_low_arr_idx + (s_search_high_arr_idx - s_search_low_arr_idx) / 2;
|
| 261 |
+
int item_s_aux_idx = sorted_pivot_item_indices[s_mid_arr_idx];
|
| 262 |
+
|
| 263 |
+
// Skip if s_aux is same as prev_pivot_idx; R items must be distinct for query.
|
| 264 |
+
// This should not happen if s_aux is chosen from p0...p_{j-2} and prev_pivot is p_{j-1}.
|
| 265 |
+
// if (item_s_aux_idx == prev_pivot_idx) continue; // Should not be necessary
|
| 266 |
+
|
| 267 |
+
char res_1v2 = compare_1v2_items_specific(current_pivot_idx, prev_pivot_idx, item_s_aux_idx);
|
| 268 |
+
|
| 269 |
+
if (res_1v2 == '=') {
|
| 270 |
+
X_low_bound_val = X_high_bound_val = estimated_weights[item_s_aux_idx];
|
| 271 |
+
x_low_modified = x_high_modified = true;
|
| 272 |
+
break;
|
| 273 |
+
} else if (res_1v2 == '<') {
|
| 274 |
+
X_high_bound_val = estimated_weights[item_s_aux_idx];
|
| 275 |
+
x_high_modified = true;
|
| 276 |
+
s_search_high_arr_idx = s_mid_arr_idx - 1;
|
| 277 |
+
} else { // res_1v2 == '>'
|
| 278 |
+
X_low_bound_val = estimated_weights[item_s_aux_idx];
|
| 279 |
+
x_low_modified = true;
|
| 280 |
+
s_search_low_arr_idx = s_mid_arr_idx + 1;
|
| 281 |
+
}
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
long long estimated_X_j;
|
| 285 |
+
if (x_low_modified && !x_high_modified) { // X_j > X_low_bound_val (max s_aux smaller than X_j)
|
| 286 |
+
estimated_X_j = X_low_bound_val * FACTOR_GT_NUM / 100;
|
| 287 |
+
} else if (!x_low_modified && x_high_modified) { // X_j < X_high_bound_val (min s_aux larger than X_j)
|
| 288 |
+
estimated_X_j = X_high_bound_val * FACTOR_LT_NUM / 100;
|
| 289 |
+
} else if (x_low_modified && x_high_modified) { // X_j is bracketed
|
| 290 |
+
// Reverted to ARITHMETIC MEAN for X_j
|
| 291 |
+
estimated_X_j = (X_low_bound_val + X_high_bound_val) / 2;
|
| 292 |
+
} else { // Fallback if binary search didn't narrow down X_j
|
| 293 |
+
estimated_X_j = estimated_weights[prev_pivot_idx] * FACTOR_XJ_FALLBACK_NUM / 100;
|
| 294 |
+
if (estimated_weights[prev_pivot_idx] > 0 && estimated_X_j == 0) estimated_X_j = 1;
|
| 295 |
+
else if (estimated_weights[prev_pivot_idx] == 0) {
|
| 296 |
+
estimated_X_j = std::max(1LL, BASE_WEIGHT * FACTOR_XJ_FALLBACK_NUM / 100);
|
| 297 |
+
}
|
| 298 |
+
}
|
| 299 |
+
estimated_X_j = std::max(1LL, estimated_X_j);
|
| 300 |
+
|
| 301 |
+
estimated_weights[current_pivot_idx] = estimated_weights[prev_pivot_idx] + estimated_X_j;
|
| 302 |
+
}
|
| 303 |
+
// Ensure monotonicity and strictness
|
| 304 |
+
if(estimated_weights[current_pivot_idx] < estimated_weights[prev_pivot_idx]) {
|
| 305 |
+
estimated_weights[current_pivot_idx] = estimated_weights[prev_pivot_idx];
|
| 306 |
+
}
|
| 307 |
+
if (res_curr_vs_prev == '>' && estimated_weights[current_pivot_idx] == estimated_weights[prev_pivot_idx]) {
|
| 308 |
+
estimated_weights[current_pivot_idx] = estimated_weights[prev_pivot_idx] + 1;
|
| 309 |
+
}
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
// Estimate weights for non-pivot items
|
| 313 |
+
for (int i=0; i<N_items_global; ++i) {
|
| 314 |
+
bool is_pivot_flag = false;
|
| 315 |
+
for(int p_idx_val=0; p_idx_val<k_pivots_chosen; ++p_idx_val) {
|
| 316 |
+
if(sorted_pivot_item_indices[p_idx_val] == i) {
|
| 317 |
+
is_pivot_flag = true;
|
| 318 |
+
break;
|
| 319 |
+
}
|
| 320 |
+
}
|
| 321 |
+
if (is_pivot_flag) continue;
|
| 322 |
+
|
| 323 |
+
int bs_low_arr_idx = 0, bs_high_arr_idx = k_pivots_chosen - 1;
|
| 324 |
+
int found_pivot_idx_for_eq = -1;
|
| 325 |
+
|
| 326 |
+
while(bs_low_arr_idx <= bs_high_arr_idx) {
|
| 327 |
+
if (queries_made >= Q_total_global && found_pivot_idx_for_eq == -1) break; // Stop if out of queries unless already found exact
|
| 328 |
+
int mid_p_arr_idx = bs_low_arr_idx + (bs_high_arr_idx - bs_low_arr_idx) / 2;
|
| 329 |
+
char res_item_vs_p = compare_single_items(i, sorted_pivot_item_indices[mid_p_arr_idx]);
|
| 330 |
+
|
| 331 |
+
if (res_item_vs_p == '=') {
|
| 332 |
+
found_pivot_idx_for_eq = mid_p_arr_idx;
|
| 333 |
+
break;
|
| 334 |
+
} else if (res_item_vs_p == '<') {
|
| 335 |
+
bs_high_arr_idx = mid_p_arr_idx - 1;
|
| 336 |
+
} else {
|
| 337 |
+
bs_low_arr_idx = mid_p_arr_idx + 1;
|
| 338 |
+
}
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
if (found_pivot_idx_for_eq != -1) {
|
| 342 |
+
estimated_weights[i] = estimated_weights[sorted_pivot_item_indices[found_pivot_idx_for_eq]];
|
| 343 |
+
continue;
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
int insert_pos_arr_idx = bs_low_arr_idx;
|
| 347 |
+
|
| 348 |
+
if (insert_pos_arr_idx == 0) { // Smaller than p0
|
| 349 |
+
long long w_p0 = estimated_weights[sorted_pivot_item_indices[0]];
|
| 350 |
+
if (k_pivots_chosen >= 2) {
|
| 351 |
+
long long w_p1 = estimated_weights[sorted_pivot_item_indices[1]];
|
| 352 |
+
// Ensure w_p1 != 0 before division, and w_p0 must be < w_p1 for this extrapolation to make sense
|
| 353 |
+
if (w_p1 > w_p0 && w_p0 > 0 && w_p1 != 0) { // w_p1 should not be 0 if weights are >=1
|
| 354 |
+
estimated_weights[i] = std::max(1LL, w_p0 * w_p0 / w_p1);
|
| 355 |
+
} else {
|
| 356 |
+
estimated_weights[i] = std::max(1LL, w_p0 * FACTOR_LT_NUM / 100);
|
| 357 |
+
}
|
| 358 |
+
} else { // Only p0 exists
|
| 359 |
+
estimated_weights[i] = std::max(1LL, w_p0 * FACTOR_LT_NUM / 100);
|
| 360 |
+
}
|
| 361 |
+
} else if (insert_pos_arr_idx == k_pivots_chosen) { // Larger than p_{k-1}
|
| 362 |
+
long long w_pk_1 = estimated_weights[sorted_pivot_item_indices[k_pivots_chosen-1]];
|
| 363 |
+
if (k_pivots_chosen >= 2) {
|
| 364 |
+
long long w_pk_2 = estimated_weights[sorted_pivot_item_indices[k_pivots_chosen-2]];
|
| 365 |
+
// Ensure w_pk_2 != 0 and w_pk_2 < w_pk_1
|
| 366 |
+
if (w_pk_1 > w_pk_2 && w_pk_2 > 0 && w_pk_2 != 0) { // w_pk_2 should not be 0
|
| 367 |
+
estimated_weights[i] = std::max(1LL, w_pk_1 * w_pk_1 / w_pk_2);
|
| 368 |
+
} else {
|
| 369 |
+
estimated_weights[i] = std::max(1LL, w_pk_1 * FACTOR_GT_NUM / 100);
|
| 370 |
+
}
|
| 371 |
+
} else { // Only p0 exists (which is p_{k-1} here)
|
| 372 |
+
estimated_weights[i] = std::max(1LL, w_pk_1 * FACTOR_GT_NUM / 100);
|
| 373 |
+
}
|
| 374 |
+
} else { // Between p_{idx-1} and p_{idx}
|
| 375 |
+
long long w_prev_p = estimated_weights[sorted_pivot_item_indices[insert_pos_arr_idx-1]];
|
| 376 |
+
long long w_next_p = estimated_weights[sorted_pivot_item_indices[insert_pos_arr_idx]];
|
| 377 |
+
// Geometric mean for interpolation is generally preferred for exponential-like data
|
| 378 |
+
if (w_prev_p > 0 && w_next_p > 0) {
|
| 379 |
+
estimated_weights[i] = static_cast<long long>(std::sqrt(static_cast<double>(w_prev_p) * w_next_p));
|
| 380 |
+
} else { // Fallback for safety or if one weight is zero (should be >=1)
|
| 381 |
+
estimated_weights[i] = (w_prev_p + w_next_p) / 2;
|
| 382 |
+
}
|
| 383 |
+
// Ensure estimate is within the bounds of the two pivots it's between
|
| 384 |
+
estimated_weights[i] = std::max(w_prev_p, estimated_weights[i]);
|
| 385 |
+
estimated_weights[i] = std::min(w_next_p, estimated_weights[i]);
|
| 386 |
+
}
|
| 387 |
+
if (estimated_weights[i] <=0) estimated_weights[i] = 1;
|
| 388 |
+
}
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
// Final check: all weights must be at least 1.
|
| 392 |
+
for(int i=0; i<N_items_global; ++i) {
|
| 393 |
+
if (estimated_weights[i] <= 0) {
|
| 394 |
+
// This state indicates a flaw in estimation logic or extreme case.
|
| 395 |
+
// Fallback to a reasonable default like BASE_WEIGHT or 1.
|
| 396 |
+
// Previous version used BASE_WEIGHT. Smallest possible is 1.
|
| 397 |
+
// Using 1 might be safer if other weights are also small.
|
| 398 |
+
// However, if most are large, BASE_WEIGHT might be better.
|
| 399 |
+
// Sticking to previous fallback.
|
| 400 |
+
estimated_weights[i] = BASE_WEIGHT;
|
| 401 |
+
}
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
// Exhaust remaining queries
|
| 405 |
+
int dummy_item_0_idx = 0;
|
| 406 |
+
int dummy_item_1_idx = 1;
|
| 407 |
+
// N_items_global >= 30, so 0 and 1 are valid and distinct indices.
|
| 408 |
+
while(queries_made < Q_total_global) {
|
| 409 |
+
perform_query_actual({dummy_item_0_idx}, {dummy_item_1_idx});
|
| 410 |
+
// Cycle one of the items to make queries slightly different, though not critical for correctness
|
| 411 |
+
dummy_item_1_idx = (dummy_item_1_idx + 1) % N_items_global;
|
| 412 |
+
if (dummy_item_1_idx == dummy_item_0_idx) { // Ensure distinctness
|
| 413 |
+
dummy_item_1_idx = (dummy_item_1_idx + 1) % N_items_global;
|
| 414 |
+
}
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
// --- Assignment Phase: Greedy followed by Simulated Annealing ---
|
| 418 |
+
std::vector<int> assignment_array(N_items_global);
|
| 419 |
+
std::vector<long long> group_sums_array(D_groups_global, 0);
|
| 420 |
+
long long total_sum_est_val = 0;
|
| 421 |
+
|
| 422 |
+
std::vector<std::vector<int>> group_items_indices(D_groups_global);
|
| 423 |
+
std::vector<int> item_pos_in_group_vector(N_items_global);
|
| 424 |
+
|
| 425 |
+
std::vector<std::pair<long long, int>> items_sorted_for_greedy(N_items_global);
|
| 426 |
+
for(int i=0; i<N_items_global; ++i) {
|
| 427 |
+
items_sorted_for_greedy[i] = {-estimated_weights[i], i};
|
| 428 |
+
}
|
| 429 |
+
std::sort(items_sorted_for_greedy.begin(), items_sorted_for_greedy.end());
|
| 430 |
+
|
| 431 |
+
for(int i=0; i<N_items_global; ++i) {
|
| 432 |
+
int item_actual_idx = items_sorted_for_greedy[i].second;
|
| 433 |
+
long long item_w = estimated_weights[item_actual_idx];
|
| 434 |
+
int best_grp_current = 0;
|
| 435 |
+
if (D_groups_global > 1) {
|
| 436 |
+
long long min_sum_in_group = group_sums_array[0];
|
| 437 |
+
// Small optimization: if multiple groups have same min_sum, pick one randomly or by index
|
| 438 |
+
// Current logic picks smallest index. This is fine.
|
| 439 |
+
for(int j=1; j<D_groups_global; ++j) {
|
| 440 |
+
if (group_sums_array[j] < min_sum_in_group) {
|
| 441 |
+
min_sum_in_group = group_sums_array[j];
|
| 442 |
+
best_grp_current = j;
|
| 443 |
+
}
|
| 444 |
+
}
|
| 445 |
+
}
|
| 446 |
+
assignment_array[item_actual_idx] = best_grp_current;
|
| 447 |
+
group_sums_array[best_grp_current] += item_w;
|
| 448 |
+
group_items_indices[best_grp_current].push_back(item_actual_idx);
|
| 449 |
+
item_pos_in_group_vector[item_actual_idx] = group_items_indices[best_grp_current].size() - 1;
|
| 450 |
+
total_sum_est_val += item_w;
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
double current_sum_sq_group_totals = 0;
|
| 454 |
+
for(long long s : group_sums_array) {
|
| 455 |
+
current_sum_sq_group_totals += static_cast<double>(s) * s;
|
| 456 |
+
}
|
| 457 |
+
double current_var = calculate_variance_from_sums(current_sum_sq_group_totals, static_cast<double>(total_sum_est_val), D_groups_global);
|
| 458 |
+
|
| 459 |
+
// SA Parameters
|
| 460 |
+
double T_initial_factor = 0.25;
|
| 461 |
+
double T = std::max(1.0, current_var * T_initial_factor);
|
| 462 |
+
if (total_sum_est_val > 0 && current_var < 1e-9 && D_groups_global > 0) {
|
| 463 |
+
T = std::max(1.0, static_cast<double>(total_sum_est_val) / std::max(1,N_items_global) * 0.1);
|
| 464 |
+
} else if (total_sum_est_val == 0 && D_groups_global > 0) {
|
| 465 |
+
T = std::max(1.0, static_cast<double>(BASE_WEIGHT) * N_items_global / D_groups_global * 0.01 );
|
| 466 |
+
}
|
| 467 |
+
if (D_groups_global <= 1) T = 0;
|
| 468 |
+
|
| 469 |
+
double cool_rate = 0.9999;
|
| 470 |
+
int sa_iters_count = 0;
|
| 471 |
+
std::uniform_real_distribution<double> unif_dist(0.0, 1.0);
|
| 472 |
+
int no_improvement_streak = 0;
|
| 473 |
+
const int REHEAT_STREAK_THRESH_FACTOR = N_items_global > 50 ? 10 : 20;
|
| 474 |
+
const int CHECK_TIME_INTERVAL = 256;
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
while(D_groups_global > 1 && N_items_global > 0) {
|
| 478 |
+
sa_iters_count++;
|
| 479 |
+
if (sa_iters_count % CHECK_TIME_INTERVAL == 0) {
|
| 480 |
+
auto time_now = std::chrono::steady_clock::now();
|
| 481 |
+
if (std::chrono::duration_cast<std::chrono::milliseconds>(time_now - program_start_time) >= time_limit_ms) {
|
| 482 |
+
break;
|
| 483 |
+
}
|
| 484 |
+
T *= cool_rate;
|
| 485 |
+
if (no_improvement_streak > N_items_global * REHEAT_STREAK_THRESH_FACTOR && T < current_var * 0.05 && current_var > 1.0 + 1e-9) {
|
| 486 |
+
T = std::max(1.0, current_var * T_initial_factor * 0.5);
|
| 487 |
+
no_improvement_streak = 0;
|
| 488 |
+
}
|
| 489 |
+
}
|
| 490 |
+
if (T < 1e-12 && current_var > 1e-9) T = 1e-9; // Floor T if var high but T too low
|
| 491 |
+
if (T < 1e-12 && current_var < (1.0 + 1e-9)) break; // Converged or T too low
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
int move_type_rand_val = rng_engine();
|
| 495 |
+
// Adjust probability of swap vs relocate: 1/3 swap, 2/3 relocate
|
| 496 |
+
bool try_swap_move = ( (move_type_rand_val % 3 == 0) );
|
| 497 |
+
|
| 498 |
+
if (!try_swap_move) { // Relocate an item
|
| 499 |
+
if (N_items_global == 0) continue;
|
| 500 |
+
int item_to_move_idx = rng_engine() % N_items_global;
|
| 501 |
+
int old_grp_idx = assignment_array[item_to_move_idx];
|
| 502 |
+
|
| 503 |
+
if (D_groups_global <=1) continue;
|
| 504 |
+
int new_grp_idx = rng_engine() % D_groups_global;
|
| 505 |
+
while(new_grp_idx == old_grp_idx) new_grp_idx = rng_engine() % D_groups_global;
|
| 506 |
+
|
| 507 |
+
long long item_w_val = estimated_weights[item_to_move_idx];
|
| 508 |
+
|
| 509 |
+
long long old_sum_grp_A = group_sums_array[old_grp_idx];
|
| 510 |
+
long long old_sum_grp_B = group_sums_array[new_grp_idx];
|
| 511 |
+
long long new_sum_grp_A = old_sum_grp_A - item_w_val;
|
| 512 |
+
long long new_sum_grp_B = old_sum_grp_B + item_w_val;
|
| 513 |
+
|
| 514 |
+
double new_sum_sq_group_totals_cand = current_sum_sq_group_totals;
|
| 515 |
+
new_sum_sq_group_totals_cand -= static_cast<double>(old_sum_grp_A)*old_sum_grp_A + static_cast<double>(old_sum_grp_B)*old_sum_grp_B;
|
| 516 |
+
new_sum_sq_group_totals_cand += static_cast<double>(new_sum_grp_A)*new_sum_grp_A + static_cast<double>(new_sum_grp_B)*new_sum_grp_B;
|
| 517 |
+
double new_var = calculate_variance_from_sums(new_sum_sq_group_totals_cand, static_cast<double>(total_sum_est_val), D_groups_global);
|
| 518 |
+
|
| 519 |
+
double delta_V = new_var - current_var;
|
| 520 |
+
|
| 521 |
+
if (delta_V < 0 || (T > 1e-12 && unif_dist(rng_engine) < std::exp(-delta_V / T)) ) {
|
| 522 |
+
current_var = new_var;
|
| 523 |
+
current_sum_sq_group_totals = new_sum_sq_group_totals_cand;
|
| 524 |
+
group_sums_array[old_grp_idx] = new_sum_grp_A;
|
| 525 |
+
group_sums_array[new_grp_idx] = new_sum_grp_B;
|
| 526 |
+
assignment_array[item_to_move_idx] = new_grp_idx;
|
| 527 |
+
|
| 528 |
+
int pos_in_old_vec = item_pos_in_group_vector[item_to_move_idx];
|
| 529 |
+
if (!group_items_indices[old_grp_idx].empty()) {
|
| 530 |
+
int last_item_in_old_grp_vec = group_items_indices[old_grp_idx].back();
|
| 531 |
+
if (item_to_move_idx != last_item_in_old_grp_vec) {
|
| 532 |
+
group_items_indices[old_grp_idx][pos_in_old_vec] = last_item_in_old_grp_vec;
|
| 533 |
+
item_pos_in_group_vector[last_item_in_old_grp_vec] = pos_in_old_vec;
|
| 534 |
+
}
|
| 535 |
+
group_items_indices[old_grp_idx].pop_back();
|
| 536 |
+
}
|
| 537 |
+
|
| 538 |
+
group_items_indices[new_grp_idx].push_back(item_to_move_idx);
|
| 539 |
+
item_pos_in_group_vector[item_to_move_idx] = group_items_indices[new_grp_idx].size() - 1;
|
| 540 |
+
|
| 541 |
+
if (delta_V < -1e-9) no_improvement_streak = 0; else no_improvement_streak++;
|
| 542 |
+
} else {
|
| 543 |
+
no_improvement_streak++;
|
| 544 |
+
}
|
| 545 |
+
} else { // Try swap move
|
| 546 |
+
if (D_groups_global <= 1) continue;
|
| 547 |
+
|
| 548 |
+
int grp1_idx = rng_engine() % D_groups_global;
|
| 549 |
+
int grp2_idx = rng_engine() % D_groups_global;
|
| 550 |
+
while(grp2_idx == grp1_idx) grp2_idx = rng_engine() % D_groups_global;
|
| 551 |
+
|
| 552 |
+
if(group_items_indices[grp1_idx].empty() || group_items_indices[grp2_idx].empty()) {
|
| 553 |
+
no_improvement_streak++;
|
| 554 |
+
continue;
|
| 555 |
+
}
|
| 556 |
+
|
| 557 |
+
int item1_original_idx = group_items_indices[grp1_idx][rng_engine() % group_items_indices[grp1_idx].size()];
|
| 558 |
+
int item2_original_idx = group_items_indices[grp2_idx][rng_engine() % group_items_indices[grp2_idx].size()];
|
| 559 |
+
|
| 560 |
+
long long w1 = estimated_weights[item1_original_idx];
|
| 561 |
+
long long w2 = estimated_weights[item2_original_idx];
|
| 562 |
+
|
| 563 |
+
// If w1 == w2, swap has no effect on sums, so delta_V = 0.
|
| 564 |
+
// This move is only useful if it helps escape local minimum for other reasons,
|
| 565 |
+
// or if it's accepted by chance and enables further moves.
|
| 566 |
+
// If w1 == w2, delta_V will be 0. Acceptance depends on T (always if T>0).
|
| 567 |
+
// No need to explicitly check for w1==w2.
|
| 568 |
+
|
| 569 |
+
long long old_sum_grp1 = group_sums_array[grp1_idx];
|
| 570 |
+
long long old_sum_grp2 = group_sums_array[grp2_idx];
|
| 571 |
+
long long new_sum_grp1 = old_sum_grp1 - w1 + w2;
|
| 572 |
+
long long new_sum_grp2 = old_sum_grp2 - w2 + w1;
|
| 573 |
+
|
| 574 |
+
double new_sum_sq_group_totals_cand = current_sum_sq_group_totals;
|
| 575 |
+
new_sum_sq_group_totals_cand -= static_cast<double>(old_sum_grp1)*old_sum_grp1 + static_cast<double>(old_sum_grp2)*old_sum_grp2;
|
| 576 |
+
new_sum_sq_group_totals_cand += static_cast<double>(new_sum_grp1)*new_sum_grp1 + static_cast<double>(new_sum_grp2)*new_sum_grp2;
|
| 577 |
+
double new_var = calculate_variance_from_sums(new_sum_sq_group_totals_cand, static_cast<double>(total_sum_est_val), D_groups_global);
|
| 578 |
+
|
| 579 |
+
double delta_V = new_var - current_var;
|
| 580 |
+
|
| 581 |
+
if (delta_V < 0 || (T > 1e-12 && unif_dist(rng_engine) < std::exp(-delta_V / T)) ) {
|
| 582 |
+
current_var = new_var;
|
| 583 |
+
current_sum_sq_group_totals = new_sum_sq_group_totals_cand;
|
| 584 |
+
group_sums_array[grp1_idx] = new_sum_grp1;
|
| 585 |
+
group_sums_array[grp2_idx] = new_sum_grp2;
|
| 586 |
+
|
| 587 |
+
assignment_array[item1_original_idx] = grp2_idx;
|
| 588 |
+
assignment_array[item2_original_idx] = grp1_idx;
|
| 589 |
+
|
| 590 |
+
// Update item tracking structures
|
| 591 |
+
int pos1_in_G1 = item_pos_in_group_vector[item1_original_idx];
|
| 592 |
+
// group_items_indices[grp1_idx] cannot be empty here as item1 was picked from it.
|
| 593 |
+
int back1_of_G1 = group_items_indices[grp1_idx].back();
|
| 594 |
+
if (item1_original_idx != back1_of_G1) {
|
| 595 |
+
group_items_indices[grp1_idx][pos1_in_G1] = back1_of_G1;
|
| 596 |
+
item_pos_in_group_vector[back1_of_G1] = pos1_in_G1;
|
| 597 |
+
}
|
| 598 |
+
group_items_indices[grp1_idx].pop_back();
|
| 599 |
+
|
| 600 |
+
int pos2_in_G2 = item_pos_in_group_vector[item2_original_idx];
|
| 601 |
+
int back2_of_G2 = group_items_indices[grp2_idx].back();
|
| 602 |
+
if (item2_original_idx != back2_of_G2) {
|
| 603 |
+
group_items_indices[grp2_idx][pos2_in_G2] = back2_of_G2;
|
| 604 |
+
item_pos_in_group_vector[back2_of_G2] = pos2_in_G2;
|
| 605 |
+
}
|
| 606 |
+
group_items_indices[grp2_idx].pop_back();
|
| 607 |
+
|
| 608 |
+
group_items_indices[grp2_idx].push_back(item1_original_idx);
|
| 609 |
+
item_pos_in_group_vector[item1_original_idx] = group_items_indices[grp2_idx].size() - 1;
|
| 610 |
+
|
| 611 |
+
group_items_indices[grp1_idx].push_back(item2_original_idx);
|
| 612 |
+
item_pos_in_group_vector[item2_original_idx] = group_items_indices[grp1_idx].size() - 1;
|
| 613 |
+
|
| 614 |
+
if (delta_V < -1e-9) no_improvement_streak = 0; else no_improvement_streak++;
|
| 615 |
+
} else {
|
| 616 |
+
no_improvement_streak++;
|
| 617 |
+
}
|
| 618 |
+
}
|
| 619 |
+
}
|
| 620 |
+
|
| 621 |
+
for (int i = 0; i < N_items_global; ++i) {
|
| 622 |
+
std::cout << assignment_array[i] << (i == N_items_global - 1 ? "" : " ");
|
| 623 |
+
}
|
| 624 |
+
std::cout << std::endl;
|
| 625 |
+
|
| 626 |
+
return 0;
|
| 627 |
+
}
|
| 628 |
+
# EVOLVE-BLOCK-END
|