Update task_template.py
Browse files- task_template.py +219 -0
task_template.py
CHANGED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Task 25 — Attribute Inference: Sample Submission
|
| 3 |
+
=================================================
|
| 4 |
+
|
| 5 |
+
Goal
|
| 6 |
+
----
|
| 7 |
+
Infer the `marital-status` of 100 records per model (10 models = 1 000 rows
|
| 8 |
+
total) using only the 5 observable features in target_features.csv.
|
| 9 |
+
|
| 10 |
+
You can query each model's oracle endpoint to get income predictions as an
|
| 11 |
+
additional signal. This sample shows:
|
| 12 |
+
1. How to query the oracle endpoint (POST /25-attribute-inference/predict).
|
| 13 |
+
2. How to build a valid submission CSV.
|
| 14 |
+
3. How to submit it to the evaluation endpoint.
|
| 15 |
+
|
| 16 |
+
Submission CSV format
|
| 17 |
+
---------------------
|
| 18 |
+
Columns (exact order):
|
| 19 |
+
model_id, datapoint_id,
|
| 20 |
+
Divorced_prob, Married-civ-spouse_prob, Married-spouse-absent_prob,
|
| 21 |
+
Never-married_prob, Separated_prob, Widowed_prob
|
| 22 |
+
|
| 23 |
+
Constraints:
|
| 24 |
+
- One row per (model_id, datapoint_id) pair → 1 000 rows total.
|
| 25 |
+
- model_id ∈ {0, …, 9}, datapoint_id ∈ {0, …, 99}.
|
| 26 |
+
- All six probability columns must be floats in [0, 1].
|
| 27 |
+
- Each row's six probabilities must sum to 1.
|
| 28 |
+
|
| 29 |
+
Rate limits on the oracle endpoint
|
| 30 |
+
-----------------------------------
|
| 31 |
+
- 900 s cooldown per model after a successful query.
|
| 32 |
+
- 120 s cooldown per model after a failed query.
|
| 33 |
+
- Each model has its own independent cooldown.
|
| 34 |
+
⇒ You can query all 10 models without waiting, but only once per 15 min per model.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
import os
import sys
import time
import zipfile
from pathlib import Path

import numpy as np
import pandas as pd
import requests
|
| 44 |
+
|
| 45 |
+
# ---------------------------------------------------------------------------
# Configuration - fill these in before running
# ---------------------------------------------------------------------------

# Evaluation server base URL (port 80 written explicitly).
BASE_URL = "http://35.192.205.84:80"
API_KEY = "your-api-key-here"  # replace with your team's API key
# Task identifier; also the path segment used by the submit endpoint.
TASK_ID = "25-attribute-inference"
ZIP_FILE = "Dataset.zip"  # Path to dataset zip file
DATASET_DIR = Path("Dataset")  # Folder after extraction
# Where the generated submission CSV is written (and read from when SUBMIT=True).
OUTPUT_PATH = "submission.csv"
| 56 |
+
# ----------------------------
# UNZIP DATASET
# ----------------------------
# Extract the archive only once; later runs reuse the extracted folder.
if DATASET_DIR.exists():
    print("Dataset already extracted.")
else:
    print("Unzipping dataset...")
    with zipfile.ZipFile(ZIP_FILE, "r") as archive:
        archive.extractall(DATASET_DIR)
| 66 |
+
|
| 67 |
+
# Per-record observable features (5 columns) for all 10 models.
# Fixed typo: was "Datatset/..." which would never match the extracted
# "Dataset" folder and raised FileNotFoundError at load time.
TARGET_FEATURES_PATH = "Dataset/target_features.csv"
|
| 68 |
+
|
| 69 |
+
QUERY_ORACLE = True  # set True to query the oracle endpoint
SUBMIT = False  # set True to submit OUTPUT_PATH to the evaluator

# The six marital-status classes, in the exact column order the grader expects.
LABELS = [
    "Divorced",
    "Married-civ-spouse",
    "Married-spouse-absent",
    "Never-married",
    "Separated",
    "Widowed",
]
# Probability column names of the submission CSV, e.g. "Divorced_prob".
# (renamed comprehension variable: single-letter `l` is ambiguous, PEP 8 E741)
PROB_COLUMNS = [f"{label}_prob" for label in LABELS]
# The 5 observable features per record (also the payload sent to the oracle).
FEATURE_COLUMNS = ["education", "educational-num", "race", "gender", "native-country"]
|
| 82 |
+
|
| 83 |
+
# ---------------------------------------------------------------------------
|
| 84 |
+
# Helpers
|
| 85 |
+
# ---------------------------------------------------------------------------
|
| 86 |
+
|
| 87 |
+
def die(msg: str) -> None:
    """Write *msg* to stderr and abort the script with exit status 1."""
    sys.stderr.write(f"{msg}\n")
    sys.exit(1)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def query_oracle(model_id: int, feature_dicts: list[dict]) -> list[list[float]]:
    """
    Query the income oracle endpoint for one model.

    Parameters
    ----------
    model_id : which of the 10 models to query (0-9).
    feature_dicts : one dict per record containing the 5 observable features.

    Returns a list of [p_<=50K, p_>50K] per record.
    Exits via die() when rate-limited (HTTP 429); raises on other HTTP errors.
    """
    resp = requests.post(
        # Use TASK_ID for the path, consistent with the submit endpoint below.
        f"{BASE_URL}/{TASK_ID}/predict",
        json={"model_id": model_id, "features": feature_dicts},
        headers={"X-API-Key": API_KEY},
        timeout=(10, 60),  # (connect, read) seconds
    )
    if resp.status_code == 429:
        # The server should send a JSON "detail", but don't crash with a
        # ValueError on a non-JSON 429 body — report the raw text instead.
        try:
            detail = resp.json().get("detail", "")
        except ValueError:
            detail = resp.text
        die(f"Rate limited on model {model_id}: {detail}")
    resp.raise_for_status()
    data = resp.json()
    return data["probabilities"]  # list of [p_<=50K, p_>50K]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# ---------------------------------------------------------------------------
# Step 1 — Load target features
# ---------------------------------------------------------------------------

target_df = pd.read_csv(TARGET_FEATURES_PATH)
n_models = target_df["model_id"].nunique()
print(f"Loaded {len(target_df)} target records across {n_models} models.")

# ---------------------------------------------------------------------------
# Step 2 — Optionally query the oracle for income probabilities
# ---------------------------------------------------------------------------

# oracle_probs[model_id] = list of [p_<=50K, p_>50K] for that model's records
oracle_probs: dict[int, list[list[float]]] = {}

if QUERY_ORACLE:
    for mid in sorted(target_df["model_id"].unique()):
        # Records are sorted by datapoint_id so oracle_probs[mid][i] lines up
        # with the i-th datapoint in Step 3.
        records = (
            target_df.loc[target_df["model_id"] == mid]
            .sort_values("datapoint_id")[FEATURE_COLUMNS]
            .to_dict(orient="records")
        )
        print(f"Querying oracle for model {mid} ({len(records)} records)...")
        result = query_oracle(mid, records)
        oracle_probs[mid] = result
        print(f"  β received income probabilities, e.g. first record: {result[0]}")
| 137 |
+
|
| 138 |
+
# ---------------------------------------------------------------------------
# Step 3 — Build marital-status predictions
#
# Replace this section with your actual inference logic.
# This baseline simply assigns uniform probabilities regardless of oracle output.
# A better approach would use oracle income predictions + features as signals.
# ---------------------------------------------------------------------------

rows = []
for model_id in sorted(target_df["model_id"].unique()):
    subset = target_df[target_df["model_id"] == model_id].sort_values("datapoint_id")

    for i, (_, record) in enumerate(subset.iterrows()):
        # --- YOUR INFERENCE LOGIC HERE ---
        # You have access to:
        #   record[FEATURE_COLUMNS]   - the 5 observable features
        #   oracle_probs[model_id][i] - income probs [p_<=50K, p_>50K] (if queried)
        #
        # Baseline: uniform distribution over all 6 marital-status labels.
        marital_probs = np.ones(len(LABELS)) / len(LABELS)
        # ---------------------------------

        row = {
            "model_id": model_id,
            "datapoint_id": int(record["datapoint_id"]),
        }
        for label, prob in zip(LABELS, marital_probs):
            row[f"{label}_prob"] = float(prob)
        rows.append(row)

submission = pd.DataFrame(rows, columns=["model_id", "datapoint_id"] + PROB_COLUMNS)

# Sanity checks — explicit `if`/die() instead of `assert`, so the validation
# still runs under `python -O` (asserts are stripped with optimization on).
if len(submission) != 1000:
    die(f"Expected 1000 rows, got {len(submission)}")
if not np.allclose(submission[PROB_COLUMNS].sum(axis=1), 1.0):
    die("Probabilities do not sum to 1 in all rows.")

submission.to_csv(OUTPUT_PATH, index=False)
print(f"\nSubmission saved to '{OUTPUT_PATH}' ({len(submission)} rows).")
print(submission.head(3).to_string(index=False))
|
| 179 |
+
# ---------------------------------------------------------------------------
# Step 4 — Submit to the evaluation endpoint
# ---------------------------------------------------------------------------

if SUBMIT:
    if not os.path.isfile(OUTPUT_PATH):
        die(f"Submission file not found: {OUTPUT_PATH}")

    print(f"\nSubmitting '{OUTPUT_PATH}' to /{TASK_ID}...")
    try:
        # Upload the CSV as multipart/form-data.
        with open(OUTPUT_PATH, "rb") as fh:
            upload = requests.post(
                f"{BASE_URL}/submit/{TASK_ID}",
                headers={"X-API-Key": API_KEY},
                files={"file": (os.path.basename(OUTPUT_PATH), fh, "text/csv")},
                timeout=(10, 120),
            )

        # Prefer the JSON body; fall back to raw text for non-JSON replies.
        try:
            payload = upload.json()
        except Exception:
            payload = {"raw_text": upload.text}

        if upload.status_code == 413:
            die("Upload rejected: file too large (HTTP 413).")

        upload.raise_for_status()

        print("Successfully submitted.")
        print("Server response:", payload)
        if payload.get("submission_id"):
            print(f"Submission ID: {payload['submission_id']}")

    except requests.exceptions.RequestException as exc:
        print(f"Submission error: {exc}")
        server_resp = getattr(exc, "response", None)
        if server_resp is not None:
            try:
                print("Server response:", server_resp.json())
            except Exception:
                print("Server response (text):", server_resp.text)
        sys.exit(1)
|