| """ |
Task 2 — Attribute Inference: Querying and Sample Submission
============================================================
| |
| Goal |
| ---- |
| Infer the `marital-status` of 100 records per model (10 models = 1 000 rows |
| total) using only the 5 observable features in target_features.csv. |
| |
| You can query each model's oracle endpoint to get income predictions as an |
| additional signal. This sample shows: |
| 1. How to query the oracle endpoint (POST /25-attribute-inference/predict). |
| 2. How to build a valid submission CSV. |
| 3. How to submit it to the evaluation endpoint. |
| |
| Submission CSV format |
| --------------------- |
| Columns (exact order): |
| model_id, datapoint_id, |
| Divorced_prob, Married-civ-spouse_prob, Married-spouse-absent_prob, |
| Never-married_prob, Separated_prob, Widowed_prob |
| |
| Constraints: |
- One row per (model_id, datapoint_id) pair → 1 000 rows total.
- model_id ∈ {0, …, 9}, datapoint_id ∈ {0, …, 99}.
| - All six probability columns must be floats in [0, 1]. |
| - Each row's six probabilities must sum to 1. |
| |
| Rate limits on the oracle endpoint |
| ----------------------------------- |
| - 900 s cooldown per model after a successful query. |
| - 120 s cooldown per model after a failed query. |
| - Each model has its own independent cooldown. |
⇒ You can query all 10 models without waiting, but only once per 15 min per model.
| """ |
|
|
| import os |
| import sys |
|
|
| import numpy as np |
| import pandas as pd |
| import requests |
|
|
|
|
| |
| |
| |
|
|
# --- Endpoint & file configuration ----------------------------------------
BASE_URL = "http://35.192.205.84:80"
API_KEY = "<YOUR_API_KEY>"
TASK_ID = "25-attribute-inference"
OUTPUT_PATH = "submission.csv"
TARGET_FEATURES_PATH = "target_features.csv"


# --- Behaviour toggles ----------------------------------------------------
QUERY_ORACLE = True   # query each model's oracle for income probabilities
SUBMIT = False        # upload the finished CSV to the evaluation endpoint


# --- Task schema ----------------------------------------------------------
# The six marital-status classes, in the exact column order the grader expects.
LABELS = [
    "Divorced",
    "Married-civ-spouse",
    "Married-spouse-absent",
    "Never-married",
    "Separated",
    "Widowed",
]
PROB_COLUMNS = [f"{label}_prob" for label in LABELS]
# The five observable features present in target_features.csv.
FEATURE_COLUMNS = ["education", "educational-num", "race", "gender", "native-country"]
# Attributes the oracle payload must carry explicitly as null.
NULL_KEYS = [
    "age",
    "capital-gain",
    "capital-loss",
    "hours-per-week",
    "marital-status",
    "occupation",
    "workclass",
]
|
|
| |
| |
| |
|
|
def die(msg: str) -> None:
    """Report *msg* on stderr, then terminate the process with exit code 1."""
    sys.stderr.write(f"{msg}\n")
    sys.exit(1)
|
|
|
|
def query_oracle(model_id: int, feature_dicts: list[dict]) -> list[list[float]]:
    """
    Query the oracle endpoint for one model.

    Parameters
    ----------
    model_id : int
        Index of the target model (0-9).
    feature_dicts : list[dict]
        One feature mapping per record; hidden attributes must be None.

    Returns
    -------
    list[list[float]]
        A [p_<=50K, p_>50K] pair per input record.

    Raises
    ------
    requests.HTTPError
        On any non-429 HTTP error status.

    Exits the process (via ``die``) on an HTTP 429 rate-limit response.
    """
    resp = requests.post(
        f"{BASE_URL}/{TASK_ID}/predict",  # TASK_ID == "25-attribute-inference"
        json={"model_id": int(model_id), "features": feature_dicts},
        headers={"X-API-Key": API_KEY},
        timeout=(10, 60),  # (connect, read) seconds
    )
    if resp.status_code == 429:
        # Guard the JSON parse: a non-JSON 429 body would otherwise raise and
        # mask the rate-limit message we are trying to report.
        try:
            detail = resp.json().get("detail", "")
        except ValueError:
            detail = resp.text
        die(f"Rate limited on model {model_id}: {detail}")
    resp.raise_for_status()
    return resp.json()["probabilities"]
|
|
|
|
| |
| |
| |
|
|
# ---------------------------------------------------------------------------
# Load the target records: one row per (model_id, datapoint_id), with only
# the 5 observable features populated.
# ---------------------------------------------------------------------------
target_df = pd.read_csv(TARGET_FEATURES_PATH)
print(f"Loaded {len(target_df)} target records across "
      f"{target_df['model_id'].nunique()} models.")


# ---------------------------------------------------------------------------
# Optionally query each model's income oracle once (rate limits apply).
# Keyed by model_id; each value is a list of [p_<=50K, p_>50K] per record.
# ---------------------------------------------------------------------------
oracle_probs: dict[int, list[list[float]]] = {}

if QUERY_ORACLE:
    for model_id in sorted(target_df["model_id"].unique()):
        subset = target_df[target_df["model_id"] == model_id].sort_values("datapoint_id")
        # NaN -> None so missing values serialise as JSON null.
        feature_dicts = (
            subset[FEATURE_COLUMNS]
            .where(pd.notna(subset[FEATURE_COLUMNS]), None)
            .to_dict(orient="records")
        )
        # The oracle schema also expects the hidden attributes, explicitly null.
        for d in feature_dicts:
            for k in NULL_KEYS:
                d[k] = None

        print(f"Querying oracle for model {model_id} ({len(feature_dicts)} records)...")
        probs = query_oracle(model_id, feature_dicts)
        oracle_probs[model_id] = probs
        # Status arrow below was mojibake-garbled in the original source.
        print(f"  → received income probabilities, e.g. first record: {probs[0]}")
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
# ---------------------------------------------------------------------------
# Assemble the submission: one row per (model_id, datapoint_id) pair.
# This sample assigns a uniform distribution over the six classes — replace
# `marital_probs` with a real inference (e.g. one informed by oracle_probs).
# ---------------------------------------------------------------------------
rows = []
for model_id in sorted(target_df["model_id"].unique()):
    subset = target_df[target_df["model_id"] == model_id].sort_values("datapoint_id")

    for datapoint_id in subset["datapoint_id"]:
        # Baseline guess: equal probability for every class.
        marital_probs = np.ones(len(LABELS)) / len(LABELS)

        row = {"model_id": model_id, "datapoint_id": int(datapoint_id)}
        for label, prob in zip(LABELS, marital_probs):
            row[f"{label}_prob"] = float(prob)
        rows.append(row)


submission = pd.DataFrame(rows, columns=["model_id", "datapoint_id"] + PROB_COLUMNS)

# Sanity-check shape and row-wise normalisation before writing to disk.
assert len(submission) == 1000, f"Expected 1000 rows, got {len(submission)}"
assert np.allclose(submission[PROB_COLUMNS].to_numpy().sum(axis=1), 1.0), \
    "Probabilities do not sum to 1 in all rows."

submission.to_csv(OUTPUT_PATH, index=False)
print(f"\nSubmission saved to '{OUTPUT_PATH}' ({len(submission)} rows).")
print(submission.head(3).to_string(index=False))
|
|
| |
| |
| |
|
|
# ---------------------------------------------------------------------------
# Optionally upload the CSV to the evaluation endpoint.
# ---------------------------------------------------------------------------
if SUBMIT:
    if not os.path.isfile(OUTPUT_PATH):
        die(f"Submission file not found: {OUTPUT_PATH}")

    print(f"\nSubmitting '{OUTPUT_PATH}' to /{TASK_ID}...")
    try:
        with open(OUTPUT_PATH, "rb") as fh:
            upload = {"file": (os.path.basename(OUTPUT_PATH), fh, "text/csv")}
            resp = requests.post(
                f"{BASE_URL}/submit/{TASK_ID}",
                headers={"X-API-Key": API_KEY},
                files=upload,
                timeout=(10, 120),
            )

        # Prefer the JSON body; fall back to raw text if parsing fails.
        try:
            body = resp.json()
        except Exception:
            body = {"raw_text": resp.text}

        if resp.status_code == 413:
            die("Upload rejected: file too large (HTTP 413).")

        resp.raise_for_status()

        print("Successfully submitted.")
        print("Server response:", body)
        if body.get("submission_id"):
            print(f"Submission ID: {body['submission_id']}")

    except requests.exceptions.RequestException as e:
        # Surface any server-supplied detail alongside the exception itself.
        detail = getattr(e, "response", None)
        print(f"Submission error: {e}")
        if detail is not None:
            try:
                print("Server response:", detail.json())
            except Exception:
                print("Server response (text):", detail.text)
        sys.exit(1)