maitri01 committed on
Commit
359a2cc
·
verified ·
1 Parent(s): b4cb1b6

Update task_template.py

Browse files
Files changed (1) hide show
  1. task_template.py +19 -30
task_template.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Task 2 — Attribute Inference: Sample Submission
3
  =================================================
4
 
5
  Goal
@@ -36,35 +36,21 @@ Rate limits on the oracle endpoint
36
 
37
  import os
38
  import sys
39
- import time
40
 
41
  import numpy as np
42
  import pandas as pd
43
  import requests
44
 
 
45
  # ---------------------------------------------------------------------------
46
  # Configuration — fill these in before running
47
  # ---------------------------------------------------------------------------
48
 
49
  BASE_URL = "http://35.192.205.84:80"
50
- API_KEY = "your-api-key-here" # replace with your team's API key
51
  TASK_ID = "25-attribute-inference"
52
- ZIP_FILE = "Dataset.zip" # Path to dataset zip file
53
- DATASET_DIR = Path("Dataset") # Folder after extraction
54
  OUTPUT_PATH = "submission.csv"
55
-
56
- # ----------------------------
57
- # UNZIP DATASET
58
- # ----------------------------
59
- if not DATASET_DIR.exists():
60
- print("Unzipping dataset...")
61
- with zipfile.ZipFile(ZIP_FILE, "r") as zip_ref:
62
- zip_ref.extractall(DATASET_DIR)
63
- else:
64
- print("Dataset already extracted.")
65
-
66
-
67
- TARGET_FEATURES_PATH = "Dataset/target_features.csv"
68
 
69
  QUERY_ORACLE = True # set True to query the oracle endpoint
70
  SUBMIT = False # set True to submit OUTPUT_PATH to the evaluator
@@ -79,6 +65,8 @@ LABELS = [
79
  ]
80
  PROB_COLUMNS = [f"{l}_prob" for l in LABELS]
81
  FEATURE_COLUMNS = ["education", "educational-num", "race", "gender", "native-country"]
 
 
82
 
83
  # ---------------------------------------------------------------------------
84
  # Helpers
@@ -98,16 +86,14 @@ def query_oracle(model_id: int, feature_dicts: list[dict]) -> list[list[float]]:
98
  """
99
  resp = requests.post(
100
  f"{BASE_URL}/25-attribute-inference/predict",
101
- json={"model_id": model_id, "features": feature_dicts},
102
  headers={"X-API-Key": API_KEY},
103
  timeout=(10, 60),
104
  )
105
  if resp.status_code == 429:
106
- detail = resp.json().get("detail", "")
107
- die(f"Rate limited on model {model_id}: {detail}")
108
  resp.raise_for_status()
109
- data = resp.json()
110
- return data["probabilities"] # list of [p_<=50K, p_>50K]
111
 
112
 
113
  # ---------------------------------------------------------------------------
@@ -128,7 +114,14 @@ oracle_probs: dict[int, list[list[float]]] = {}
128
  if QUERY_ORACLE:
129
  for model_id in sorted(target_df["model_id"].unique()):
130
  subset = target_df[target_df["model_id"] == model_id].sort_values("datapoint_id")
131
- feature_dicts = subset[FEATURE_COLUMNS].to_dict(orient="records")
 
 
 
 
 
 
 
132
 
133
  print(f"Querying oracle for model {model_id} ({len(feature_dicts)} records)...")
134
  probs = query_oracle(model_id, feature_dicts)
@@ -157,19 +150,15 @@ for model_id in sorted(target_df["model_id"].unique()):
157
  marital_probs = np.ones(len(LABELS)) / len(LABELS)
158
  # ---------------------------------
159
 
160
- row = {
161
- "model_id": model_id,
162
- "datapoint_id": int(record["datapoint_id"]),
163
- }
164
  for label, prob in zip(LABELS, marital_probs):
165
  row[f"{label}_prob"] = float(prob)
166
  rows.append(row)
167
 
168
  submission = pd.DataFrame(rows, columns=["model_id", "datapoint_id"] + PROB_COLUMNS)
169
 
170
- # Sanity checks
171
  assert len(submission) == 1000, f"Expected 1000 rows, got {len(submission)}"
172
- assert np.allclose(submission[PROB_COLUMNS].sum(axis=1), 1.0), \
173
  "Probabilities do not sum to 1 in all rows."
174
 
175
  submission.to_csv(OUTPUT_PATH, index=False)
 
1
  """
2
+ Task 2 — Attribute Inference: Querying and Sample Submission
3
  =================================================
4
 
5
  Goal
 
36
 
37
  import os
38
  import sys
 
39
 
40
  import numpy as np
41
  import pandas as pd
42
  import requests
43
 
44
+
45
  # ---------------------------------------------------------------------------
46
  # Configuration — fill these in before running
47
  # ---------------------------------------------------------------------------
48
 
49
  BASE_URL = "http://35.192.205.84:80"
50
+ API_KEY = "<YOUR_API_KEY>"
51
  TASK_ID = "25-attribute-inference"
 
 
52
  OUTPUT_PATH = "submission.csv"
53
+ TARGET_FEATURES_PATH = "target_features.csv"
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
  QUERY_ORACLE = True # set True to query the oracle endpoint
56
  SUBMIT = False # set True to submit OUTPUT_PATH to the evaluator
 
65
  ]
66
  PROB_COLUMNS = [f"{l}_prob" for l in LABELS]
67
  FEATURE_COLUMNS = ["education", "educational-num", "race", "gender", "native-country"]
68
+ NULL_KEYS = ["age", "capital-gain", "capital-loss", "hours-per-week",
69
+ "marital-status", "occupation", "workclass"]
70
 
71
  # ---------------------------------------------------------------------------
72
  # Helpers
 
86
  """
87
  resp = requests.post(
88
  f"{BASE_URL}/25-attribute-inference/predict",
89
+ json={"model_id": int(model_id), "features": feature_dicts},
90
  headers={"X-API-Key": API_KEY},
91
  timeout=(10, 60),
92
  )
93
  if resp.status_code == 429:
94
+ die(f"Rate limited on model {model_id}: {resp.json().get('detail', '')}")
 
95
  resp.raise_for_status()
96
+ return resp.json()["probabilities"] # list of [p_<=50K, p_>50K]
 
97
 
98
 
99
  # ---------------------------------------------------------------------------
 
114
  if QUERY_ORACLE:
115
  for model_id in sorted(target_df["model_id"].unique()):
116
  subset = target_df[target_df["model_id"] == model_id].sort_values("datapoint_id")
117
+ feature_dicts = (
118
+ subset[FEATURE_COLUMNS]
119
+ .where(pd.notna(subset[FEATURE_COLUMNS]), None)
120
+ .to_dict(orient="records")
121
+ )
122
+ for d in feature_dicts:
123
+ for k in NULL_KEYS:
124
+ d[k] = None
125
 
126
  print(f"Querying oracle for model {model_id} ({len(feature_dicts)} records)...")
127
  probs = query_oracle(model_id, feature_dicts)
 
150
  marital_probs = np.ones(len(LABELS)) / len(LABELS)
151
  # ---------------------------------
152
 
153
+ row = {"model_id": model_id, "datapoint_id": int(record["datapoint_id"])}
 
 
 
154
  for label, prob in zip(LABELS, marital_probs):
155
  row[f"{label}_prob"] = float(prob)
156
  rows.append(row)
157
 
158
  submission = pd.DataFrame(rows, columns=["model_id", "datapoint_id"] + PROB_COLUMNS)
159
 
 
160
  assert len(submission) == 1000, f"Expected 1000 rows, got {len(submission)}"
161
+ assert np.allclose(submission[PROB_COLUMNS].to_numpy().sum(axis=1), 1.0), \
162
  "Probabilities do not sum to 1 in all rows."
163
 
164
  submission.to_csv(OUTPUT_PATH, index=False)