kt-test-account committed
Commit f646948 · 1 Parent(s): f7c59cc

adding cmaps

Files changed (2)
  1. app.py +7 -6
  2. utils.py +28 -11
app.py CHANGED
@@ -14,7 +14,8 @@ TASKS = {
     "video-challenge-task-2-config": ["source", "category"],
 }
 valid_splits = ["public", "private"]
-
+with st.sidebar:
+    color_map =st.selectbox("colormap",["paired","category20","category20b","category20c","set2","set3"])
 
 #####################################################################
 ## Data loading ##
@@ -140,7 +141,7 @@ def make_roc_curves(task, submission_ids):
     # if rocs["team"].nunique() > 1:
     color_field = "team:N"
 
-    roc_chart = alt.Chart(rocs).mark_line().encode(x="fpr", y="tpr", color=color_field, detail="submission_id:N")
+    roc_chart = alt.Chart(rocs).mark_line().encode(x="fpr", y="tpr", color=alt.Color(color_field, scale=alt.Scale(scheme=color_map)), detail="submission_id:N")
 
     return roc_chart
 
@@ -419,7 +420,7 @@ def make_roc(results, show_text=False):
         .encode(
             x=alt.X("FA:Q", title="🧑‍🎤 False Positive Rate", scale=alt.Scale(domain=[0.0, 1.0])),
             y=alt.Y("generated_accuracy:Q", title="👀 True Positive Rate", scale=alt.Scale(domain=[0.0, 1.0])),
-            color="team:N", # Color by categorical field
+            color=alt.Color('team:N', scale=alt.Scale(scheme=color_map)), # Color by categorical field
             size=alt.Size(
                 "total_time:Q", title="🕒 Inference Time", scale=alt.Scale(rangeMin=100)
             ), # Size by quantitative field
@@ -439,7 +440,7 @@ def make_roc(results, show_text=False):
         .encode(
             x=alt.X("FA:Q", title="🧑‍🎤 False Positive Rate", scale=alt.Scale(domain=[0, 1])),
             y=alt.Y("generated_accuracy:Q", title="👀 True Positive Rate", scale=alt.Scale(domain=[0, 1])),
-            color="team:N", # Color by categorical field
+            color=alt.Color('team:N', scale=alt.Scale(scheme=color_map)), # Color by categorical field
             text="team",
         )
     )
@@ -468,7 +469,7 @@ def make_acc(results, show_text=False):
                 title="Balanced Accuracy",
                 scale=alt.Scale(domain=[0.4, 1]),
             ),
-            color="team:N", # Color by categorical field # Size by quantitative field
+            color=alt.Color('team:N', scale=alt.Scale(scheme=color_map)), # Color by categorical field # Size by quantitative field
         )
         .properties(width=400, height=400, title="Inference Time vs Balanced Accuracy")
     )
@@ -491,7 +492,7 @@ def make_acc(results, show_text=False):
                 title="Balanced Accuracy",
                 scale=alt.Scale(domain=[0.4, 1]),
             ),
-            color="team:N", # Color by categorical field # Size by quantitative field
+            color=alt.Color('team:N', scale=alt.Scale(scheme=color_map)), # Color by categorical field # Size by quantitative field
             text="team",
         )
     )
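
For orientation, the colormap pattern these app.py changes introduce is sketched below with toy data (the dataframe, its columns, and the shortened scheme list are illustrative, not the app's real inputs): a selectbox in the Streamlit sidebar picks a Vega color scheme name, and each Altair chart threads it through alt.Color(..., scale=alt.Scale(scheme=...)).

    # Minimal sketch, assuming Streamlit + Altair as in the app; toy data only.
    import altair as alt
    import pandas as pd
    import streamlit as st

    with st.sidebar:
        # Same kind of widget the commit adds at module level in app.py
        color_map = st.selectbox("colormap", ["paired", "category20", "set2", "set3"])

    # Placeholder ROC-like points for two hypothetical teams
    toy = pd.DataFrame({
        "fpr": [0.0, 0.4, 1.0, 0.0, 0.2, 1.0],
        "tpr": [0.0, 0.7, 1.0, 0.0, 0.8, 1.0],
        "team": ["a", "a", "a", "b", "b", "b"],
    })

    chart = (
        alt.Chart(toy)
        .mark_line()
        .encode(
            x="fpr",
            y="tpr",
            # The selected scheme recolors the categorical encoding
            color=alt.Color("team:N", scale=alt.Scale(scheme=color_map)),
        )
    )
    st.altair_chart(chart)

Changing the selectbox value reruns the Streamlit script, so every chart built from color_map repaints with the newly chosen scheme.
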
utils.py CHANGED
@@ -141,18 +141,18 @@ def extract_roc(results: Dict[str, Any]) -> Dict[str, Any]:
     return new
 
 
-def add_custom_submission(path_to_cache, path_to_subfile):
+def add_custom_submission(path_to_cache, path_to_subfile, threshold = 0):
     import pandas as pd
     import json
 
     data = pd.read_csv(path_to_subfile)
     data["id"] = data["ID"]
     data["score"] = data["Score"]
-    data["pred"] = data["Pred"].apply(lambda a: "generated" if a == 1 else "real")
+    data["pred"] = data["score"].apply(lambda a: "generated" if a >= threshold else "real")
 
     team_id = "insiders-id-1-2-3"
     team_name = "insiders"
-    submission_id = "sub1"
+    submission_id = f"sub{threshold}".replace(".","")
 
     ## update teams
     teams = json.load(open(path_to_cache + "/teams.json"))
@@ -163,11 +163,14 @@ def add_custom_submission(path_to_cache, path_to_subfile):
 
     ## create submission
 
-    with open(path_to_cache + f"/submission_info/{team_id}.json", "w") as f:
-        temp = {
-            "id": team_id,
-            "submissions": [
-                {
+    submission_info_file = path_to_cache + f"/submission_info/{team_id}.json"
+
+    if os.path.exists(submission_info_file):
+        temp = json.load(open(submission_info_file))
+    else:
+        temp = {"id": team_id,"submissions": []}
+
+    temp["submissions"].append({
         "datetime": "2025-09-22 14:42:14",
         "submission_id": submission_id,
         "submission_comment": "",
@@ -178,15 +181,21 @@ def add_custom_submission(path_to_cache, path_to_subfile):
         "selected": True,
         "public_score": {},
         "private_score": {},
-        }
-        ],
-    }
+    })
+
+    with open(submission_info_file, "w") as f:
         json.dump(temp, f)
 
     data.loc[:, ["id", "pred", "score"]].to_csv(
         path_to_cache + f"/submissions/{team_id}-{submission_id}.csv", index=False
     )
 
+def create_custom_subs():
+    import numpy as np
+    for threshold in np.linspace(-6,0,10):
+        add_custom_submission(path_to_cache="competition_cache/safe-challenge/video-challenge-task-1-config",
+            path_to_subfile="competition_cache/custom/Scores-DSRI-brian.txt", threshold=threshold)
+
 
 if __name__ == "__main__":
 
@@ -198,6 +207,14 @@ if __name__ == "__main__":
     ]
     download_competition_data(competition_names=spaces)
 
+
+    if os.environ.get("MAKE_CUSTOM"):
+        print("adding custom subs")
+        create_custom_subs()
+
+
+
+
     ## Loop
     for space in spaces:
         local_dir = Path("competition_cache") / space
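
The create_custom_subs() sweep in utils.py is easiest to read as a thresholding exercise: the same continuous scores are cut at ten thresholds between -6 and 0, and each cut produces one pseudo-submission, i.e. one operating point. A toy illustration under assumed data (the IDs and scores below are made up; the real ones come from Scores-DSRI-brian.txt, and the whole sweep only runs when the MAKE_CUSTOM environment variable is set):

    # Toy sketch of the threshold sweep; made-up IDs/scores stand in for the real file.
    import numpy as np
    import pandas as pd

    data = pd.DataFrame({"ID": ["v1", "v2", "v3", "v4"],
                         "Score": [-5.2, -3.1, -0.4, 0.0]})

    for threshold in np.linspace(-6, 0, 10):
        # Same rule as add_custom_submission: score >= threshold -> "generated"
        pred = data["Score"].apply(lambda a: "generated" if a >= threshold else "real")
        # Same naming rule: dots stripped from the float so the id is filesystem-safe
        submission_id = f"sub{threshold}".replace(".", "")
        print(submission_id, pred.tolist())

Lower thresholds label more clips "generated" (raising both true and false positive rates), higher thresholds fewer, so the sweep presumably gives the leaderboard a family of operating points for the "insiders" team rather than a single one.
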