Tristan Thrush committed on
Commit
d2c90f3
β€’
1 Parent(s): 2dfb09c

fixed merge issue

Browse files
Files changed (1) hide show
  1. app.py +61 -60
app.py CHANGED
@@ -307,72 +307,73 @@ with st.form(key="form"):
307
  selected_models = st.multiselect("Select the models you wish to evaluate", compatible_models)
308
  submit_button = st.form_submit_button("Make submission")
309
  if submit_button:
310
- project_id = str(uuid.uuid4())[:3]
311
- payload = {
312
- "username": AUTOTRAIN_USERNAME,
313
- "proj_name": f"my-eval-project-{project_id}",
314
- "task": TASK_TO_ID[selected_task],
315
- "config": {
316
- "language": "en",
317
- "max_models": 5,
318
- "instance": {
319
- "provider": "aws",
320
- "instance_type": "ml.g4dn.4xlarge",
321
- "max_runtime_seconds": 172800,
322
- "num_instances": 1,
323
- "disk_size_gb": 150,
324
- },
325
- "evaluation": {
326
- "metrics": selected_metrics,
327
- "models": selected_models,
328
- },
329
- },
330
- }
331
- print(f"Payload: {payload}")
332
- project_json_resp = http_post(
333
- path="/projects/create",
334
- payload=payload,
335
- token=HF_TOKEN,
336
- domain=AUTOTRAIN_BACKEND_API,
337
- ).json()
338
- print(project_json_resp)
339
-
340
- if project_json_resp["created"]:
341
  payload = {
342
- "split": 4, # use "auto" split choice in AutoTrain
343
- "col_mapping": col_mapping,
344
- "load_config": {"max_size_bytes": 0, "shuffle": False},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
345
  }
346
- data_json_resp = http_post(
347
- path=f"/projects/{project_json_resp['id']}/data/{selected_dataset}",
 
348
  payload=payload,
349
  token=HF_TOKEN,
350
  domain=AUTOTRAIN_BACKEND_API,
351
- params={
352
- "type": "dataset",
353
- "config_name": selected_config,
354
- "split_name": selected_split,
355
- },
356
  ).json()
357
- print(data_json_resp)
358
- if data_json_resp["download_status"] == 1:
359
- train_json_resp = http_get(
360
- path=f"/projects/{project_json_resp['id']}/data/start_process",
 
 
 
 
 
 
 
361
  token=HF_TOKEN,
362
  domain=AUTOTRAIN_BACKEND_API,
 
 
 
 
 
363
  ).json()
364
- print(train_json_resp)
365
- if train_json_resp["success"]:
366
- st.success(f"βœ… Successfully submitted evaluation job with project ID {project_id}")
367
- st.markdown(
368
- f"""
369
- Evaluation takes appoximately 1 hour to complete, so grab a β˜• or 🍡 while you wait:
370
-
371
- πŸ“Š Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) \
372
- to view the results from your submission
373
- """
374
- )
375
- else:
376
- st.error("πŸ™ˆ Oh noes, there was an error submitting your evaluation job!")
377
- else:
378
- st.warning("⚠️ No models were selected for evaluation!")
 
 
 
 
 
 
 
 
307
  selected_models = st.multiselect("Select the models you wish to evaluate", compatible_models)
308
  submit_button = st.form_submit_button("Make submission")
309
  if submit_button:
310
+ if len(selected_models) > 0:
311
+ project_id = str(uuid.uuid4())[:3]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
  payload = {
313
+ "username": AUTOTRAIN_USERNAME,
314
+ "proj_name": f"my-eval-project-{project_id}",
315
+ "task": TASK_TO_ID[selected_task],
316
+ "config": {
317
+ "language": "en",
318
+ "max_models": 5,
319
+ "instance": {
320
+ "provider": "aws",
321
+ "instance_type": "ml.g4dn.4xlarge",
322
+ "max_runtime_seconds": 172800,
323
+ "num_instances": 1,
324
+ "disk_size_gb": 150,
325
+ },
326
+ "evaluation": {
327
+ "metrics": selected_metrics,
328
+ "models": selected_models,
329
+ },
330
+ },
331
  }
332
+ print(f"Payload: {payload}")
333
+ project_json_resp = http_post(
334
+ path="/projects/create",
335
  payload=payload,
336
  token=HF_TOKEN,
337
  domain=AUTOTRAIN_BACKEND_API,
 
 
 
 
 
338
  ).json()
339
+ print(project_json_resp)
340
+
341
+ if project_json_resp["created"]:
342
+ payload = {
343
+ "split": 4, # use "auto" split choice in AutoTrain
344
+ "col_mapping": col_mapping,
345
+ "load_config": {"max_size_bytes": 0, "shuffle": False},
346
+ }
347
+ data_json_resp = http_post(
348
+ path=f"/projects/{project_json_resp['id']}/data/{selected_dataset}",
349
+ payload=payload,
350
  token=HF_TOKEN,
351
  domain=AUTOTRAIN_BACKEND_API,
352
+ params={
353
+ "type": "dataset",
354
+ "config_name": selected_config,
355
+ "split_name": selected_split,
356
+ },
357
  ).json()
358
+ print(data_json_resp)
359
+ if data_json_resp["download_status"] == 1:
360
+ train_json_resp = http_get(
361
+ path=f"/projects/{project_json_resp['id']}/data/start_process",
362
+ token=HF_TOKEN,
363
+ domain=AUTOTRAIN_BACKEND_API,
364
+ ).json()
365
+ print(train_json_resp)
366
+ if train_json_resp["success"]:
367
+ st.success(f"βœ… Successfully submitted evaluation job with project ID {project_id}")
368
+ st.markdown(
369
+ f"""
370
+ Evaluation takes appoximately 1 hour to complete, so grab a β˜• or 🍡 while you wait:
371
+
372
+ πŸ“Š Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) \
373
+ to view the results from your submission
374
+ """
375
+ )
376
+ else:
377
+ st.error("πŸ™ˆ Oh noes, there was an error submitting your evaluation job!")
378
+ else:
379
+ st.warning("⚠️ No models were selected for evaluation!")