Shiyu Zhao committed
Commit 360a3ae · 1 Parent(s): 5cf2606

Update space

app.py CHANGED
@@ -518,58 +518,6 @@ def get_emails_from_metadata(meta_data):
     """
     return [email.strip() for email in meta_data.get("Contact Email(s)", "").split(";")]
 
-def send_error_notification(meta_data, error_info):
-    """
-    Sends an email notification about an error during the evaluation process.
-
-    Args:
-        meta_data (dict): Submission metadata to be included in the email.
-        error_info (str): Error message or notification content to be included in the email.
-
-    Returns:
-        None
-    """
-    emails_to_send = get_emails_from_metadata(meta_data)
-    send_from = 'stark-qa@cs.stanford.edu'
-    recipients_str = ', '.join(emails_to_send)
-
-    # Create the email container
-    msg = MIMEMultipart('alternative')
-    msg['Subject'] = 'STaRK Leaderboard Submission - Error Notification'
-    msg['From'] = send_from
-    msg['To'] = recipients_str
-
-    # Format the metadata table
-    metadata_table = format_metadata_as_table(meta_data)
-
-    # Email body content with metadata table
-    body = f"""
-    <p>Dear STaRK Leaderboard Participant,</p>
-
-    <p>We encountered an issue during the evaluation of your recent submission:</p>
-
-    <p><i>{error_info}</i></p>
-
-    <p>Please verify your inputs and resubmit. If the issue persists, feel free to contact us at stark-qa@cs.stanford.edu with the error details and your dataset information.</p>
-
-    <p>Submitted Metadata:</p>
-    {metadata_table}
-
-    <p>Thank you for your participation.</p>
-
-    <p>Best regards,<br>The STaRK QA Team</p>
-    """
-
-    msg.attach(MIMEText(body, 'html'))
-
-    # Send the email
-    try:
-        with smtplib.SMTP('localhost') as server:
-            server.sendmail(send_from, emails_to_send, msg.as_string())  # No CC for error notification
-        print("Error notification sent successfully.")
-    except Exception as e:
-        print(f"Failed to send error notification: {e}")
-
 def format_evaluation_results(results):
     """
     Formats the evaluation results dictionary into a readable string.
@@ -582,61 +530,6 @@ def format_evaluation_results(results):
     """
     result_lines = [f"{metric}: {value}" for metric, value in results.items()]
     return "\n".join(result_lines)
-
-# Function to send a submission confirmation with evaluation results and metadata, CCing the sender
-def send_submission_confirmation(meta_data, eval_results):
-    """
-    Sends an email notification confirming submission and including evaluation results.
-    Modified to handle SMTP connection properly.
-    """
-    try:
-        emails_to_send = get_emails_from_metadata(meta_data)
-        send_from = 'stark-qa@cs.stanford.edu'
-        recipients_str = ', '.join(emails_to_send)
-
-        msg = MIMEMultipart('alternative')
-        msg['Subject'] = 'STaRK Leaderboard Submission - Evaluation Results'
-        msg['From'] = send_from
-        msg['To'] = recipients_str
-        msg['Cc'] = send_from
-
-        formatted_results = format_evaluation_results(eval_results)
-        metadata_table = format_metadata_as_table(meta_data)
-
-        body = f"""
-        <p>Dear STaRK Leaderboard Participant,</p>
-
-        <p>Thank you for your submission to the STaRK leaderboard. Below are the results of your submission:</p>
-
-        <pre>{formatted_results}</pre>
-
-        <p>Submitted Metadata:</p>
-        {metadata_table}
-
-        <p>Your results have been added to the leaderboard. If you would like to withdraw your submission,
-        please reply to this email with "withdrawn."</p>
-
-        <p>Best regards,<br>The STaRK QA Team</p>
-        """
-
-        msg.attach(MIMEText(body, 'html'))
-
-        # Modified SMTP connection handling
-        try:
-            # First try localhost
-            with smtplib.SMTP('localhost') as server:
-                server.send_message(msg)
-        except:
-            # If localhost fails, try connecting to a remote SMTP server
-            with smtplib.SMTP('smtp.stanford.edu', 587) as server:
-                server.starttls()
-                server.send_message(msg)
-
-        print(f"Submission confirmation sent successfully to {recipients_str}")
-    except Exception as e:
-        print(f"Warning: Failed to send email notification: {str(e)}")
-        # Continue with submission even if email fails
-        pass
 
 def process_submission(
     method_name, team_name, dataset, split, contact_email,
@@ -708,7 +601,7 @@ def process_submission(
         )
 
         if isinstance(results, str):
-            send_error_notification(meta_data, results)
+            # send_error_notification(meta_data, results)
             return f"Evaluation error: {results}"
 
         # Process results
@@ -779,7 +672,7 @@ def process_submission(
             raise RuntimeError(f"Failed to save files to HuggingFace Hub: {str(e)}")
 
         # Send confirmation email and update leaderboard
-        send_submission_confirmation(meta_data, processed_results)
+        # send_submission_confirmation(meta_data, processed_results)
         update_leaderboard_data(submission_data)
         demo.update()
 
@@ -801,7 +694,7 @@ def process_submission(
 
     except Exception as e:
         error_message = f"Error processing submission: {str(e)}"
-        send_error_notification(meta_data, error_message)
+        # send_error_notification(meta_data, error_message)
         return error_message
     finally:
        # Clean up temporary files
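
For reference, the removed send_submission_confirmation tried a local MTA first and fell back to a remote STARTTLS relay when that connection failed; send_error_notification used only the local MTA. Below is a minimal standalone sketch of the fallback variant; the relay hostname and the narrowed exception type are illustrative assumptions, not the Space's actual mail configuration.

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_html_email(subject, html_body, send_from, recipients):
    """Compose an HTML email and deliver it, preferring a local MTA."""
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = send_from
    msg['To'] = ', '.join(recipients)
    msg.attach(MIMEText(html_body, 'html'))
    try:
        # Prefer a local MTA listening on port 25, as both removed helpers did.
        with smtplib.SMTP('localhost') as server:
            server.send_message(msg)
    except OSError:
        # Connecting to localhost failed; fall back to a remote relay over
        # STARTTLS. 'smtp.example.edu' is a placeholder, not the real relay.
        with smtplib.SMTP('smtp.example.edu', 587) as server:
            server.starttls()
            server.send_message(msg)

Note that the original fallback used a bare except:, which would also reroute non-connection failures (for example, a recipient rejected by the local MTA) to the relay; catching only connection-type errors keeps such failures visible.
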
submissions/ance_test_tester1/latest.json DELETED
@@ -1,6 +0,0 @@
-{
-    "latest_submission": "20241023_233630",
-    "status": "approved",
-    "method_name": "ance_test",
-    "team_name": "tester1"
-}

submissions/ance_test_tester1/metadata_20241023_233630.json DELETED
@@ -1,20 +0,0 @@
-{
-    "Method Name": "ance_test",
-    "Team Name": "tester1",
-    "Dataset": "amazon",
-    "Split": "human_generated_eval",
-    "Contact Email(s)": "shiyuz@stanford.edu",
-    "Code Repository": "https://github.com/snap-stanford/stark",
-    "Model Description": "tester1",
-    "Hardware": "a100",
-    "(Optional) Paper link": "",
-    "results": {
-        "hit@1": 25.93,
-        "hit@5": 54.32,
-        "recall@20": 23.69,
-        "mrr": 37.08
-    },
-    "status": "approved",
-    "submission_date": "2024-10-23 23:37:03",
-    "csv_path": "submissions/ance_test_tester1/predictions_20241023_233630.csv"
-}

submissions/ance_test_tester1/predictions_20241023_233630.csv DELETED
The diff for this file is too large to render.