Christian Rene Thelen committed
Commit c6175d5 · 1 Parent(s): fbdafda

model.pth should load from Huggingface

Files changed (1): app.py (+3 −2)
app.py CHANGED
@@ -596,12 +596,13 @@ if __name__ == "__main__":
     classifier = SpanClassifierWithStrictF1('xlm-roberta-large')

     classifier.model = AutoModelForTokenClassification.from_pretrained(
-        'xlm-roberta-large',
+        'cortex359/germeval2025',
+        torch_dtype="auto",
         num_labels=len(classifier.labels),
         id2label=classifier.id2label,
         label2id=classifier.label2id
     )
-    classifier.model.load_state_dict(torch.load('./model/subtask2_final_model.pth'))
+    #classifier.model.load_state_dict(torch.load('./model/subtask2_final_model.pth'))
     classifier.model.eval()

     print("Modell geladen! Starte Gradio-Interface...")