Commit 31865d7 · ThorbenFroehlking committed · 1 parent: 5ed7396

Updated

Files changed:
- .ipynb_checkpoints/app-checkpoint.py (+15 -11)
- app.py (+15 -11)
.ipynb_checkpoints/app-checkpoint.py CHANGED

@@ -584,6 +584,20 @@ with gr.Blocks(css="""
else:
    return gr.update(visible=False), gr.update(visible=True)

+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return {
+        "raw_scores": raw_scores.tolist(),
+        "normalized_scores": normalized_scores.tolist()
+    }
+
mode.change(
    toggle_mode,
    inputs=[mode],
@@ -622,16 +636,6 @@ with gr.Blocks(css="""
    outputs=[predictions_output, molecule_output, download_output]
)

-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()

-
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
-
+demo.load(predict_utils, inputs=gr.Textbox(), outputs=gr.Textbox())
demo.launch(share=True)
-demo.api_name["predict_from_sequence"] = predict_utils
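The relocated function now returns a dict of plain Python lists instead of a tuple of NumPy arrays. A minimal sketch of that scoring step, with toy logits standing in for the real model output of shape (sequence_length, 2) (the array values below are invented for illustration):

# Sketch of the scoring step in predict_utils; the logits here are toy values.
import numpy as np
from scipy.special import expit

logits = np.array([[0.2, 1.5],
                   [1.1, -0.3],
                   [0.0, 0.0]])

# Sigmoid of the class-1 minus class-0 logit gives a per-residue score in (0, 1).
raw_scores = expit(logits[:, 1] - logits[:, 0])

# .tolist() converts the NumPy array into plain Python floats, so the dict
# returned by predict_utils is JSON-serializable when exposed through Gradio.
print(raw_scores.tolist())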
app.py CHANGED

@@ -584,6 +584,20 @@ with gr.Blocks(css="""
else:
    return gr.update(visible=False), gr.update(visible=True)

+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return {
+        "raw_scores": raw_scores.tolist(),
+        "normalized_scores": normalized_scores.tolist()
+    }
+
mode.change(
    toggle_mode,
    inputs=[mode],
@@ -622,16 +636,6 @@ with gr.Blocks(css="""
    outputs=[predictions_output, molecule_output, download_output]
)

-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()

-
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
-
+demo.load(predict_utils, inputs=gr.Textbox(), outputs=gr.Textbox())
demo.launch(share=True)
-demo.api_name["predict_from_sequence"] = predict_utils
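With predict_utils now attached to a demo.load event, the Space could in principle be called programmatically through gradio_client. The sketch below is an assumption, not part of this commit: the Space id is a placeholder and the endpoint name may differ, so the actual api_name should be confirmed with client.view_api().

# Hedged sketch: calling the exposed predict_utils endpoint from Python.
# "ThorbenFroehlking/your-space-name" and api_name="/predict_utils" are placeholders;
# run client.view_api() to see which endpoints this app actually exposes.
from gradio_client import Client

client = Client("ThorbenFroehlking/your-space-name")  # placeholder Space id
result = client.predict("MKTAYIAKQR", api_name="/predict_utils")  # toy sequence
# Because the event's output component is a gr.Textbox, the payload may arrive
# as a string; it should contain the "raw_scores" and "normalized_scores" lists.
print(result)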