Update app.py
Browse files
app.py
CHANGED
@@ -6,16 +6,16 @@ import os
|
|
6 |
auth_token = os.environ['HF_TOKEN']
|
7 |
|
8 |
# Load the tokenizer and models for the first pipeline
|
9 |
-
|
10 |
-
|
11 |
tokenizer_ext.model_max_length = 512
|
12 |
-
|
13 |
|
14 |
# Load the tokenizer and models for the second pipeline
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
|
20 |
# Load the tokenizer and models for the third pipeline
|
21 |
model1 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_Int_segment", num_labels=1, token=auth_token)
|
@@ -65,21 +65,21 @@ def process_classification(text, model1, model2, tokenizer1):
|
|
65 |
return f"{round(prediction1, 1)}", f"{round(prediction2, 1)}", f"{round(score, 2)}"
|
66 |
|
67 |
def all(text):
|
68 |
-
return process_ner(text,
|
69 |
|
70 |
# Define Gradio interface
|
71 |
iface = gr.Interface(
|
72 |
fn=all,
|
73 |
inputs=gr.Textbox(placeholder="Enter sentence here..."),
|
74 |
outputs=[
|
75 |
-
gr.HighlightedText(label="
|
76 |
-
gr.HighlightedText(label="
|
77 |
gr.Label(label="Internal Detail Count"),
|
78 |
gr.Label(label="External Detail Count"),
|
79 |
gr.Label(label="Approximated Internal Detail Ratio")
|
80 |
],
|
81 |
-
title="
|
82 |
-
description="This demo combines two
|
83 |
theme="monochrome"
|
84 |
)
|
85 |
|
|
|
6 |
auth_token = os.environ['HF_TOKEN']
|
7 |
|
8 |
# Load the tokenizer and models for the first pipeline
|
9 |
+
tokenizer_bin = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_token", token=auth_token)
|
10 |
+
model_bin = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_token", token=auth_token)
|
11 |
tokenizer_ext.model_max_length = 512
|
12 |
+
pipe_bin = pipeline("ner", model=model_bin, tokenizer=tokenizer_bin)
|
13 |
|
14 |
# Load the tokenizer and models for the second pipeline
|
15 |
+
tokenizer_ext = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_AIS-token", token=auth_token)
|
16 |
+
model_ext = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_AIS-token", token=auth_token)
|
17 |
+
tokenizer_ext.model_max_length = 512
|
18 |
+
pipe_ext = pipeline("ner", model=model_ext, tokenizer=tokenizer_ext)
|
19 |
|
20 |
# Load the tokenizer and models for the third pipeline
|
21 |
model1 = AutoModelForSequenceClassification.from_pretrained("AlGe/deberta-v3-large_Int_segment", num_labels=1, token=auth_token)
|
|
|
65 |
return f"{round(prediction1, 1)}", f"{round(prediction2, 1)}", f"{round(score, 2)}"
|
66 |
|
67 |
def all(text):
    """Run all three pipelines on *text* for the Gradio interface.

    Returns a 5-tuple matching the interface outputs: binary NER
    highlights, extended NER highlights, then the three classification
    strings (internal detail count, external detail count, approximated
    internal detail ratio) produced by ``process_classification``.
    """
    # Run the sequence-classification models ONCE and unpack the 3-tuple,
    # instead of invoking process_classification three times (the original
    # tripled the model inference cost just to index [0], [1], [2]).
    internal_count, external_count, ratio = process_classification(
        text, model1, model2, tokenizer1
    )
    return (
        process_ner(text, pipe_bin),
        process_ner(text, pipe_ext),
        internal_count,
        external_count,
        ratio,
    )
|
69 |
|
70 |
# Define Gradio interface
|
71 |
iface = gr.Interface(
|
72 |
fn=all,
|
73 |
inputs=gr.Textbox(placeholder="Enter sentence here..."),
|
74 |
outputs=[
|
75 |
+
gr.HighlightedText(label="Binary Sequence Classification"),
|
76 |
+
gr.HighlightedText(label="Extended Sequence Classification"),
|
77 |
gr.Label(label="Internal Detail Count"),
|
78 |
gr.Label(label="External Detail Count"),
|
79 |
gr.Label(label="Approximated Internal Detail Ratio")
|
80 |
],
|
81 |
+
title="Autobiographical Memory Scoring Demo",
|
82 |
+
description="Precision Memory Analysis: This demo combines two text - and two sequence classification models to showcase our automated Autobiographical Interview scoring Method. Enter a narrative to see the results.",
|
83 |
theme="monochrome"
|
84 |
)
|
85 |
|