change granularity options to sentence/para
- app.py +8 -6
- isotonic_regression_model.joblib +0 -0
- plagiarism.py +1 -1
app.py
CHANGED
@@ -131,8 +131,8 @@ with gr.Blocks() as demo:
     # with gr.Row():
     #     btn = gr.Button("Bias Buster")
     #     out = gr.Textbox(label="Bias Corrected Full Input", interactive=False)
-    #     corrections_output = gr.Textbox(label="Bias Corrections", interactive=False)
-    #     btn.click(fn=update, inputs=input_text, outputs=[out, corrections_output])
+    #     corrections_output = gr.Textbox(label="Bias Corrections", interactive=False)
+    #     btn.click(fn=update, inputs=input_text, outputs=[out, corrections_output])
 
     with gr.Row():
         models = gr.Dropdown(
@@ -157,9 +157,9 @@ with gr.Blocks() as demo:
         )
     with gr.Row():
         source_block_size = gr.Dropdown(
-            choices=["
+            choices=["Sentence", "Paragraph"],
             label="Source Check Granularity",
-            value="
+            value="Sentence",
             interactive=True,
         )
 
@@ -389,5 +389,7 @@ with gr.Blocks() as demo:
     date_from = ""
     date_to = ""
 
-if __name__ == "__main__":
-    demo.launch(
+if __name__ == "__main__":
+    demo.launch(
+        share=True, server_name="0.0.0.0", auth=("polygraf-admin", "test@aisd")
+    )
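For context, the new dropdown is the user-facing side of this granularity change, and the last hunk adds an authenticated launch. Below is a minimal, self-contained sketch of how such a dropdown and launch could be wired together in a Gradio app; the check_sources handler and the surrounding layout are illustrative assumptions, not code from this repository. Only the dropdown options and the launch arguments mirror the diff above.

import gradio as gr

# Hypothetical handler: the real app feeds source_block_size into its
# plagiarism pipeline; this stub only echoes the selected granularity.
def check_sources(text, granularity):
    return f"Checking sources at {granularity} level for {len(text.split())} words"

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Input Text")
    with gr.Row():
        source_block_size = gr.Dropdown(
            choices=["Sentence", "Paragraph"],  # granularity options added in this commit
            label="Source Check Granularity",
            value="Sentence",
            interactive=True,
        )
    result = gr.Textbox(label="Result", interactive=False)
    run_btn = gr.Button("Check")
    run_btn.click(fn=check_sources, inputs=[input_text, source_block_size], outputs=result)

if __name__ == "__main__":
    # share=True exposes a public Gradio link; auth gates access with basic credentials,
    # matching the arguments added in the diff above.
    demo.launch(
        share=True, server_name="0.0.0.0", auth=("polygraf-admin", "test@aisd")
    )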
isotonic_regression_model.joblib
CHANGED
Binary files a/isotonic_regression_model.joblib and b/isotonic_regression_model.joblib differ
plagiarism.py
CHANGED
@@ -72,7 +72,7 @@ def split_sentence_blocks(text, size):
         return blocks
     else:
         blocks = []
-        size =
+        size = 1
         for para in text.split("\n\n"):
             sents = sent_tokenize(para)
             for i in range(len(sents)):