Update app.py
app.py CHANGED
@@ -3,39 +3,21 @@ from huggingface_hub import InferenceClient
 import os
 
 HF_API_TOKEN = os.getenv("HF_API_TOKEN")
-MODEL = "bigscience/bloom-560m" #"microsoft/Phi-3-mini-128k-instruct"
-
-# Initialize the InferenceClient with the token and model.
-# Using a lightweight model like google/flan-t5-small.
-# client = InferenceClient(
-# "google/flan-t5-small",
-# token=HF_API_TOKEN
-# )
 client = InferenceClient(
     provider="nebius",
     api_key=HF_API_TOKEN
 )
 
-
-
 # -------------------------
-#
+# API Call
 # -------------------------
-def query_llm(tweet, mode):
-    prompt = f"Text: {
+def query_llm(text, mode):
+    prompt = f"Text: {text}\n\n"
     if mode == "simple":
         prompt += "Task: Is the above TEXT toxic? First clearly state your decision. Then, provide specific reason(s) for your decision. If there is more than one reason, provide them in a numbered list."
     else:
-        prompt += "Task: Is the above TEXT toxic? First clearly state your decision. Then, provide specific reason(s) for your decision. If there is more than one reason, provide them in a numbered list. Your reason(s) must be non-redundant and jointly sufficient to justify your decision. In other words,
+        prompt += "Task: Is the above TEXT toxic? First clearly state your decision. Then, provide specific reason(s) for your decision. If there is more than one reason, provide them in a numbered list. Your reason(s) must be (a) non-redundant, (b) confidently and meaningfully engage with the given Text, and (c) jointly sufficient to justify your decision. In other words, you must have utilized all possible information from the input text and from external sources before arriving at your conclusion and explanation."
 
-    # # Use the client to generate text from the full prompt.
-    # response = client.text_generation(
-    # prompt=prompt,
-    # max_new_tokens=250, # Control the length of the generated output.
-    # stream=False, # Set to True for streaming responses.
-    # )
-    # return response
-
     completion = client.chat.completions.create(
         model="google/gemma-2-2b-it",
         messages=[
@@ -49,9 +31,9 @@ def query_llm(tweet, mode):
 
 
 # -------------------------
-# Preloaded
+# Preloaded texts
 # -------------------------
-preloaded_tweets = [
+preloaded_texts = [
     "People from the outside must look at us and think what stupid people, what are they doing?",
     "Donald Trump was responding to comments from Prime Minister David Cameron, who called his proposal to temporarily ban all Muslims from the United States divisive, stupid and wrong.",
     "Every binder should have a warning label on it that says CAUTION: BORING SHIT INSIDE.",
@@ -64,50 +46,47 @@ preloaded_tweets = [
 # -------------------------
 # Gradio Blocks UI
 # -------------------------
-with gr.Blocks(title="
-    gr.Markdown("##
-    gr.Markdown("Select a
+with gr.Blocks(title="Toxicity Explainer") as demo:
+    gr.Markdown("## Toxicity Explainer")
+    gr.Markdown("Select a text from the list or enter your own, then view two explanations (naive and our theory-guided) on the right.")
 
     with gr.Row():
         # Left pane
         with gr.Column(scale=1):
-            gr.Markdown("### Select or Enter a
+            gr.Markdown("### Select or Enter a Text")
 
-
-
-                placeholder="Or enter your own
+            text_list = gr.Dropdown(preloaded_texts, label="Choose a text")
+            custom_text = gr.Textbox(
+                placeholder="Or enter your own text here...",
                 lines=3,
-                label="Custom
+                label="Custom Text"
             )
 
             # Button to confirm selection
-            submit_btn = gr.Button("Use this
+            submit_btn = gr.Button("Use this Text")
 
         # Right pane
         with gr.Column(scale=2):
             gr.Markdown("### Explanations")
 
-            with gr.Tab("
-                simple_output = gr.Textbox(label="
+            with gr.Tab("Naive Explanation"):
+                simple_output = gr.Textbox(label="Naive Explanation", lines=8)
 
-            with gr.Tab("
-                detailed_output = gr.Textbox(label="
+            with gr.Tab("Theory-Grounded Explanation"):
+                detailed_output = gr.Textbox(label="Theory-Grounded Explanation", lines=8)
 
-
-
-
-
-        tweet = custom.strip() if custom else selected
-        if not tweet:
-            return "Please enter or select a tweet!", "Please enter or select a tweet!"
+    def process_text(selected, custom):
+        text = custom.strip() if custom else selected
+        if not text:
+            return "Please enter or select a text!", "Please enter or select a text!"
 
-        simple = query_llm(
-        detailed = query_llm(
+        simple = query_llm(text, "simple")
+        detailed = query_llm(text, "detailed")
        return simple, detailed
 
    submit_btn.click(
-
-        inputs=[
+        process_text,
+        inputs=[text_list, custom_text],
        outputs=[simple_output, detailed_output]
    )
 
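
Note: the first hunk cuts off inside the client.chat.completions.create(...) call (at messages=[), so the commit does not show the messages payload or how the model's reply is returned from query_llm. The sketch below is a minimal, hypothetical reconstruction of how such a call is typically completed with huggingface_hub's chat-completion interface. The messages structure, the max_tokens value, and the return expression are assumptions and not part of this commit; the provider, api_key, and model arguments mirror the diff.

    # Hypothetical completion of the truncated query_llm() call -- illustration only.
    # Assumed pieces: the messages payload, max_tokens, and the return expression.
    import os

    from huggingface_hub import InferenceClient

    client = InferenceClient(provider="nebius", api_key=os.getenv("HF_API_TOKEN"))

    def query_llm(text, mode):
        # Prompt construction as in the diff (full task instructions elided here).
        prompt = f"Text: {text}\n\nTask: Is the above TEXT toxic? ..."
        completion = client.chat.completions.create(
            model="google/gemma-2-2b-it",
            messages=[{"role": "user", "content": prompt}],  # assumed single-turn user message
            max_tokens=512,                                   # assumed output length budget
        )
        # The OpenAI-style response object carries the reply text here:
        return completion.choices[0].message.content

Under these assumptions, query_llm(text, "simple") returns a plain string, which matches how process_text feeds the two results directly into the gr.Textbox outputs wired up by submit_btn.click.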