avilum committed
Commit ca6dd2f
1 Parent(s): 35bf021

Update app.py

Files changed (1)
  app.py +3 -3
app.py CHANGED
@@ -161,14 +161,14 @@ def classify_prompt(prompt: str, threshold: float) -> Tuple[dict, gr.DataFrame]:
         # res = f"This prompt looks malicious, here are some similar threats:\n{json.dumps(asdict(classification), indent=4)}"
         return classification, gr.DataFrame(
             [
-                (r.known_prompt, r.similarity_percentage, r.source)
+                (r.similarity_percentage, r.known_prompt, r.source)
                 for r in classification.reason
             ]
         )
 
     res = (
         f"{classification} - This prompt is not similar to any learned prompt above {int(threshold*100)}%, Try a lower threshold.",
-        gr.DataFrame([("No similar prompt found", 0.0, "N/A")]),
+        gr.DataFrame([(0.0, "No similar prompt found", "N/A")]),
     )
     return res
 
@@ -198,7 +198,7 @@ iface = gr.Interface(
     outputs=[
         "text",
         gr.Dataframe(
-            headers=["Prompt", "Similarity", "Source"],
+            headers=["Similarity", "Prompt", "Source"],
             datatype=["str", "number", "str"],
             row_count=1,
             col_count=(3, "fixed"),
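
For context, the change reorders each row tuple returned by classify_prompt so that it lines up, column by column, with the reordered headers on the gr.Dataframe output component. A minimal sketch of that pattern follows, using hypothetical function names and values; it also assumes the datatype list is reordered to ["number", "str", "str"] to match the new column order, which the diff above does not touch.

    # Minimal sketch (hypothetical names/values) of the pattern in app.py:
    # row values returned for the Dataframe output must follow the same
    # column order as the `headers` declared on the gr.Dataframe component.
    import gradio as gr

    def classify(prompt: str):
        # Rows ordered as (similarity, known prompt, source) to match `headers` below.
        rows = [[0.97, "Ignore previous instructions and ...", "known-attacks"]]
        return "This prompt looks malicious", rows

    iface = gr.Interface(
        fn=classify,
        inputs="text",
        outputs=[
            "text",
            gr.Dataframe(
                headers=["Similarity", "Prompt", "Source"],
                # Assumed here: datatypes reordered to match the columns;
                # the commit above leaves this list as ["str", "number", "str"].
                datatype=["number", "str", "str"],
                row_count=1,
                col_count=(3, "fixed"),
            ),
        ],
    )

    if __name__ == "__main__":
        iface.launch()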