GitHubDragonFly committed on
Commit
0900ac5
·
verified ·
1 Parent(s): 1816cd9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import time

import gradio as gr
from transformers import pipeline

# Load a small, free text generation model.
# NOTE: weights are downloaded/loaded at import time, so the first start-up
# of the app is slow; subsequent calls reuse the in-process pipeline.
generator = pipeline("text-generation", model="bigscience/bloomz-560m")

# Load a moderation pipeline used to screen user input before generation.
# NOTE(review): downstream code assumes this returns a list with one
# {'label': ..., 'score': ...} dict per input — confirm against the
# unitary/toxic-bert model card.
moderator = pipeline("text-classification", model="unitary/toxic-bert")
10
+
11
# Simple in-memory rate limiter: per-IP timestamps of recent requests.
# Not shared across processes and reset on restart — fine for a single demo.
request_log = {}
MAX_REQUESTS = 10    # max requests
WINDOW_SECONDS = 60  # per 60 seconds

def is_rate_limited(ip):
    """Return True if *ip* has exceeded MAX_REQUESTS in the last WINDOW_SECONDS.

    A request that is allowed is recorded; a rejected request is NOT recorded,
    so hammering the endpoint does not extend the lockout.
    """
    now = time.time()
    window_start = now - WINDOW_SECONDS

    # Fix: drop IPs whose entire history has expired. Previously only the
    # calling IP's list was pruned, so entries for one-off visitors lived
    # forever and the dict grew without bound.
    for stale_ip in [k for k, ts in request_log.items()
                     if not ts or ts[-1] <= window_start]:
        del request_log[stale_ip]

    # Keep only this IP's requests that fall inside the current window.
    history = [t for t in request_log.get(ip, []) if t > window_start]
    if len(history) >= MAX_REQUESTS:
        return True
    history.append(now)
    request_log[ip] = history
    return False
27
+
28
def contains_profanity(text, threshold=0.5):
    """
    Classify *text* with the Hugging Face moderation model.

    Returns True when the top prediction is labelled "toxic" with a
    confidence of at least *threshold*.
    """
    # The pipeline yields one dict per input, e.g. {'label': 'toxic', 'score': 0.87}.
    prediction = moderator(text)[0]
    is_toxic = prediction["label"].lower() == "toxic"
    return is_toxic and prediction["score"] >= threshold
40
+
41
# Define your function with some safeguards
def generate_conclusion(user_input, request: gr.Request):
    """Generate a short conclusion from *user_input* with abuse safeguards.

    Checks run cheapest-first: rate limit, empty input, length cap, then the
    toxicity model, and finally text generation. Returns either the generated
    conclusion or a user-facing warning string.
    """
    ip = request.client.host if request else "unknown"
    if is_rate_limited(ip):
        return "⚠️ Too many requests. Please wait a bit before trying again."

    if not user_input.strip():
        return "⚠️ Please enter some text."

    # Fix: enforce the length cap BEFORE running the moderation model.
    # Previously arbitrarily long untrusted input was fed to toxic-bert
    # before being rejected.
    if len(user_input) > 300:
        return "⚠️ Input too long. Please keep it under 300 characters."

    if contains_profanity(user_input):
        return "⚠️ Your input contains inappropriate or toxic language."

    # Add your instruction prompt
    prompt = (
        "Generate only the final conclusion from the following input. "
        "Do not include elaboration, explanation, or supporting details.\n\n"
        f"{user_input}"
    )

    # Generate with limits to prevent abuse
    try:
        output = generator(
            prompt,
            max_new_tokens=50,  # keep responses short
            do_sample=True,
            temperature=0.7,
            return_full_text=False,
        )
        return output[0]["generated_text"]
    except Exception as e:
        # Top-level boundary: surface a readable message instead of a traceback.
        return f"⚠️ An error occurred: {str(e)}"
77
+
78
# Gradio interface
demo = gr.Interface(
    fn=generate_conclusion,
    inputs=gr.Textbox(lines=4, placeholder="Enter your text here..."),
    outputs="text",
    title="Conclusion Generator Demo",
    description="Generates a short conclusion from your input. Limited to 300 characters."
)

if __name__ == "__main__":
    # Fix: `concurrency_count` was removed from queue() in Gradio 4 (replaced
    # by `default_concurrency_limit`), so the original call crashes at startup
    # on current Gradio. Try the modern signature first, fall back to 3.x.
    try:
        demo.queue(default_concurrency_limit=2, max_size=5)  # Gradio 4+
    except TypeError:
        demo.queue(concurrency_count=2, max_size=5)  # Gradio 3.x
    demo.launch()