import gradio as gr
import requests
import os
import json
import re

# Configure the endpoint and authentication
ENDPOINT_URL = os.environ.get("ENDPOINT_URL", "https://aqxz70rjxilvoxwd.us-east-1.aws.endpoints.huggingface.cloud")
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "").strip()
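
# Expected response format (an assumption inferred from the parsing logic
# below, not from the endpoint's documentation): the model returns a JSON
# string, optionally wrapped in a Markdown code fence, e.g.
#   '```json\n{"Safety": "Unsafe", "Score": "0.92", "Unsafe Categories": ["Hate Speech"]}\n```'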

def is_token_configured():
    if not HF_API_TOKEN:
        return "⚠️ Warning: HF_API_TOKEN is not configured. The app won't work until you add this secret in your Space settings."
    return "✅ API token is configured"


class SafetyChecker:
    def __init__(self):
        self.ENDPOINT_URL = ENDPOINT_URL
        self.HF_API_TOKEN = HF_API_TOKEN

    # def extract_and_parse_json(self, response: str):
    #     match = re.search(r'```(?:json)?\s*(.*?)\s*```', response, re.DOTALL)
    #     content = match.group(1).strip() if match else response.strip()
    #     if not content.startswith("{") and ":" in content:
    #         content = "{" + content + "}"
    #     try:
    #         parsed = json.loads(content)
    #     except json.JSONDecodeError:
    #         cleaned = content.replace(""", "\"").replace(""", "\"").replace("'", "\"")
    #         cleaned = re.sub(r',\s*}', '}', cleaned)
    #         cleaned = re.sub(r',\s*]', ']', cleaned)
    #         try:
    #             parsed = json.loads(cleaned)
    #         except Exception:
    #             pairs = re.findall(r'"([^"]+)":\s*"?([^",\{\}\[\]]+)"?', content)
    #             if pairs:
    #                 parsed = {k.strip(): v.strip() for k, v in pairs}
    #             else:
    #                 parsed = {
    #                     "Safety": "",
    #                     "Score": "",
    #                     "Unsafe Categories": "",
    #                 }
    #     return parsed

    def extract_and_parse_json(self, response: str):
        # Strip a surrounding Markdown code fence (```json ... ```) if present
        if response.startswith("```"):
            response = response.strip("`").strip()
            if response.startswith("json"):
                response = response[4:].strip()
        # Now response should be clean JSON text
        try:
            parsed = json.loads(response)
        except Exception:
            # If somehow still not JSON, return default empty
            parsed = {
                "Safety": "",
                "Score": "",
                "Unsafe Categories": [],
            }
        return parsed
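
    # Illustrative example (assumed endpoint output, not taken from its docs):
    #   extract_and_parse_json('```json\n{"Safety": "Unsafe", "Score": "0.92", "Unsafe Categories": ["Hate Speech"]}\n```')
    #   -> {"Safety": "Unsafe", "Score": "0.92", "Unsafe Categories": ["Hate Speech"]}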

    def check_safety(self, input_text):
        if not input_text.strip():
            return "⚠️ Please enter some text to check."
        payload = {"inputs": input_text}
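        # {"inputs": ...} is the standard text payload for Hugging Face
        # Inference Endpoints; this assumes the deployed handler expects it.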
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.HF_API_TOKEN}",
        }
        try:
            response = requests.post(self.ENDPOINT_URL, json=payload, headers=headers, timeout=30)
            if response.status_code == 200:
                result_raw = response.json()
                # The endpoint may return either a raw string or an already-parsed object
                if isinstance(result_raw, str):
                    parsed_result = self.extract_and_parse_json(result_raw)
                else:
                    parsed_result = result_raw
                safety = parsed_result.get("Safety", "Unknown")
                # score = parsed_result.get("Score", "")
                # categories = parsed_result.get("Unsafe Categories", "")
                is_safe = str(safety).lower() == "safe"
                if is_safe:
                    return f"✅ {result_raw}"
                else:
                    return f"❌ {result_raw}"
            else:
                return f"❌ Error: Request failed with status code {response.status_code}.\nDetails: {response.text}"
        except requests.exceptions.Timeout:
            return "❌ Error: Request timed out."
        except requests.exceptions.ConnectionError:
            return "❌ Error: Failed to connect to the endpoint."
        except Exception as e:
            return f"❌ Error: {str(e)}"


# Create an instance of SafetyChecker
checker = SafetyChecker()
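
# Quick local smoke test (illustrative; requires a valid HF_API_TOKEN and a
# running endpoint):
#   print(checker.check_safety("Hello, how are you today?"))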

# Build Gradio App
with gr.Blocks(title="Safety Content Classifier", css="footer {display: none !important}") as demo:
    gr.Markdown("# Safety Content Classifier")
    gr.Markdown("## Connected to external safety model endpoint")
    token_status = gr.Markdown(is_token_configured())

    with gr.Accordion("About this demo", open=False):
        gr.Markdown("""
        This demo uses an external API endpoint to classify text based on safety policies.
        It checks content against the following categories:

        - Harassment
        - Dangerous Content
        - Hate Speech
        - Sexually Explicit Information

        The model will respond with 'Safe' or 'Unsafe' followed by any violated categories.
        """)

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Enter text to check",
                placeholder="Type here...",
                lines=5
            )
            check_button = gr.Button("Check Safety", variant="primary")
        with gr.Column():
            output = gr.Textbox(
                label="Safety Result",
                lines=8
            )

    # Notice: now we use checker.check_safety instead of check_safety directly
    check_button.click(fn=checker.check_safety, inputs=[input_text], outputs=output)
    input_text.submit(fn=checker.check_safety, inputs=[input_text], outputs=output)

    gr.Examples(
        [
            ["Hello, how are you today?"],
            ["I want to learn how to make a bomb."],
            ["Let's meet for coffee tomorrow."],
            ["COVID vaccines are a secret plot by the government."],
        ],
        inputs=[input_text]
    )

demo.launch()