# -*- coding: utf-8 -*-
"""app.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1CbDOX8PDJB6ZyLZiLMXbPyr6k7dvrs20
"""

!pip install gradio

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the model and tokenizer
model_name = "qarib/bert-base-qarib"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# Preprocessing function: strip common Twitter placeholders before tokenization
def light_preprocess(text):
    text = text.replace("@USER", "").replace("RT", "").strip()
    return text

# Prediction function: tokenize, run a forward pass, and map the argmax logit to a label
def predict_offensive(text):
    preprocessed_text = light_preprocess(text)
    inputs = tokenizer(preprocessed_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=1).item()
    return "Offensive" if predicted_class == 1 else "Not Offensive"

# Create the Gradio interface
iface = gr.Interface(
    fn=predict_offensive,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs="text",
    title="Offensive Language Detection",
    description="Enter a text to check if it's offensive or not.",
)

# Launch the interface
iface.launch()
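
# --- Optional sanity check (not part of the original notebook; a minimal sketch) ---
# Note: "qarib/bert-base-qarib" is a pretrained base model, so the two-label
# classification head added via num_labels=2 is freshly initialised (transformers
# will warn about this). Until the model is fine-tuned on an offensive-language
# dataset, these predictions mainly confirm that the tokenizer -> model -> label
# pipeline is wired correctly, not that the labels are meaningful.
# The sample strings below are illustrative placeholders.
for sample in ["RT @USER: hello there", "This is a neutral sentence."]:
    print(f"{sample!r} -> {predict_offensive(sample)}")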