"""Gradio app: classify Arabic privacy-policy text into PDPL sections.

Loads the ``Norah-K/PDPL_CAMeLBERT`` sequence-classification model and serves
a single text-in / label-out interface.
"""

import subprocess
import sys

# Best-effort bootstrap: install missing third-party libraries at startup.
# NOTE(review): runtime installation is fragile — prefer a requirements.txt.
required_libraries = ["transformers", "torch"]
for lib in required_libraries:
    try:
        __import__(lib)
    except ImportError:
        # Use the *current* interpreter's pip so the package lands in the
        # right environment; check=True surfaces install failures immediately.
        subprocess.run([sys.executable, "-m", "pip", "install", lib], check=True)

import gradio as gr
import numpy as np  # noqa: F401 -- kept from original; unused in this script
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Hugging Face model id of the fine-tuned Arabic classifier.
model_name = "Norah-K/PDPL_CAMeLBERT"

# Load tokenizer and model once at startup.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only: disable dropout / use eval-mode batch norm

# Class index -> Arabic section label. Hoisted to module level so the dict is
# built once, not on every predict() call.
label_mapping = {
    0: "موافقة المستخدم",
    1: "بيانات التواصل أو تحت السن القانوني",
    2: "جمع ومعالجة البيانات",
    3: "الاحتفاظ بالبيانات",
    4: "حماية البيانات",
    5: "مشاركة البيانات",
    6: "حقوق المستخدم",
    7: "الاعلانات",
    8: "تنبيهات الاختراق",
    9: "المسؤوليات",
}


def predict(text: str) -> str:
    """Return the Arabic label of the most probable class for *text*.

    The input is tokenized (padded/truncated to the model's limit), run
    through the classifier without gradient tracking, and the argmax over
    the logits is mapped to its human-readable label.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        output = model(**inputs)
    # argmax over logits picks the same class as argmax over softmax
    # probabilities, so the explicit softmax + list.index(max(...)) dance
    # from the original is unnecessary.
    predicted_index = int(torch.argmax(output.logits, dim=1).item())
    return label_mapping[predicted_index]


# Text-in / text-out web UI around predict().
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guarded so importing this module does not start the server.
    iface.launch()