import gradio as gr
from transformers import TFDistilBertForSequenceClassification, DistilBertTokenizerFast
import tensorflow as tf

# Load the model and tokenizer from Hugging Face Hub
model_name = "Buebito/HamOrSpam_Model"  # Replace with your actual model path
tokenizer = DistilBertTokenizerFast.from_pretrained(model_name)
model = TFDistilBertForSequenceClassification.from_pretrained(model_name)

def classify_text(text):
    # Tokenize the text input and prepare it for the model
    inputs = tokenizer(text, return_tensors="tf", truncation=True, padding=True, max_length=512)

    # Run the model and take the classification logits
    logits = model(**inputs).logits

    # Convert logits to probabilities using softmax
    probabilities = tf.nn.softmax(logits, axis=-1)

    # Get the index of the class with the highest probability
    prediction_index = tf.argmax(probabilities, axis=-1).numpy()[0]

    # Convert the index to label
    label = "ham" if prediction_index == 0 else "spam"

    # Get the probability of each class as plain Python floats (JSON-friendly)
    ham_prob = float(probabilities[0][0])
    spam_prob = float(probabilities[0][1])

    # Return the label and the probabilities separately
    return label, {"ham": ham_prob, "spam": spam_prob}

# Create the Gradio interface
iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),
    outputs=[
        gr.Label(label="Classification"),
        gr.JSON(label="Probabilities")
    ]
)

# Launch the app
iface.launch()
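
# Example usage (a sketch, not part of the app): with this file imported in a
# Python shell, classify_text can be called directly without the Gradio UI.
# The sample message below is purely illustrative.
#
#   label, probs = classify_text("Congratulations! You have won a free prize, reply now to claim.")
#   print(label, probs)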