Pratik-hf committed
Commit b99fcfe
1 Parent(s): a365d9d

initial commit
BiLSTM_INAPPRO_TEXT_CLASSIFIER.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:300a4b3a08be40174f2edd855f854bbc4372ac92f0d5cadf466e4fe5a572cbe1
+ size 22128040
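
The three lines above are a Git LFS pointer, not the model weights themselves: `oid` is the sha256 of the real file and `size` says it is about 22 MB, stored in LFS rather than in git history, so a plain clone without LFS fetches only this stub. As a quick illustration (a sketch, not part of this commit; the repo id below is hypothetical since the commit page does not name the Space), the resolved file can be fetched and loaded with huggingface_hub:

from huggingface_hub import hf_hub_download
from keras.models import load_model

# Hypothetical repo id -- substitute the actual Space this commit belongs to.
weights_path = hf_hub_download(
    repo_id="Pratik-hf/inappropriate-content-demo",
    repo_type="space",
    filename="BiLSTM_INAPPRO_TEXT_CLASSIFIER.h5",
)
model = load_model(weights_path)  # the download is the real .h5, not the pointer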
app.py ADDED
@@ -0,0 +1,90 @@
+ import json
+
+ import gradio as gr
+ import torch
+ from keras.models import load_model
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
+ from tensorflow.keras.preprocessing.text import tokenizer_from_json
+ from transformers import pipeline
+
+
+ # Load the saved tokenizer and the BiLSTM text classifier
+ with open('tokenizer.json') as f:
+     data = json.load(f)
+     tokenizer = tokenizer_from_json(data)
+
+ loaded_model = load_model("BiLSTM_INAPPRO_TEXT_CLASSIFIER.h5")
+
+
+ # Classify text as appropriate or inappropriate
+ def classify_text(text):
+     # Tokenize and pad to the length the model was trained on
+     sequence = tokenizer.texts_to_sequences([text])
+     padded_sequence = pad_sequences(sequence, maxlen=128)
+
+     # result[0][0] is the sigmoid probability of the "Inappropriate" class
+     result = loaded_model.predict(padded_sequence)
+
+     if result[0][0] >= 0.5:
+         label = "Inappropriate"
+     else:
+         label = "Appropriate"
+
+     return [round(float(result[0][0]) * 100, 2), label]
+
+
+ model = pipeline("image-classification", model="Pratik-hf/Inappropriate-image-classification-using-ViT")
+
+
+ # Classify an image as safe or unsafe with the ViT pipeline
+ def classify_image(image):
+     # The pipeline handles preprocessing and inference internally
+     with torch.no_grad():
+         outputs = model(image)
+
+     # Keep the label with the highest score
+     prediction = max(outputs, key=lambda x: x['score'])
+
+     if prediction['label'] == "LABEL_0":
+         prediction_label = "Safe"
+     else:
+         prediction_label = "Unsafe"
+
+     return [round(float(prediction['score']) * 100, 2), prediction_label]
+
+
+ # Classify whichever inputs are provided
+ def classify_inputs(text=None, image=None):
+     text_result = classify_text(text) if text is not None else None
+     image_result = classify_image(image) if image is not None else None
+     return text_result, image_result
+
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     with gr.Tab("Text"):
+         gr.Markdown(
+             """
+             # Inappropriate Text Detection
+             Enter text below to see the output.
+             """)
+         text_input = gr.Textbox(label="Input Text", lines=5)
+         btn1 = gr.Button("Classify Text")
+         with gr.Row():
+             output_text_percentage = gr.Text(label="Percentage")
+             output_text_label = gr.Text(label="Label")
+         btn1.click(fn=classify_text, inputs=text_input, outputs=[output_text_percentage, output_text_label])
+     with gr.Tab("Image"):
+         gr.Markdown(
+             """
+             # Inappropriate Image Detection
+             Upload an image below to see the output.
+             """)
+         image_input = gr.Image(type="pil")
+         btn2 = gr.Button("Classify Image")
+         with gr.Row():
+             output_image_percentage = gr.Text(label="Percentage")
+             output_image_label = gr.Text(label="Label")
+         btn2.click(fn=classify_image, inputs=image_input, outputs=[output_image_percentage, output_image_label])
+
+ demo.launch()
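
Since importing app.py would also execute demo.launch(), a self-contained smoke test of the text path is easier than importing the module. A minimal sketch, assuming tokenizer.json and the .h5 weights sit in the working directory (both are added in this commit):

import json

from keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import tokenizer_from_json

with open("tokenizer.json") as f:
    tokenizer = tokenizer_from_json(json.load(f))
model = load_model("BiLSTM_INAPPRO_TEXT_CLASSIFIER.h5")

# Mirrors classify_text: tokenize, pad to 128, read the single sigmoid output.
seq = pad_sequences(tokenizer.texts_to_sequences(["example input text"]), maxlen=128)
prob = float(model.predict(seq)[0][0])  # probability of the "Inappropriate" class
print("Inappropriate" if prob >= 0.5 else "Appropriate", f"{prob * 100:.2f}%")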
requirements.txt ADDED
Binary file (3.13 kB).
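
The contents are not rendered here; judging only from app.py's imports, a plausible (hypothetical) dependency list would look roughly like:

# hypothetical reconstruction -- the actual pins are in the raw file
gradio
tensorflow
keras
transformers
torch
Pillow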
 
tokenizer.json ADDED
The diff for this file is too large to render.