ALVHB95 committed
Commit 9261560
1 Parent(s): 60a9ab7
Files changed (1):
  1. app.py  +29 -2
app.py CHANGED
@@ -36,9 +36,36 @@ from huggingface_hub import from_pretrained_keras
 
 import tensorflow as tf
 from tensorflow import keras
+from PIL import Image
 
 # Cell 1: Image Classification Model
-model1 = from_pretrained_keras("rocioadlc/EfficientNetV2L")
+model1 = from_pretrained_keras("ALVHB95/finalsupermodelofthedestiny")
+
+# Define class labels
+class_labels = ['cardboard', 'compost', 'glass', 'metal', 'paper', 'plastic', 'trash']
+
+# Function to predict image label and score
+def predict_image(input):
+    # Resize the image to the size expected by the model
+    image = input.resize((224, 224))
+    # Convert the image to a NumPy array
+    image_array = tf.keras.preprocessing.image.img_to_array(image)
+    # Normalize the image
+    image_array /= 255.0
+    # Expand the dimensions to create a batch
+    image_array = tf.expand_dims(image_array, 0)
+    # Predict using the model
+    predictions = model1.predict(image_array)
+
+    # Get the predicted class label
+    predicted_class_index = tf.argmax(predictions, axis=1).numpy()[0]
+    predicted_class_label = class_labels[predicted_class_index]
+
+    # Get the confidence score of the predicted class
+    confidence_score = predictions[0][predicted_class_index]
+
+    # Return the input image, the predicted class label, and its confidence score
+    return input, {predicted_class_label: confidence_score}
 
 def predict_image(input_img):
     predictions = model1.predict(input_img)
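
For reference, the new prediction path added in this hunk can be exercised outside Gradio. The snippet below is a minimal sketch rather than part of the commit: it assumes the checkpoint expects 224x224 RGB inputs scaled to [0, 1], as the preprocessing above implies, and test.jpg is a hypothetical placeholder for any local sample image.

from PIL import Image
import tensorflow as tf
from huggingface_hub import from_pretrained_keras

# Load the checkpoint referenced in this commit
model1 = from_pretrained_keras("ALVHB95/finalsupermodelofthedestiny")
print(model1.input_shape)  # sanity-check the expected input size

class_labels = ['cardboard', 'compost', 'glass', 'metal', 'paper', 'plastic', 'trash']

# "test.jpg" is a placeholder path for any local sample image
img = Image.open("test.jpg").convert("RGB").resize((224, 224))
batch = tf.expand_dims(tf.keras.preprocessing.image.img_to_array(img) / 255.0, 0)

predictions = model1.predict(batch)
idx = int(tf.argmax(predictions, axis=1).numpy()[0])
print(class_labels[idx], float(predictions[0][idx]))
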
@@ -47,7 +74,7 @@ def predict_image(input_img):
 image_gradio_app = gr.Interface(
     fn=predict_image,
     inputs=gr.Image(label="Select waste candidate", sources=['upload', 'webcam'], type="pil"),
-    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result")],
     title="What kind of waste do you have?",
 )
 
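
The two outputs line up positionally with the new function's return value: the first element (the PIL image) feeds gr.Image, and the second (the {predicted_class_label: confidence_score} dict) feeds gr.Label. Below is a minimal sketch of the end-to-end wiring, assuming the predict_image defined in the first hunk is in scope; the launch() call is illustrative and not part of this diff.

import gradio as gr

image_gradio_app = gr.Interface(
    fn=predict_image,  # the new function defined in the first hunk
    inputs=gr.Image(label="Select waste candidate", sources=['upload', 'webcam'], type="pil"),
    outputs=[
        gr.Image(label="Processed Image"),  # receives the PIL image returned first
        gr.Label(label="Result"),           # receives the {label: confidence} dict returned second
    ],
    title="What kind of waste do you have?",
)

if __name__ == "__main__":
    image_gradio_app.launch()
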