AIOmarRehan committed on
Commit
8c00eb3
·
verified ·
1 Parent(s): 904154d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -19
app.py CHANGED
@@ -1,28 +1,41 @@
1
  import gradio as gr
2
  import numpy as np
3
- import librosa
4
  from PIL import Image
5
- import tempfile
6
- import os
7
  from app.preprocess import preprocess_audio
8
  from app.model import predict
9
  from collections import Counter, defaultdict
 
 
 
 
 
 
 
 
 
 
 
10
 
 
 
 
11
 
12
- # Process Image Input
 
 
 
 
 
13
  def process_image_input(img):
14
- # Classify a spectrogram image directly using model.predict
15
  label, confidence, probs = predict(img)
16
  return label, round(confidence, 3), probs
17
 
18
 
19
- # Process Audio Input
20
  def process_audio_input(audio_path):
21
- # audio_path = filepath from Gradio
22
- # Preprocess β†’ mel-spectrogram β†’ predict per chunk
23
 
24
- # Preprocess to mel-spectrogram chunk images
25
- imgs = preprocess_audio(audio_path)
26
 
27
  all_preds = []
28
  all_confs = []
@@ -34,7 +47,7 @@ def process_audio_input(audio_path):
34
  all_confs.append(conf)
35
  all_probs.append(probs)
36
 
37
- # Majority Vote
38
  counter = Counter(all_preds)
39
  max_count = max(counter.values())
40
  candidates = [k for k, v in counter.items() if v == max_count]
@@ -48,15 +61,17 @@ def process_audio_input(audio_path):
48
  conf_sums[label] += all_confs[i]
49
  final_label = max(conf_sums, key=conf_sums.get)
50
 
51
- final_conf = float(np.mean([all_confs[i] for i, l in enumerate(all_preds) if l == final_label]))
 
 
52
 
53
  return final_label, round(final_conf, 3), all_preds, [round(c, 3) for c in all_confs]
54
 
55
 
56
- # Main prediction logic
57
  def classify(audio_path, image):
58
 
59
- # If an image is provided β†’ classify directly
60
  if image is not None:
61
  label, conf, probs = process_image_input(image)
62
  return {
@@ -65,10 +80,9 @@ def classify(audio_path, image):
65
  "Details": probs
66
  }
67
 
68
- # If an audio file is provided β†’ preprocess and classify
69
  if audio_path is not None:
70
  label, conf, all_preds, all_confs = process_audio_input(audio_path)
71
-
72
  return {
73
  "Final Label": label,
74
  "Confidence": conf,
@@ -76,7 +90,6 @@ def classify(audio_path, image):
76
  "All Chunk Confidences": all_confs
77
  }
78
 
79
- # Neither provided
80
  return "Please upload an audio file OR a spectrogram image."
81
 
82
 
@@ -85,7 +98,7 @@ interface = gr.Interface(
85
  fn=classify,
86
  inputs=[
87
  gr.Audio(type="filepath", label="Upload Audio (WAV/MP3)"),
88
- gr.Image(type="pil", label="Upload Spectrogram Image")
89
  ],
90
  outputs=gr.JSON(label="Prediction Results"),
91
  title="General Audio Classifier (Audio + Spectrogram Support)",
@@ -93,7 +106,6 @@ interface = gr.Interface(
93
  "Upload a raw audio file OR a spectrogram image.\n"
94
  "If audio β†’ model preprocesses into mel-spectrogram chunks.\n"
95
  "If image β†’ model classifies the spectrogram directly.\n"
96
- "Built using CNN + Mel-Spectrogram + Gradio."
97
  ),
98
  )
99
 
 
1
  import gradio as gr
2
  import numpy as np
 
3
  from PIL import Image
 
 
4
  from app.preprocess import preprocess_audio
5
  from app.model import predict
6
  from collections import Counter, defaultdict
7
+ import librosa
8
+
9
+
10
+ # IMAGE HANDLING
11
def safe_load_image(img):
    """Normalize a Gradio image input into a PIL RGBA image.

    Gradio sometimes delivers numpy arrays instead of PIL images, so
    arrays are converted first. Returns None when no image was supplied.
    """
    if img is None:
        return None

    # Numpy array input (Gradio's default for some setups) → PIL image.
    pil_img = Image.fromarray(img) if isinstance(img, np.ndarray) else img

    # RGBA conversion keeps the alpha channel of spectrogram PNGs intact.
    return pil_img.convert("RGBA")
26
+
27
+
28
+ # PROCESS SPECTROGRAM IMAGE
29
def process_image_input(img):
    """Classify a spectrogram image directly with the model.

    Returns a (label, confidence rounded to 3 decimals, class-probability
    mapping) tuple from ``predict``.
    """
    prepared = safe_load_image(img)
    label, confidence, probs = predict(prepared)
    return label, round(confidence, 3), probs
33
 
34
 
35
+ # PROCESS RAW AUDIO
36
  def process_audio_input(audio_path):
 
 
37
 
38
+ imgs = preprocess_audio(audio_path) # returns list of PIL RGBA images
 
39
 
40
  all_preds = []
41
  all_confs = []
 
47
  all_confs.append(conf)
48
  all_probs.append(probs)
49
 
50
+ # Majority vote
51
  counter = Counter(all_preds)
52
  max_count = max(counter.values())
53
  candidates = [k for k, v in counter.items() if v == max_count]
 
61
  conf_sums[label] += all_confs[i]
62
  final_label = max(conf_sums, key=conf_sums.get)
63
 
64
+ final_conf = float(
65
+ np.mean([all_confs[i] for i, lbl in enumerate(all_preds) if lbl == final_label])
66
+ )
67
 
68
  return final_label, round(final_conf, 3), all_preds, [round(c, 3) for c in all_confs]
69
 
70
 
71
+ # MAIN CLASSIFIER
72
  def classify(audio_path, image):
73
 
74
+ # If spectrogram image
75
  if image is not None:
76
  label, conf, probs = process_image_input(image)
77
  return {
 
80
  "Details": probs
81
  }
82
 
83
+ # If raw audio
84
  if audio_path is not None:
85
  label, conf, all_preds, all_confs = process_audio_input(audio_path)
 
86
  return {
87
  "Final Label": label,
88
  "Confidence": conf,
 
90
  "All Chunk Confidences": all_confs
91
  }
92
 
 
93
  return "Please upload an audio file OR a spectrogram image."
94
 
95
 
 
98
  fn=classify,
99
  inputs=[
100
  gr.Audio(type="filepath", label="Upload Audio (WAV/MP3)"),
101
+ gr.Image(type="pil", label="Upload Spectrogram Image (PNG RGBA Supported)")
102
  ],
103
  outputs=gr.JSON(label="Prediction Results"),
104
  title="General Audio Classifier (Audio + Spectrogram Support)",
 
106
  "Upload a raw audio file OR a spectrogram image.\n"
107
  "If audio β†’ model preprocesses into mel-spectrogram chunks.\n"
108
  "If image β†’ model classifies the spectrogram directly.\n"
 
109
  ),
110
  )
111