Thomas Chaigneau committed on
Commit
fe1f7ad
1 Parent(s): 1fcd057

update app

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -20,12 +20,11 @@ from huggingface_hub import from_pretrained_keras
 IMG_SIZE = 224
 NUM_FEATURES = 2048
 
-model = from_pretrained_keras("ChainYo/video-classification-cnn-rnn")
+model = from_pretrained_keras("keras-io/video-classification-cnn-rnn")
 samples = []
 for file in os.listdir("samples"):
-    print(file)
     tag = file.split("_")[1]
-    samples.append([f"samples/{file}", 25])
+    samples.append([f"samples/{file}", 20])
 
 
 def crop_center_square(frame):
@@ -96,9 +95,10 @@ def sequence_prediction(path):
     frame_features, frame_mask = prepare_video(frames)
     probabilities = model.predict([frame_features, frame_mask])[0]
 
+    preds = {}
     for i in np.argsort(probabilities)[::-1]:
-        print(f" {class_vocab[i]}: {probabilities[i] * 100:5.2f}%")
-    return frames
+        preds[class_vocab[i]] = float(probabilities[i])
+    return preds
 
 
 def to_gif(images):
@@ -109,10 +109,10 @@ def to_gif(images):
 article = article = "<div style='text-align: center;'><a href='https://github.com/ChainYo' target='_blank'>Space by Thomas Chaigneau</a><br><a href='https://keras.io/examples/vision/video_classification/' target='_blank'>Keras example by Sayak Paul</a></div>"
 app = gr.Interface(
     sequence_prediction,
-    inputs=[gr.inputs.Video(label="Video", type="mp4")],
-    outputs=[],
+    inputs=[gr.inputs.Video(label="Video", type="avi")],
+    outputs=[gr.outputs.Label(label="Prediction", type="confidences")],
     title="Keras Video Classification CNN-RNN model",
     description="Keras Working Group",
     article=article,
     examples=samples
-).launch(enable_queue=True, cache_examples=True)
+).launch(enable_queue=True)
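
The substance of this commit: sequence_prediction now returns a dictionary mapping class names to probabilities, which is the format gr.outputs.Label(type="confidences") renders as a ranked confidence list, and the Space loads the keras-io copy of the model. Below is a minimal sketch of that output wiring, assuming the legacy gr.inputs/gr.outputs API this app uses; class_vocab and the stubbed probabilities are illustrative stand-ins for the unchanged parts of app.py (the real values come from the label vocabulary and model.predict([frame_features, frame_mask])[0]).

# Sketch only: illustrative labels and a uniform probability vector stand in for
# the real model output in app.py.
import numpy as np
import gradio as gr

class_vocab = ["CricketShot", "PlayingCello", "Punch", "ShavingBeard", "TennisSwing"]  # illustrative

def sequence_prediction_sketch(path):
    # Stand-in for probabilities = model.predict([frame_features, frame_mask])[0]
    probabilities = np.full(len(class_vocab), 1.0 / len(class_vocab))
    preds = {}
    for i in np.argsort(probabilities)[::-1]:
        # gr.outputs.Label(type="confidences") expects {label: float confidence}
        preds[class_vocab[i]] = float(probabilities[i])
    return preds

gr.Interface(
    sequence_prediction_sketch,
    inputs=[gr.inputs.Video(label="Video", type="avi")],
    outputs=[gr.outputs.Label(label="Prediction", type="confidences")],
).launch(enable_queue=True)

Dropping cache_examples=True from launch() also means example outputs are no longer pre-computed when the app starts; the sample videos only run through the model when selected.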