Anshu13 commited on
Commit
8345f12
·
verified ·
1 Parent(s): 715c0eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -57
app.py CHANGED
@@ -1,57 +1,57 @@
1
- import torch
2
- import torchvision
3
- from torchvision import transforms
4
- import gradio as gr
5
- import os
6
- import cv2
7
- from PIL import Image
8
- from model import create_model
9
- model,transform=create_model(num_of classes=3)
10
- model.eval()
11
- def classify_video(video):
12
- cap = cv2.VideoCapture(video)
13
- predictions = []
14
- Fire=[]
15
- Smoke=[]
16
- Default=[]
17
- while cap.isOpened():
18
- ret, frame = cap.read()
19
- if not ret:
20
- break
21
- img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
22
- img_pil = Image.fromarray(img)
23
- img_tensor = transform(img_pil).unsqueeze(0)
24
- with torch.no_grad():
25
- output = model(img_tensor)
26
- pred = output.argmax().item()
27
- predictions.append(pred)
28
- cap.release()
29
- class_names=['DEFAULT', 'FIRE', 'SMOKE']
30
- for i in predictions:
31
- if i == 1:
32
- Fire.append(i)
33
- elif i == 2:
34
- Smoke.append(i)
35
- else:
36
- Default.append(i)
37
- if Fire!=[] and Smoke!=[]:
38
- return f"Spotted {class_names[1]} and {class_names[2]}"
39
- elif Fire!=[]:
40
- return f"Spotted {class_names[1]}"
41
- elif Smoke!=[]:
42
- return f"Spotted {class_names[2]}"
43
- else:
44
- return f"Spotted {class_names[0]}"
45
- Description="An MobileNET model trained to classify Fire and Smoke through Videos"
46
- Article="Created at jupyter NoteBook with GPU NVIDIA_GeForce_MX350"
47
- example_list=[["Examples/"+ example] for example in os.listdir("Examples")if example.endswith((".mp4", ".avi", ".mov"))]
48
- gr.Interface(
49
- fn=classify_video,
50
- inputs=gr.Video(streaming=True),
51
- outputs="text",
52
- title="Fire and Smoke Classifier",
53
- examples=example_list,
54
- description=description,
55
- article=article,
56
- live="True"
57
- ).launch()
 
1
+ import torch
2
+ import torchvision
3
+ from torchvision import transforms
4
+ import gradio as gr
5
+ import os
6
+ import cv2
7
+ from PIL import Image
8
+ from model import create_model
9
+ model,transform=create_model(num_of_classes=3)
10
+ model.eval()
11
def classify_video(video):
    """Classify a video as containing fire, smoke, both, or neither.

    Runs the global classifier on successive frames and reports which of
    the FIRE / SMOKE classes appeared in at least one frame.

    Args:
        video: Path to a video file readable by OpenCV.

    Returns:
        A human-readable summary string, e.g. "Spotted FIRE and SMOKE".
    """
    class_names = ['DEFAULT', 'FIRE', 'SMOKE']
    # Presence flags instead of the original per-frame lists: the result only
    # depends on whether a class was ever seen, so there is no need to keep
    # an unbounded prediction list in memory for long videos.
    saw_fire = False
    saw_smoke = False

    cap = cv2.VideoCapture(video)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # OpenCV decodes frames as BGR; the model expects RGB input.
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img_tensor = transform(Image.fromarray(img)).unsqueeze(0)
            with torch.no_grad():
                pred = model(img_tensor).argmax().item()
            if pred == 1:
                saw_fire = True
            elif pred == 2:
                saw_smoke = True
            # Early exit: once both classes were observed the answer cannot
            # change, so skip decoding the rest of the video.
            if saw_fire and saw_smoke:
                break
    finally:
        # Release the capture even if decoding or inference raises.
        cap.release()

    if saw_fire and saw_smoke:
        return f"Spotted {class_names[1]} and {class_names[2]}"
    if saw_fire:
        return f"Spotted {class_names[1]}"
    if saw_smoke:
        return f"Spotted {class_names[2]}"
    return f"Spotted {class_names[0]}"
45
# UI copy shown on the Gradio page.
Description="An MobileNET model trained to classify Fire and Smoke through Videos"
Article="Created at jupyter NoteBook with GPU NVIDIA_GeForce_MX350"

# Demo videos bundled with the app, shown in the Examples panel.
# (Each example is wrapped in a list because the interface has one input.)
example_list = [
    ["Examples/" + example]
    for example in os.listdir("Examples")
    if example.endswith((".mp4", ".avi", ".mov"))
]

gr.Interface(
    fn=classify_video,
    inputs=gr.Video(streaming=True),
    outputs="text",
    title="Fire and Smoke Classifier",
    examples=example_list,
    description=Description,  # was `description` (undefined) — NameError at launch
    article=Article,          # was `article` (undefined) — NameError at launch
    live=True,                # `live` expects a bool; "True" was merely a truthy string
).launch()