SamT6
committed on
Commit
β’
5943ce0
1
Parent(s):
6b781e5
init
Browse files- .gitattributes +1 -0
- README.md +3 -3
- app.py +79 -0
- requirements.txt +3 -0
- sample_images/earthquake.png +3 -0
- sample_images/hurricane.png +3 -0
- sample_images/tsunami.png +3 -0
- sample_images/volcano.png +3 -0
- sample_images/wildfire.png +3 -0
.gitattributes
CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -1,8 +1,8 @@
|
|
1 |
---
|
2 |
title: Soteria
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
sdk: gradio
|
7 |
app_file: app.py
|
8 |
pinned: false
|
|
|
1 |
---
|
2 |
title: Soteria
|
3 |
+
emoji: π
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: gray
|
6 |
sdk: gradio
|
7 |
app_file: app.py
|
8 |
pinned: false
|
app.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from matplotlib.pyplot import title
|
3 |
+
import numpy as np
|
4 |
+
import tensorflow as tf
|
5 |
+
import random
|
6 |
+
from tensorflow import keras
|
7 |
+
import json
|
8 |
+
import requests
|
9 |
+
|
10 |
+
def get_rest_url(model_name, host='127.0.0.1', port='8501', verb='predict'):
    """Build the TensorFlow Serving REST endpoint URL for a served model.

    Args:
        model_name: Name of the model as registered with TF Serving.
        host: Serving host (IP or hostname).
        port: REST API port (TF Serving's default is 8501).
        verb: API verb, e.g. 'predict' or 'classify'.

    Returns:
        The full endpoint URL string.
    """
    # Bug fix: the original hard-coded ':predict', silently ignoring `verb`.
    url = 'http://{0}:{1}/v1/models/{2}:{3}'.format(host, port, model_name, verb)

    return url
14 |
+
|
def rest_request(data, url):
    """POST an array to a TF Serving REST endpoint.

    Serializes *data* (a numpy array) into the Serving 'instances' JSON
    envelope and POSTs it to *url*, returning the raw requests Response.
    """
    body = json.dumps({'instances': data.tolist()})
    return requests.post(url=url, data=body)
19 |
+
|
20 |
+
|
21 |
+
# Label vocabularies for the three classifiers. The first two are sorted so
# the index order matches the alphabetical class ordering the models emit;
# damage_levels is kept in severity order on purpose (do not sort).
_BINARY_LABELS = ['disaster happened', 'no disaster happened']
_DISASTER_LABELS = ['volcano', 'flooding', 'earthquake', 'fire', 'wind', 'tsunami']
_SEVERITY_LABELS = ['no damage', 'minor damage', 'major damage', 'destroyed']

damage_types = np.array(sorted(_BINARY_LABELS))
disaster_types = np.array(sorted(_DISASTER_LABELS))
damage_levels = np.array(_SEVERITY_LABELS)
24 |
+
|
25 |
+
|
26 |
+
|
27 |
+
def damage_classification(img):
    """Binary classification: did a disaster happen in *img*?

    Sends the image to the remote binary-damage-classification model over
    the TF Serving REST API and maps the scores onto `damage_types`.

    Args:
        img: RGB image array; assumed 1024x1024x3 to match the gradio input
             shape — TODO confirm against the serving model's signature.

    Returns:
        dict mapping each label in `damage_types` to its predicted score.
    """
    # Batch of one; uint8 buffer matches the array the model was fed with.
    image = np.zeros((1, 1024, 1024, 3), dtype=np.uint8)
    image[0] = img
    # Cleaned up: removed the leftover random-stub lines and split the
    # single-line request/parse chain for readability.
    url = get_rest_url(model_name='binary-damage-classification-model', host='54.89.217.229')
    results = json.loads(rest_request(image, url).content)
    prediction = results['predictions'][0]

    return {damage_types[i]: prediction[i] for i in range(len(damage_types))}
37 |
+
|
38 |
+
|
39 |
+
def disaster_classification(img):
    """Classify which type of disaster is shown in *img*.

    Sends the image to the remote disaster-classification model over the
    TF Serving REST API and maps the scores onto `disaster_types`.

    Args:
        img: RGB image array; assumed 1024x1024x3 to match the gradio input
             shape — TODO confirm against the serving model's signature.

    Returns:
        dict mapping each label in `disaster_types` to its predicted score.
    """
    # Batch of one; uint8 buffer matches the array the model was fed with.
    image = np.zeros((1, 1024, 1024, 3), dtype=np.uint8)
    image[0] = img
    # Cleaned up: removed the commented-out local-model / random stubs and
    # split the single-line request/parse chain for readability.
    url = get_rest_url(model_name='disaster-classification-model', host='3.86.228.238')
    results = json.loads(rest_request(image, url).content)
    prediction = results['predictions'][0]

    return {disaster_types[i]: prediction[i] for i in range(len(disaster_types))}
48 |
+
|
49 |
+
|
50 |
+
def regional_damage_classification(img):
    """Rate the damage severity of *img* against the `damage_levels` labels.

    Queries the remote regional-damage-classification model over the
    TF Serving REST API and returns a label -> score mapping.
    """
    # Wrap the single image into a batch of one.
    batch = np.zeros((1, 1024, 1024, 3), dtype=np.uint8)
    batch[0] = img
    endpoint = get_rest_url(model_name='regional-damage-classification-model', host='54.145.173.193')
    scores = json.loads(rest_request(batch, endpoint).content)['predictions'][0]

    return {label: scores[idx] for idx, label in enumerate(damage_levels)}
57 |
+
|
58 |
+
|
59 |
+
|
60 |
+
# Gradio UI wiring.
# NOTE(review): this targets a legacy gradio API — `gr.inputs`/`gr.outputs`,
# `allow_screenshot`, and a *list* of functions for `fn` were removed in
# gradio 3.x. Pin a compatible gradio version in requirements, or port to
# gr.Parallel / Blocks when upgrading.
iface = gr.Interface(
    fn = [damage_classification, disaster_classification, regional_damage_classification],
    inputs = gr.inputs.Image(shape=(1024, 1024), image_mode='RGB', invert_colors=False, source="upload", type='numpy'),
    outputs = gr.outputs.Label(),
    allow_screenshot=True,
    allow_flagging='never',
    examples=[
        './sample_images/hurricane.png',
        './sample_images/volcano.png',
        './sample_images/wildfire.png',
        './sample_images/earthquake.png',
        './sample_images/tsunami.png',
    ],
    title="Soteria - AI for Natural Disaster Response",
    # Fixed user-facing typo: "explantation" -> "explanation".
    description="""
    Check out our project @ https://github.com/Soteria-ai/Soteria for more explanation! Demo below takes ~15 seconds to get the results.
    """,
    theme="grass",
)
# share=False keeps the demo local; debug=True blocks until the server exits.
iface.launch(share=False, show_error=True, inline=True, debug=True)
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
numpy
|
2 |
+
tensorflow
|
3 |
+
cloudpathlib[s3]
|
4 |
+
gradio
|
5 |
+
requests
|
sample_images/earthquake.png
ADDED
Git LFS Details
|
sample_images/hurricane.png
ADDED
Git LFS Details
|
sample_images/tsunami.png
ADDED
Git LFS Details
|
sample_images/volcano.png
ADDED
Git LFS Details
|
sample_images/wildfire.png
ADDED
Git LFS Details
|