Commit 43bf6e9
Parent(s): 50bc9ac
bills committed

Add a lot of changes
Files changed (resulting layout sketched after the list):
- __pycache__/multiapp.cpython-37.pyc +0 -0
- app.py +14 -64
- apps/__pycache__/fine_tune.cpython-37.pyc +0 -0
- apps/__pycache__/home.cpython-37.pyc +0 -0
- apps/__pycache__/main_model.cpython-37.pyc +0 -0
- apps/fine_tune.py +31 -0
- apps/home.py +40 -0
- apps/main_model.py +32 -0
- best.pt → apps/model/main_model.pt +0 -0
- multiapp.py +80 -0
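
Taken together, the commit splits the former single-page app.py into a multi-page layout. A sketch of the resulting tree, assembled only from the paths in the diffs below (the compiled __pycache__/*.pyc artifacts are also committed):

    .
    ├── app.py                 # entry point: registers the pages with MultiApp
    ├── multiapp.py            # MultiApp helper (sidebar navigation, query-param state)
    └── apps/
        ├── home.py            # landing page with the AIS vessel map
        ├── main_model.py      # YOLOv5x6 model prediction page
        ├── fine_tune.py       # fine-tuned model prediction page
        └── model/
            └── main_model.pt  # renamed from best.pt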
__pycache__/multiapp.cpython-37.pyc
ADDED
Binary file (3.24 kB)
app.py
CHANGED
@@ -1,74 +1,24 @@
-import time
-from turtle import width
-import torch
-import folium
-import numpy as np
-import pandas as pd
 import streamlit as st
-
-from
-
+import streamlit as st
+from multiapp import MultiApp
+from apps import (
+    home,
+    main_model,
+    fine_tune
+)
 st.set_page_config(
     page_title="Ship Detection using YOLOv5 Medium Model",
     page_icon=":ship:",
     layout="wide"
 )
 
-
-st.markdown(
-    """
-    This application is build based on YOLOv5 with extral large model. User just
-    upload an image, and press the 'Predict' button to make a prediction base on
-    a training model before.
-
-    ### For more information, please visit:
-
-    - Check out [my github](https://github.com/bills1912)
-    - Jump into YOLOv5 [documentation](https://docs.ultralytics.com/)
-
-    """
-)
-
-st.write("## Ship Imagery Prediction")
-map_col1, map_col2, map_col3 = st.columns(3)
-
-ais = pd.read_csv("https://raw.githubusercontent.com/bills1912/marin-vessels-detection/main/data/MarineTraffic_VesselExport_2022-11-25.csv")
-ais_jakarta = ais[ais['Destination Port'] == 'JAKARTA']
-ais_list = ais_jakarta.values.tolist()
-f = folium.Figure(width=1000, height=500)
-jakarta_vessels = folium.Map(location=[-5.626954250925966, 106.70735731868719], zoom_start=8).add_to(f)
-ais_data = folium.FeatureGroup(name="marine_vessels")
-mCluster = MarkerCluster(name="Marine Vessels")
-for i in ais_list:
-    html = f"<h3>{i[1]}</h3> Vessel Type: {i[8]} </br> Destination Port: {i[2]} </br> Reported Destination: {i[4]} </br> Current Port: {i[6]}\
-        </br> Latitude: {i[10]} </br> Longitude: {i[11]}"
-    iframe = folium.IFrame(html)
-    popup = folium.Popup(iframe, min_width=250, max_width=300)
-    ais_data.add_child(mCluster.add_child(folium.Marker(location=[i[10], i[11]], popup=popup, icon=folium.Icon(color="black", icon="ship", prefix="fa"))))
-jakarta_vessels.add_child(ais_data)
-folium_static(jakarta_vessels, width=1370, height=700)
-
+apps = MultiApp()
 
-
-eval_col1, eval_col2, eval_col3, eval_col4 = st.columns(spec=4)
-eval_col1.metric("Precision", "89.52%")
-eval_col2.metric("Recall", "83.54%")
-eval_col3.metric("mAP 0.5", "85.39%")
-eval_col4.metric("mAP 0.5:0.95", "62.63%")
+# Add all your application here
 
-
-
-
-folder_path = st.text_input("Image path",
-                            help="This field the image path field that the model will predict the object inside the image that we have uploaded",
-                            placeholder="Copy the path of image to this field")
+apps.add_app("Home", home.app)
+apps.add_app("YOLOv5x6 Model", main_model.app)
+apps.add_app("Fine-Tuning Model", fine_tune.app)
 
-
-
-ship_model = torch.hub.load('ultralytics/yolov5', 'custom', path="best.pt", force_reload=True)
-results = ship_model(f"{folder_path}")
-with st.spinner("Loading..."):
-    time.sleep(3.5)
-    st.success("Done!")
-st.image(np.squeeze(results.render()))
-results.print()
+# The main app
+apps.run()
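
With this change app.py is only glue code: each module under apps/ exposes an app() callable that MultiApp invokes when its page is selected in the sidebar. A minimal page module in the same style, as an illustrative sketch (the module name and page text are hypothetical):

    # apps/example_page.py (hypothetical file name, for illustration only)
    import streamlit as st

    def app():
        # Rendered by MultiApp when this page is selected in the sidebar
        st.write("## Example page")

Registering it would be one extra call in app.py, e.g. apps.add_app("Example", example_page.app), after adding example_page to the from apps import (...) block.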
apps/__pycache__/fine_tune.cpython-37.pyc
ADDED
Binary file (1.42 kB)
apps/__pycache__/home.cpython-37.pyc
ADDED
Binary file (2.12 kB)
apps/__pycache__/main_model.cpython-37.pyc
ADDED
Binary file (1.45 kB)
apps/fine_tune.py
ADDED
@@ -0,0 +1,31 @@
+import time
+import torch
+import numpy as np
+import streamlit as st
+
+def app():
+    st.write("## Fine-Tuning Model Prediction")
+    st.write("### Model evaluation:")
+    eval_col1, eval_col2, eval_col3, eval_col4, eval_col5 = st.columns(spec=5)
+    eval_col1.metric("Precision", "99.03%")
+    eval_col2.metric("Recall", "98.39%")
+    eval_col3.metric("F1-Score", "98.71%")
+    eval_col4.metric("mAP 0.5", "98.96%")
+    eval_col5.metric("mAP 0.5:0.95", "69.61%")
+
+    uploaded_file = st.file_uploader("Choose a ship imagery")
+    if uploaded_file is not None:
+        st.image(uploaded_file, caption='Image to predict')
+    folder_path = st.text_input("Image path",
+                                help="This field the image path field that the model will predict the object inside the image that we have uploaded",
+                                placeholder="Copy the path of image to this field")
+
+    prediction = st.button("Predict")
+    if prediction:
+        ship_model = torch.hub.load('ultralytics/yolov5', 'custom', path="apps/model/main_model.pt", force_reload=True)
+        results = ship_model(f"{folder_path}")
+        with st.spinner("Loading..."):
+            time.sleep(3.5)
+            st.success("Done!")
+        st.image(np.squeeze(results.render()))
+        results.print()
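
Both prediction pages load the committed weights through the torch.hub custom-checkpoint route for YOLOv5 and display the output of results.render(). A stripped-down sketch of that inference path outside Streamlit (the image path is a placeholder, and force_reload is dropped here only so the hub repo isn't re-cloned on every run):

    import numpy as np
    import torch

    # Load the fine-tuned checkpoint via the ultralytics/yolov5 hub entry point
    model = torch.hub.load('ultralytics/yolov5', 'custom', path='apps/model/main_model.pt')

    results = model('path/to/ship_image.jpg')  # placeholder image path
    results.print()                            # per-image detection summary to stdout
    annotated = np.squeeze(results.render())   # render() returns the image(s) with boxes drawn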
apps/home.py
ADDED
@@ -0,0 +1,40 @@
+import time
+from turtle import width
+import torch
+import folium
+import numpy as np
+import pandas as pd
+import streamlit as st
+from folium.plugins import MarkerCluster
+from streamlit_folium import folium_static
+
+def app():
+    st.write("# Welcome to Ship Detection Application! :satellite:")
+    st.markdown(
+        """
+        This application is build based on YOLOv5 with extral large model. User just
+        upload an image, and press the 'Predict' button to make a prediction base on
+        a training model before.
+
+        ### For more information, please visit:
+
+        - Check out [my github](https://github.com/bills1912)
+        - Jump into YOLOv5 [documentation](https://docs.ultralytics.com/)
+
+        """
+    )
+    ais = pd.read_csv("https://raw.githubusercontent.com/bills1912/marin-vessels-detection/main/data/MarineTraffic_VesselExport_2022-11-25.csv")
+    ais_jakarta = ais[ais['Destination Port'] == 'JAKARTA']
+    ais_list = ais_jakarta.values.tolist()
+    f = folium.Figure(width=1000, height=500)
+    jakarta_vessels = folium.Map(location=[-5.626954250925966, 106.70735731868719], zoom_start=8).add_to(f)
+    ais_data = folium.FeatureGroup(name="marine_vessels")
+    mCluster = MarkerCluster(name="Marine Vessels")
+    for i in ais_list:
+        html = f"<h3>{i[1]}</h3> Vessel Type: {i[8]} </br> Destination Port: {i[2]} </br> Reported Destination: {i[4]} </br> Current Port: {i[6]}\
+            </br> Latitude: {i[10]} </br> Longitude: {i[11]}"
+        iframe = folium.IFrame(html)
+        popup = folium.Popup(iframe, min_width=250, max_width=300)
+        ais_data.add_child(mCluster.add_child(folium.Marker(location=[i[10], i[11]], popup=popup, icon=folium.Icon(color="black", icon="ship", prefix="fa"))))
+    jakarta_vessels.add_child(ais_data)
+    folium_static(jakarta_vessels, width=1370, height=700)
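
The home page hands the finished folium map to Streamlit through streamlit_folium's folium_static helper, with the vessel markers grouped in a MarkerCluster. The same pattern in isolation, as a minimal sketch (coordinates and popup text are arbitrary):

    import folium
    from folium.plugins import MarkerCluster
    from streamlit_folium import folium_static

    m = folium.Map(location=[-5.63, 106.71], zoom_start=8)     # arbitrary map center
    cluster = MarkerCluster(name="Marine Vessels").add_to(m)   # groups nearby markers
    folium.Marker([-5.63, 106.71], popup="example vessel").add_to(cluster)
    folium_static(m)                                           # embeds the map in the Streamlit page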
apps/main_model.py
ADDED
@@ -0,0 +1,32 @@
+import time
+from turtle import width
+import torch
+import numpy as np
+import streamlit as st
+
+def app():
+    st.write("## Ship Imagery Prediction")
+    st.write("### Model evaluation:")
+    eval_col1, eval_col2, eval_col3, eval_col4, eval_col5 = st.columns(spec=5)
+    eval_col1.metric("Precision", "89.52%")
+    eval_col2.metric("Recall", "83.54%")
+    eval_col3.metric("F1-Score", "86.43%")
+    eval_col4.metric("mAP 0.5", "85.39%")
+    eval_col5.metric("mAP 0.5:0.95", "62.63%")
+
+    uploaded_file = st.file_uploader("Choose a ship imagery")
+    if uploaded_file is not None:
+        st.image(uploaded_file, caption='Image to predict')
+    folder_path = st.text_input("Image path",
+                                help="This field the image path field that the model will predict the object inside the image that we have uploaded",
+                                placeholder="Copy the path of image to this field")
+
+    prediction = st.button("Predict")
+    if prediction:
+        ship_model = torch.hub.load('ultralytics/yolov5', 'custom', path="apps/model/main_model.pt", force_reload=True)
+        results = ship_model(f"{folder_path}")
+        with st.spinner("Loading..."):
+            time.sleep(3.5)
+            st.success("Done!")
+        st.image(np.squeeze(results.render()))
+        results.print()
best.pt → apps/model/main_model.pt
RENAMED
File without changes
multiapp.py
ADDED
@@ -0,0 +1,80 @@
+"""Frameworks for running multiple Streamlit applications as a single app.
+"""
+import streamlit as st
+
+# app_state = st.experimental_get_query_params()
+# app_state = {k: v[0] if isinstance(v, list) else v for k, v in app_state.items()}  # fetch the first item in each query string as we don't have multiple values for each query string key in this example
+
+
+class MultiApp:
+    """Framework for combining multiple streamlit applications.
+    Usage:
+        def foo():
+            st.title("Hello Foo")
+        def bar():
+            st.title("Hello Bar")
+        app = MultiApp()
+        app.add_app("Foo", foo)
+        app.add_app("Bar", bar)
+        app.run()
+    It is also possible keep each application in a separate file.
+        import foo
+        import bar
+        app = MultiApp()
+        app.add_app("Foo", foo.app)
+        app.add_app("Bar", bar.app)
+        app.run()
+    """
+
+    def __init__(self):
+        self.apps = []
+
+    def add_app(self, title, func):
+        """Adds a new application.
+        Parameters
+        ----------
+        func:
+            the python function to render this app.
+        title:
+            title of the app. Appears in the dropdown in the sidebar.
+        """
+        self.apps.append({"title": title, "function": func})
+
+    def run(self):
+        app_state = st.experimental_get_query_params()
+        app_state = {
+            k: v[0] if isinstance(v, list) else v for k, v in app_state.items()
+        }  # fetch the first item in each query string as we don't have multiple values for each query string key in this example
+
+        # st.write('before', app_state)
+
+        titles = [a["title"] for a in self.apps]
+        functions = [a["function"] for a in self.apps]
+        default_radio = titles.index(app_state["page"]) if "page" in app_state else 0
+
+        st.sidebar.title("Navigation")
+
+        title = st.sidebar.radio("Go To", titles, index=default_radio, key="radio")
+
+        app_state["page"] = st.session_state.radio
+        # st.write('after', app_state)
+
+        st.experimental_set_query_params(**app_state)
+        functions[titles.index(title)]()
+
+        st.sidebar.title("Contribute")
+        st.sidebar.info(
+            "This is an open source project and you are very welcome to contribute your "
+            "comments, questions, resources and apps as "
+            "[issues](https://github.com/giswqs/streamlit-geospatial/issues) or "
+            "[pull requests](https://github.com/giswqs/streamlit-geospatial/pulls) "
+            "to the [source code](https://github.com/giswqs/streamlit-geospatial). "
+        )
+        st.sidebar.title("About")
+        st.sidebar.info(
+            """
+            This web [app](https://share.streamlit.io/giswqs/streamlit-geospatial/app.py) is maintained by [Qiusheng Wu](https://wetlands.io). You can follow me on social media:
+            [GitHub](https://github.com/giswqs) | [Twitter](https://twitter.com/giswqs) | [YouTube](https://www.youtube.com/c/QiushengWu) | [LinkedIn](https://www.linkedin.com/in/qiushengwu).
+            This web app URL: <https://streamlit.gishub.org>
+            """
+        )