Update src/streamlit_app.py
src/streamlit_app.py  +83 -19  CHANGED
@@ -173,10 +173,11 @@ st.set_page_config(page_title="DeepSlide: Landslide Detection", layout="wide")
 st.title("DeepSlide: Landslide Detection")
 st.markdown("""
 ## Instructions
-1.
-2.
-
-
+1. **Model Selection**: Choose a single model from the sidebar or select "Run all models".
+2. **Data Input**:
+   - Try an example image from the dropdown, or
+   - Upload your own .h5 files
+3. **Results**: View predictions and download results as .npy files.
 """)
 
 # Sidebar for model selection
@@ -201,6 +202,30 @@ st.header("Upload Data")
 if 'upload_errors' not in st.session_state:
     st.session_state.upload_errors = []
 
+# Example images selection
+st.subheader("Try Example Images")
+examples_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "examples")
+example_files = []
+
+try:
+    if os.path.exists(examples_dir):
+        example_files = [f for f in os.listdir(examples_dir) if f.endswith('.h5')]
+        example_files.sort()
+except:
+    pass
+
+if example_files:
+    selected_example = st.selectbox(
+        "Select an example image to test:",
+        options=["None"] + example_files,
+        help="Choose an example .h5 file to quickly test the models"
+    )
+else:
+    st.info("No example files found")
+    selected_example = "None"
+
+# File upload section
+st.subheader("Upload Your Own Files")
 uploaded_files = st.file_uploader(
     "Choose .h5 files...",
     type="h5",
@@ -208,12 +233,63 @@ uploaded_files = st.file_uploader(
     help="Upload your .h5 files here. Maximum file size is 200MB."
 )
 
+def process_h5_file(file_path, file_name):
+    """Process a single h5 file"""
+    try:
+        with h5py.File(file_path, 'r') as hdf:
+            if 'img' not in hdf:
+                st.error(f"Error: 'img' dataset not found in {file_name}")
+                return
+
+            data = np.array(hdf.get('img'))
+            data[np.isnan(data)] = 0.000001
+            channels = config["dataset_config"]["channels"]
+
+            image = np.zeros((128, 128, len(channels)))
+
+            if data.ndim == 3:
+                if data.shape[0] == 14:  # (C, H, W)
+                    for i, band in enumerate(channels):
+                        image[:, :, i] = data[band-1, :, :]
+                elif data.shape[2] == 14:  # (H, W, C)
+                    for i, band in enumerate(channels):
+                        image[:, :, i] = data[:, :, band-1]
+                else:
+                    st.warning(f"Unexpected data shape: {data.shape}. Assuming (C, H, W).")
+                    for i, band in enumerate(channels):
+                        if band-1 < data.shape[0]:
+                            image[:, :, i] = data[band-1, :, :]
+            else:
+                st.error(f"Data has {data.ndim} dimensions, expected 3.")
+                return
+
+            # Prepare for model (Batch, Channel, Height, Width)
+            image_display = image.transpose(2, 0, 1)  # (C, H, W)
+            image_tensor = torch.from_numpy(image_display).unsqueeze(0).float()  # (1, C, H, W)
+
+            if model_option == "Select a single model":
+                process_and_visualize(selected_model_key, selected_model_info, image_tensor, image_display, file_name)
+            else:
+                for model_key, model_info in MODEL_DESCRIPTIONS.items():
+                    process_and_visualize(model_key, model_info, image_tensor, image_display, file_name)
+
+    except Exception as e:
+        st.error(f"Error processing file {file_name}: {str(e)}")
+
+# Process example file if selected
+if selected_example != "None":
+    st.write(f"Processing example: {selected_example}")
+    example_path = os.path.join(examples_dir, selected_example)
+    with st.spinner(f'Processing {selected_example}...'):
+        process_h5_file(example_path, selected_example)
+
+# Process uploaded files
 if uploaded_files:
     for uploaded_file in uploaded_files:
         st.write(f"Processing file: {uploaded_file.name}")
         st.write(f"File size: {uploaded_file.size} bytes")
 
-        with st.spinner('
+        with st.spinner('Processing...'):
            try:
                # Read the file directly using BytesIO
                bytes_data = uploaded_file.getvalue()
@@ -228,18 +304,6 @@ if uploaded_files:
                data[np.isnan(data)] = 0.000001
                channels = config["dataset_config"]["channels"]
 
-               # Prepare image
-               # Assuming data shape is (14, 128, 128) based on typical satellite data or (128, 128, 14)
-               # The original code did: image[:, :, i] = data[band-1] implying data is (14, 128, 128) if accessed by index
-               # But later it did data[:, :, channel-1] in the else block?
-               # Let's check the original code logic again.
-               # Original code had two different logic blocks for data loading!
-               # Block 1 (single model): image[:, :, i] = data[band-1] -> implies data is (C, H, W)
-               # Block 2 (all models): image[:, :, i] = data[:, :, channel-1] -> implies data is (H, W, C)
-
-               # I will assume (C, H, W) is more standard for HDF5 'img' usually, but let's try to be robust or pick one.
-               # Given the inconsistency, I'll check data shape.
-
                image = np.zeros((128, 128, len(channels)))
 
                if data.ndim == 3:
@@ -259,7 +323,6 @@ if uploaded_files:
                    continue
 
                # Prepare for model (Batch, Channel, Height, Width)
-               # image is currently (H, W, C)
                image_display = image.transpose(2, 0, 1)  # (C, H, W)
                image_tensor = torch.from_numpy(image_display).unsqueeze(0).float()  # (1, C, H, W)
 
@@ -275,4 +338,5 @@ if uploaded_files:
            st.error(traceback.format_exc())
            continue
 
-
+if selected_example != "None" or uploaded_files:
+    st.success('✅ Processing completed!')
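For local testing of the new "Try Example Images" flow, the dropdown only lists `.h5` files that already exist in the `examples/` directory, so a small synthetic patch is enough to exercise `process_h5_file` end to end. The sketch below is illustrative only: the file name, the band-first `(14, 128, 128)` layout, and writing relative to the repository root are assumptions inferred from this diff, not a documented data specification.

```python
# Sketch: write a synthetic example patch so the "Try Example Images" dropdown
# has something to list. File name and pixel values are hypothetical; the app
# only requires an .h5 file in examples/ containing an 'img' dataset.
import os

import h5py
import numpy as np

examples_dir = "examples"                     # directory the app scans (assumed: <repo root>/examples)
os.makedirs(examples_dir, exist_ok=True)

rng = np.random.default_rng(0)
img = rng.random((14, 128, 128)).astype(np.float32)   # band-first layout; (128, 128, 14) is also accepted
img[0, :4, :4] = np.nan                       # a few NaNs; the app replaces them with 1e-6

path = os.path.join(examples_dir, "synthetic_patch.h5")  # hypothetical file name
with h5py.File(path, "w") as f:
    f.create_dataset("img", data=img)         # the loader looks for a dataset named 'img'

print(f"wrote {path}")
```

Either band ordering works because the new loader checks `data.shape[0] == 14` before falling back to the `(H, W, C)` interpretation.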