Paolo-Fraccaro committed
Commit ab8e4fb
Parent: e6ffee0

add markdown

Files changed (1): app.py (+14, -4)
app.py CHANGED
@@ -393,14 +393,24 @@ def preprocess_example(example_list):
 
 
 with gr.Blocks() as demo:
-
+
+    gr.Markdown(value='# Prithvi image reconstruction demo')
+    gr.Markdown(value='''Prithvi is a first-of-its-kind temporal Vision transformer pretrained by the IBM and NASA team on continental US Harmonised
+    Landsat Sentinel 2 (HLS) data. In particular, the model adopts a self-supervised encoder developed with a ViT architecture and a Masked AutoEncoder
+    learning strategy, with MSE as the loss function. The model includes spatial attention across multiple patches as well as temporal attention for
+    each patch. More information about the model and its weights is available [here](https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M).\n
+    This demo showcases image reconstruction over three timestamps, with the user providing a set of three HLS images and the model randomly masking
+    out some proportion of the images and then reconstructing them based on the unmasked portion of the images.\n
+    The user needs to provide three HLS geotiff images, including the following channels: Blue, Green, Red, NIRa, SWIR, SWIR 2.
+
+    ''')
     with gr.Row():
         with gr.Column():
             inp_files = gr.Files(elem_id='files')
             # inp_slider = gr.Slider(0, 100, value=50, label="Mask ratio", info="Choose ratio of masking between 0 and 100", elem_id='slider'),
             btn = gr.Button("Submit")
     with gr.Row():
-        gr.Markdown(value='Original images')
+        gr.Markdown(value='## Original images')
     with gr.Row():
         gr.Markdown(value='T1')
         gr.Markdown(value='T2')
@@ -410,7 +420,7 @@ with gr.Blocks() as demo:
         out2_orig_t2 = gr.Image(image_mode='RGB')
         out3_orig_t3 = gr.Image(image_mode='RGB')
     with gr.Row():
-        gr.Markdown(value='Masked images')
+        gr.Markdown(value='## Masked images')
     with gr.Row():
         gr.Markdown(value='T1')
         gr.Markdown(value='T2')
@@ -420,7 +430,7 @@ with gr.Blocks() as demo:
         out5_masked_t2 = gr.Image(image_mode='RGB')
         out6_masked_t3 = gr.Image(image_mode='RGB')
     with gr.Row():
-        gr.Markdown(value='Reonstructed images')
+        gr.Markdown(value='## Reconstructed images')
     with gr.Row():
         gr.Markdown(value='T1')
         gr.Markdown(value='T2')
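The new description summarizes the Masked AutoEncoder recipe mentioned for Prithvi: mask random patches, reconstruct them from the visible ones, and score the result with MSE. Below is a rough NumPy sketch of that idea; the patch size, mask ratio, and the choice to score only the masked patches follow the generic MAE recipe and are not taken from Prithvi's code.

```python
import numpy as np

def random_patch_mask(img, patch=16, mask_ratio=0.5, seed=0):
    """Zero out a random subset of non-overlapping patches of an (H, W, C) image."""
    h, w, _ = img.shape
    gh, gw = h // patch, w // patch
    rng = np.random.default_rng(seed)
    mask = np.zeros(gh * gw, dtype=bool)
    mask[rng.choice(gh * gw, size=int(gh * gw * mask_ratio), replace=False)] = True
    mask = mask.reshape(gh, gw)

    masked = img.copy()
    for r in range(gh):
        for c in range(gw):
            if mask[r, c]:
                masked[r*patch:(r+1)*patch, c*patch:(c+1)*patch, :] = 0
    return masked, mask

def masked_mse(pred, target, mask, patch=16):
    """MSE computed over the masked patches only, as in the standard MAE recipe."""
    errors = []
    gh, gw = mask.shape
    for r in range(gh):
        for c in range(gw):
            if mask[r, c]:
                p = pred[r*patch:(r+1)*patch, c*patch:(c+1)*patch].astype(np.float32)
                t = target[r*patch:(r+1)*patch, c*patch:(c+1)*patch].astype(np.float32)
                errors.append(np.mean((p - t) ** 2))
    return float(np.mean(errors)) if errors else 0.0

# Toy usage: mask half the patches of a random 6-band "image" and score the
# trivial all-zero "reconstruction" against the original.
img = np.random.rand(224, 224, 6).astype(np.float32)
masked, mask = random_patch_mask(img, patch=16, mask_ratio=0.5)
print(masked_mse(masked, img, mask))
```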
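The inputs are six-band HLS GeoTIFFs while the output rows show RGB previews, so some band selection and scaling has to happen in between. Here is a hedged sketch of that step, assuming the band order given in the description (Blue, Green, Red, NIRa, SWIR, SWIR 2) and using `rasterio` with a simple percentile stretch; neither of those choices is taken from app.py.

```python
import numpy as np
import rasterio

def hls_to_rgb(path):
    """Read a 6-band HLS GeoTIFF and return an 8-bit RGB preview array."""
    with rasterio.open(path) as src:
        # rasterio bands are 1-indexed; with Blue, Green, Red stored as bands 1-3,
        # read them back in Red, Green, Blue display order.
        rgb = src.read([3, 2, 1]).astype(np.float32)   # shape (3, H, W)
    rgb = np.transpose(rgb, (1, 2, 0))                 # -> (H, W, 3)
    # Simple 2-98 percentile stretch so reflectance values fill the 0-255 range.
    lo, hi = np.percentile(rgb, (2, 98))
    rgb = np.clip((rgb - lo) / max(hi - lo, 1e-6), 0.0, 1.0)
    return (rgb * 255).astype(np.uint8)
```

An array like this can be returned directly from a callback into the `gr.Image(image_mode='RGB')` outputs that the diff lays out.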
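For readers who want to try the layout locally, the following is a minimal, self-contained sketch of the Blocks structure the new Markdown headings slot into. The `reconstruct` callback, its wiring, and the single output row are placeholders added for illustration only; they are not the app's actual function.

```python
import numpy as np
import gradio as gr

def reconstruct(files):
    # Placeholder callback for this sketch only: the real app reads the three
    # HLS GeoTIFFs, masks them, and runs Prithvi, returning RGB previews.
    blank = np.zeros((224, 224, 3), dtype=np.uint8)
    return blank, blank, blank

with gr.Blocks() as demo:
    gr.Markdown(value='# Prithvi image reconstruction demo')
    with gr.Row():
        with gr.Column():
            inp_files = gr.Files(elem_id='files')
            btn = gr.Button("Submit")
    with gr.Row():
        gr.Markdown(value='## Original images')
    with gr.Row():
        out1 = gr.Image(image_mode='RGB')
        out2 = gr.Image(image_mode='RGB')
        out3 = gr.Image(image_mode='RGB')
    # Wiring assumed for the sketch; the actual app defines its own callback.
    btn.click(reconstruct, inputs=inp_files, outputs=[out1, out2, out3])

if __name__ == "__main__":
    demo.launch()
```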