HReynaud committed
Commit e3044ba
1 Parent(s): 439103c

Update app.py

Files changed (1): app.py (+1, -1)
app.py CHANGED
@@ -123,7 +123,7 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Column():
         with gr.Row():
             with gr.Column(scale=3, variant="panel"):
-                text = gr.Markdown(value="This is a live demo of our work on cardiac ultrasound video generation. The model is trained on 4-chamber cardiac ultrasound videos and can generate realistic 4-chamber videos given a target Left Ventricle Ejection Fraction. Please, start by sampling a random frame from the pool of 100 images taken from the EchoNet-Dynamic dataset, which will act as the conditional image, representing the anatomy of the video. Then, set the target LVEF, and click the button to generate a video. The process takes 30s to 60s. The model running here corresponds to the 1SCM from the paper. **Click on the video to play it.** ")
+                text = gr.Markdown(value="This is a live demo of our work on cardiac ultrasound video generation. The model is trained on 4-chamber cardiac ultrasound videos and can generate realistic 4-chamber videos given a target Left Ventricle Ejection Fraction. Please, start by sampling a random frame from the pool of 100 images taken from the EchoNet-Dynamic dataset, which will act as the conditional image, representing the anatomy of the video. Then, set the target LVEF, and click the button to generate a video. The process takes 30s to 60s. The model running here corresponds to the 1SCM from the paper. **Click on the video to play it.** [Code is available here](https://github.com/HReynaud/EchoDiffusion) ")
             with gr.Column(scale=1, min_width="226"):
                 image = gr.Image(interactive=True)
             with gr.Column(scale=1, min_width="226"):
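
For context, here is a minimal, self-contained sketch of how the demo flow described in the edited Markdown string could be wired up in Gradio. The layout lines mirror the hunk above; the callback names (`sample_frame`, `generate_video`), the slider range, and the button labels are assumptions for illustration, since the hunk ends before the rest of the app is shown.

```python
import gradio as gr

def sample_frame():
    """Hypothetical stub: return a random conditioning frame from the
    pool of 100 EchoNet-Dynamic images."""
    return None  # placeholder; the real callback would return an image

def generate_video(cond_image, target_lvef):
    """Hypothetical stub: generate a 4-chamber echo video for the target
    LVEF (the demo text says this takes 30s to 60s)."""
    return None  # placeholder; the real callback would return a video path

with gr.Blocks(css="style.css") as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column(scale=3, variant="panel"):
                # The string edited in this commit (abbreviated here).
                text = gr.Markdown(value="This is a live demo ...")
            with gr.Column(scale=1, min_width=226):
                image = gr.Image(interactive=True)
            with gr.Column(scale=1, min_width=226):
                # The hunk ends before this column's contents; these
                # controls are assumptions based on the demo text.
                sample_btn = gr.Button("Sample random frame")
                lvef = gr.Slider(minimum=10, maximum=90, value=60,
                                 label="Target LVEF (%)")
                generate_btn = gr.Button("Generate video")
        video = gr.Video()

    sample_btn.click(fn=sample_frame, inputs=None, outputs=image)
    generate_btn.click(fn=generate_video, inputs=[image, lvef], outputs=video)

if __name__ == "__main__":
    demo.launch()
```

The two `.click` bindings mirror the two-step flow the demo text describes: sample a conditioning frame first, then generate a video conditioned on that frame and the target LVEF.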