LinKadel committed on
Commit
f416d3d
1 Parent(s): 73a2807

Added background options, bug fixes

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. Backgrounds/1.jpg +0 -0
  3. Backgrounds/2.jpg +0 -0
  4. Backgrounds/3.jpg +3 -0
  5. app.py +25 -38
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Backgrounds/3.jpg filter=lfs diff=lfs merge=lfs -text
Backgrounds/1.jpg ADDED
Backgrounds/2.jpg ADDED
Backgrounds/3.jpg ADDED

Git LFS Details

  • SHA256: 38e335e578c8a7567902db9af3b6c02a9aeea35d584ca6eded0d9efc2fe016d0
  • Pointer size: 132 Bytes
  • Size of remote file: 5.57 MB
app.py CHANGED
@@ -1,46 +1,33 @@
1
- import torch
2
  import gradio as gr
3
- from gradio import processing_utils, utils
 
4
  from gradio_depth_pred import create_demo as create_depth_pred_demo
5
- from PIL import Image
6
- import random
7
- from diffusers import (
8
- DiffusionPipeline,
9
- AutoencoderKL,
10
- StableDiffusionControlNetPipeline,
11
- ControlNetModel,
12
- StableDiffusionLatentUpscalePipeline,
13
- StableDiffusionImg2ImgPipeline,
14
- StableDiffusionControlNetImg2ImgPipeline,
15
- DPMSolverMultistepScheduler, # <-- Added import
16
- EulerDiscreteScheduler # <-- Added import
17
- )
18
 
19
- print(f"Is CUDA available: {torch.cuda.is_available()}")
20
- print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 
 
 
 
 
 
 
 
 
 
21
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 
22
 
23
- import time
24
- from style import css
25
-
26
- BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
27
 
28
- title = "Ultra Heroes"
29
- description = "Testing composites and lighting tweaks."
 
 
 
30
 
31
- def inference(text):
32
- output_flan = ""
33
- output_vanilla = ""
34
- return [output_flan, output_vanilla]
35
 
36
- io = gr.Interface(
37
- inference,
38
- gr.Textbox(lines=3),
39
- outputs=[
40
- gr.Textbox(lines=3, label="Flan T5"),
41
- gr.Textbox(lines=3, label="T5")
42
- ],
43
- title=title,
44
- description=description,
45
- )
46
- io.launch()
 
 
1
  import gradio as gr
2
+ import torch
3
+
4
  from gradio_depth_pred import create_demo as create_depth_pred_demo
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
+ css = """
7
+ #img-display-container {
8
+ max-height: 50vh;
9
+ }
10
+ #img-display-input {
11
+ max-height: 40vh;
12
+ }
13
+ #img-display-output {
14
+ max-height: 40vh;
15
+ }
16
+
17
+ """
18
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
19
+ model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
20
 
21
+ title = "# Ultra Heroes"
22
+ description = """Official demo for **Ultra Heroes**."""
 
 
23
 
24
+ with gr.Blocks(css=css) as demo:
25
+ gr.Markdown(title)
26
+ gr.Markdown(description)
27
+ with gr.Tab("Depth Prediction"):
28
+ create_depth_pred_demo(model)
29
 
30
+ # gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/shariqfarooq/ZoeDepth?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>''')
 
 
 
31
 
32
+ if __name__ == '__main__':
33
+ demo.queue().launch()