jassinghNY committed on
Commit
99b3556
·
verified ·
1 Parent(s): fe06760

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -44
app.py CHANGED
@@ -1,18 +1,15 @@
1
- import gradio as gr
2
- import spaces
3
- from lama_cleaner.server import main
4
  from typing import List
5
  from pydantic import BaseModel
6
- import torch
7
 
8
  class FakeArgs(BaseModel):
9
  host: str = "0.0.0.0"
10
  port: int = 7860
11
  model: str = 'lama'
12
  hf_access_token: str = ""
13
- sd_enable_xformers: bool = True
14
  sd_disable_nsfw: bool = False
15
- sd_cpu_textencoder: bool = False
16
  sd_controlnet: bool = False
17
  sd_controlnet_method: str = "control_v11p_sd15_canny"
18
  sd_local_model_path: str = ""
@@ -20,14 +17,14 @@ class FakeArgs(BaseModel):
20
  local_files_only: bool = False
21
  cpu_offload: bool = False
22
  device: str = "cuda"
23
- gui: bool = True
24
  gui_size: List[int] = [1000, 1000]
25
  input: str = ''
26
  disable_model_switch: bool = True
27
  debug: bool = False
28
  no_half: bool = False
29
  disable_nsfw: bool = False
30
- enable_xformers: bool = True
31
  enable_interactive_seg: bool = True
32
  interactive_seg_model: str = "vit_b"
33
  interactive_seg_device: str = "cuda"
@@ -42,40 +39,5 @@ class FakeArgs(BaseModel):
42
  model_dir: str = None
43
  output_dir: str = None
44
 
45
- @spaces.GPU
46
- def process_image(image, mask):
47
- # Implement the actual image processing logic here
48
- # using Lama Cleaner functionality
49
- # For now, we'll just return the input image
50
- return image
51
-
52
def create_gradio_interface():
    """Build the Gradio UI: an image input, a mask input, and one image output."""
    image_inputs = [
        gr.Image(type="numpy", label="Input Image"),
        # 'source'/'tool' kwargs were removed from gr.Image in newer Gradio
        gr.Image(type="numpy", label="Mask"),
    ]
    return gr.Interface(
        fn=process_image,
        inputs=image_inputs,
        outputs=gr.Image(type="numpy", label="Result"),
        title="Lama Cleaner with ZeroGPU",
        description="Upload an image, upload or draw a mask for inpainting, then click Submit.",
    )
64
-
65
@spaces.GPU
def run_lama_cleaner():
    """Start the lama-cleaner server using the default FakeArgs configuration."""
    main(FakeArgs())
69
-
70
if __name__ == "__main__":
    if torch.cuda.is_available():
        print("CUDA is available. Using GPU.")
    else:
        print("CUDA is not available. Falling back to CPU.")
        # Fix: FakeArgs is a pydantic BaseModel, so plain class-attribute
        # assignment (FakeArgs.device = "cpu") does NOT change the field
        # defaults that FakeArgs() instances are built from — the original
        # code left later instances on "cuda". Update the field defaults
        # directly instead (pydantic v1 API; v2 uses `model_fields`).
        for field_name in ("device", "interactive_seg_device", "gfpgan_device"):
            if field_name in FakeArgs.__fields__:
                FakeArgs.__fields__[field_name].default = "cpu"

    interface = create_gradio_interface()
    # NOTE(review): launch() blocks by default, so run_lama_cleaner() below
    # only executes after the Gradio app shuts down — confirm this ordering
    # is intended (both servers try to bind port 7860).
    interface.launch()
    run_lama_cleaner()
 
 
 
 
1
  from typing import List
2
  from pydantic import BaseModel
3
+ from lama_cleaner.server import main
4
 
5
  class FakeArgs(BaseModel):
6
  host: str = "0.0.0.0"
7
  port: int = 7860
8
  model: str = 'lama'
9
  hf_access_token: str = ""
10
+ sd_enable_xformers: bool = False
11
  sd_disable_nsfw: bool = False
12
+ sd_cpu_textencoder: bool = True
13
  sd_controlnet: bool = False
14
  sd_controlnet_method: str = "control_v11p_sd15_canny"
15
  sd_local_model_path: str = ""
 
17
  local_files_only: bool = False
18
  cpu_offload: bool = False
19
  device: str = "cuda"
20
+ gui: bool = False
21
  gui_size: List[int] = [1000, 1000]
22
  input: str = ''
23
  disable_model_switch: bool = True
24
  debug: bool = False
25
  no_half: bool = False
26
  disable_nsfw: bool = False
27
+ enable_xformers: bool = False
28
  enable_interactive_seg: bool = True
29
  interactive_seg_model: str = "vit_b"
30
  interactive_seg_device: str = "cuda"
 
39
  model_dir: str = None
40
  output_dir: str = None
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
if __name__ == "__main__":
    # Entry point: hand the default configuration straight to the
    # lama-cleaner server.
    args = FakeArgs()
    main(args)