hysts (HF staff) committed
Commit 3f8fe83
Parent: 9f05bc5

Migrate from yapf to black

Files changed (7):
  1. .pre-commit-config.yaml +26 -12
  2. .vscode/settings.json +11 -8
  3. app.py +10 -8
  4. app_image_to_3d.py +36 -28
  5. app_text_to_3d.py +49 -38
  6. model.py +25 -34
  7. settings.py +1 -1
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.2.0
+  rev: v4.4.0
   hooks:
   - id: check-executables-have-shebangs
   - id: check-json
@@ -8,29 +8,43 @@ repos:
   - id: check-shebang-scripts-are-executable
   - id: check-toml
   - id: check-yaml
-  - id: double-quote-string-fixer
   - id: end-of-file-fixer
   - id: mixed-line-ending
-    args: ['--fix=lf']
+    args: ["--fix=lf"]
   - id: requirements-txt-fixer
   - id: trailing-whitespace
 - repo: https://github.com/myint/docformatter
-  rev: v1.4
+  rev: v1.7.5
   hooks:
   - id: docformatter
-    args: ['--in-place']
+    args: ["--in-place"]
 - repo: https://github.com/pycqa/isort
   rev: 5.12.0
   hooks:
   - id: isort
+    args: ["--profile", "black"]
 - repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v0.991
+  rev: v1.5.1
   hooks:
   - id: mypy
-    args: ['--ignore-missing-imports']
-    additional_dependencies: ['types-python-slugify']
-- repo: https://github.com/google/yapf
-  rev: v0.32.0
+    args: ["--ignore-missing-imports"]
+    additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
+- repo: https://github.com/psf/black
+  rev: 23.7.0
   hooks:
-  - id: yapf
-    args: ['--parallel', '--in-place']
+  - id: black
+    language_version: python3.10
+    args: ["--line-length", "119"]
+- repo: https://github.com/kynan/nbstripout
+  rev: 0.6.1
+  hooks:
+  - id: nbstripout
+    args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
+- repo: https://github.com/nbQA-dev/nbQA
+  rev: 1.7.0
+  hooks:
+  - id: nbqa-black
+  - id: nbqa-pyupgrade
+    args: ["--py37-plus"]
+  - id: nbqa-isort
+    args: ["--float-to-top"]
.vscode/settings.json CHANGED
@@ -1,18 +1,21 @@
 {
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-    "python.linting.lintOnSave": true,
-    "python.formatting.provider": "yapf",
-    "python.formatting.yapfArgs": [
-        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-    ],
     "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
         "editor.formatOnType": true,
         "editor.codeActionsOnSave": {
             "source.organizeImports": true
         }
     },
+    "black-formatter.args": [
+        "--line-length=119"
+    ],
+    "isort.args": ["--profile", "black"],
+    "flake8.args": [
+        "--max-line-length=119"
+    ],
+    "ruff.args": [
+        "--line-length=119"
+    ],
     "editor.formatOnSave": true,
     "files.insertFinalNewline": true
 }
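isort and black disagree by default on how to wrap long imports, so `--profile black` (set both in the pre-commit hook and here for the VS Code isort extension) keeps the two tools from rewriting each other's output. A small sketch of the difference via isort's Python API, assuming `isort>=5` is installed; the imported names are hypothetical:

```python
import isort

# An import long enough to need wrapping.
src = "from app_image_to_3d import create_demo, create_other_demo, create_yet_another_demo, create_final_demo\n"

print(isort.code(src))                   # default grid-wrapped style
print(isort.code(src, profile="black"))  # black-compatible vertical hanging indent with a trailing comma
```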
app.py CHANGED
@@ -9,21 +9,23 @@ from app_image_to_3d import create_demo as create_demo_image_to_3d
 from app_text_to_3d import create_demo as create_demo_text_to_3d
 from model import Model
 
-DESCRIPTION = '# [Shap-E](https://github.com/openai/shap-e)'
+DESCRIPTION = "# [Shap-E](https://github.com/openai/shap-e)"
 
 if not torch.cuda.is_available():
-    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 model = Model()
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button',
-                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
     with gr.Tabs():
-        with gr.Tab(label='Text to 3D'):
+        with gr.Tab(label="Text to 3D"):
             create_demo_text_to_3d(model)
-        with gr.Tab(label='Image to 3D'):
+        with gr.Tab(label="Image to 3D"):
             create_demo_image_to_3d(model)
     demo.queue(max_size=10).launch()
app_image_to_3d.py CHANGED
@@ -12,46 +12,53 @@ from utils import randomize_seed_fn
 
 
 def create_demo(model: Model) -> gr.Blocks:
-    if not pathlib.Path('corgi.png').exists():
+    if not pathlib.Path("corgi.png").exists():
         subprocess.run(
             shlex.split(
-                'wget https://raw.githubusercontent.com/openai/shap-e/d99cedaea18e0989e340163dbaeb4b109fa9e8ec/shap_e/examples/example_data/corgi.png -O corgi.png'
-            ))
-    examples = ['corgi.png']
+                "wget https://raw.githubusercontent.com/openai/shap-e/d99cedaea18e0989e340163dbaeb4b109fa9e8ec/shap_e/examples/example_data/corgi.png -O corgi.png"
+            )
+        )
+    examples = ["corgi.png"]
 
     def process_example_fn(image_path: str) -> str:
         return model.run_image(image_path)
 
     with gr.Blocks() as demo:
         with gr.Box():
-            image = gr.Image(label='Input image', show_label=False, type='pil')
-            run_button = gr.Button('Run')
-            result = gr.Model3D(label='Result', show_label=False)
-            with gr.Accordion('Advanced options', open=False):
-                seed = gr.Slider(label='Seed',
-                                 minimum=0,
-                                 maximum=MAX_SEED,
-                                 step=1,
-                                 value=0)
-                randomize_seed = gr.Checkbox(label='Randomize seed',
-                                             value=True)
-                guidance_scale = gr.Slider(label='Guidance scale',
-                                           minimum=1,
-                                           maximum=20,
-                                           step=0.1,
-                                           value=3.0)
+            image = gr.Image(label="Input image", show_label=False, type="pil")
+            run_button = gr.Button("Run")
+            result = gr.Model3D(label="Result", show_label=False)
+            with gr.Accordion("Advanced options", open=False):
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=0,
+                )
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=1,
+                    maximum=20,
+                    step=0.1,
+                    value=3.0,
+                )
                 num_inference_steps = gr.Slider(
-                    label='Number of inference steps',
+                    label="Number of inference steps",
                     minimum=1,
                     maximum=100,
                     step=1,
-                    value=64)
+                    value=64,
+                )
 
-        gr.Examples(examples=examples,
-                    inputs=image,
-                    outputs=result,
-                    fn=process_example_fn,
-                    cache_examples=CACHE_EXAMPLES)
+        gr.Examples(
+            examples=examples,
+            inputs=image,
+            outputs=result,
+            fn=process_example_fn,
+            cache_examples=CACHE_EXAMPLES,
+        )
 
         inputs = [
             image,
@@ -65,10 +72,11 @@ def create_demo(model: Model) -> gr.Blocks:
             inputs=[seed, randomize_seed],
             outputs=seed,
             queue=False,
+            api_name=False,
         ).then(
             fn=model.run_image,
             inputs=inputs,
             outputs=result,
-            api_name='image-to-3d',
+            api_name="image-to-3d",
         )
     return demo
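Both demo modules chain `randomize_seed_fn` (imported from `utils`, a file this commit does not touch) ahead of the model call. A plausible sketch of that helper, consistent with how `seed`, `randomize_seed`, and `MAX_SEED` are used above; the body is an assumption, since `utils.py` is not shown:

```python
import random

from settings import MAX_SEED


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # When "Randomize seed" is checked, replace the slider value with a fresh
    # random seed; otherwise pass the user's seed through unchanged.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```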
app_text_to_3d.py CHANGED
@@ -9,15 +9,15 @@ from utils import randomize_seed_fn
 
 def create_demo(model: Model) -> gr.Blocks:
     examples = [
-        'A chair that looks like an avocado',
-        'An airplane that looks like a banana',
-        'A spaceship',
-        'A birthday cupcake',
-        'A chair that looks like a tree',
-        'A green boot',
-        'A penguin',
-        'Ube ice cream cone',
-        'A bowl of vegetables',
+        "A chair that looks like an avocado",
+        "An airplane that looks like a banana",
+        "A spaceship",
+        "A birthday cupcake",
+        "A chair that looks like a tree",
+        "A green boot",
+        "A penguin",
+        "Ube ice cream cone",
+        "A bowl of vegetables",
     ]
 
     def process_example_fn(prompt: str) -> str:
@@ -25,39 +25,47 @@ def create_demo(model: Model) -> gr.Blocks:
 
     with gr.Blocks() as demo:
         with gr.Box():
-            with gr.Row(elem_id='prompt-container'):
-                prompt = gr.Text(label='Prompt',
-                                 show_label=False,
-                                 max_lines=1,
-                                 placeholder='Enter your prompt',
-                                 container=False)
-                run_button = gr.Button('Run', scale=0)
-            result = gr.Model3D(label='Result', show_label=False)
-            with gr.Accordion('Advanced options', open=False):
-                seed = gr.Slider(label='Seed',
-                                 minimum=0,
-                                 maximum=MAX_SEED,
-                                 step=1,
-                                 value=0)
-                randomize_seed = gr.Checkbox(label='Randomize seed',
-                                             value=True)
-                guidance_scale = gr.Slider(label='Guidance scale',
-                                           minimum=1,
-                                           maximum=20,
-                                           step=0.1,
-                                           value=15.0)
+            with gr.Row(elem_id="prompt-container"):
+                prompt = gr.Text(
+                    label="Prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    container=False,
+                )
+                run_button = gr.Button("Run", scale=0)
+            result = gr.Model3D(label="Result", show_label=False)
+            with gr.Accordion("Advanced options", open=False):
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=0,
+                )
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=1,
+                    maximum=20,
+                    step=0.1,
+                    value=15.0,
+                )
                 num_inference_steps = gr.Slider(
-                    label='Number of inference steps',
+                    label="Number of inference steps",
                     minimum=1,
                     maximum=100,
                     step=1,
-                    value=64)
+                    value=64,
+                )
 
-        gr.Examples(examples=examples,
-                    inputs=prompt,
-                    outputs=result,
-                    fn=process_example_fn,
-                    cache_examples=CACHE_EXAMPLES)
+        gr.Examples(
+            examples=examples,
+            inputs=prompt,
+            outputs=result,
+            fn=process_example_fn,
+            cache_examples=CACHE_EXAMPLES,
+        )
 
         inputs = [
             prompt,
@@ -70,20 +78,23 @@ def create_demo(model: Model) -> gr.Blocks:
             inputs=[seed, randomize_seed],
             outputs=seed,
             queue=False,
+            api_name=False,
         ).then(
             fn=model.run_text,
            inputs=inputs,
             outputs=result,
+            api_name=False,
         )
         run_button.click(
             fn=randomize_seed_fn,
             inputs=[seed, randomize_seed],
             outputs=seed,
             queue=False,
+            api_name=False,
         ).then(
             fn=model.run_text,
             inputs=inputs,
             outputs=result,
-            api_name='text-to-3d',
+            api_name="text-to-3d",
         )
    return demo
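The new `api_name=False` arguments hide the intermediate seed-randomization steps (and the prompt-submit duplicate) from Gradio's auto-generated API, so only the final `image-to-3d` and `text-to-3d` endpoints are exposed. A hedged usage sketch with `gradio_client`; the Space ID and the positional argument order are assumptions based on the `inputs` lists above:

```python
from gradio_client import Client

# Space ID assumed; substitute the actual Shap-E Space.
client = Client("hysts/Shap-E")

# Arguments follow the demo's inputs list: prompt, seed, guidance_scale, num_inference_steps.
glb_path = client.predict("A penguin", 0, 15.0, 64, api_name="/text-to-3d")
print(glb_path)  # local path to the downloaded .glb result
```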
model.py CHANGED
@@ -10,14 +10,11 @@ from diffusers.utils import export_to_ply
 
 class Model:
     def __init__(self):
-        self.device = torch.device(
-            'cuda' if torch.cuda.is_available() else 'cpu')
-        self.pipe = ShapEPipeline.from_pretrained('openai/shap-e',
-                                                  torch_dtype=torch.float16)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
         self.pipe.to(self.device)
 
-        self.pipe_img = ShapEImg2ImgPipeline.from_pretrained(
-            'openai/shap-e-img2img', torch_dtype=torch.float16)
+        self.pipe_img = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
         self.pipe_img.to(self.device)
 
     def to_glb(self, ply_path: str) -> str:
@@ -26,40 +23,34 @@ class Model:
         mesh = mesh.apply_transform(rot)
         rot = trimesh.transformations.rotation_matrix(np.pi, [0, 1, 0])
         mesh = mesh.apply_transform(rot)
-        mesh_path = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
-        mesh.export(mesh_path.name, file_type='glb')
+        mesh_path = tempfile.NamedTemporaryFile(suffix=".glb", delete=False)
+        mesh.export(mesh_path.name, file_type="glb")
         return mesh_path.name
 
-    def run_text(self,
-                 prompt: str,
-                 seed: int = 0,
-                 guidance_scale: float = 15.0,
-                 num_steps: int = 64) -> str:
+    def run_text(self, prompt: str, seed: int = 0, guidance_scale: float = 15.0, num_steps: int = 64) -> str:
         generator = torch.Generator(device=self.device).manual_seed(seed)
-        images = self.pipe(prompt,
-                           generator=generator,
-                           guidance_scale=guidance_scale,
-                           num_inference_steps=num_steps,
-                           output_type='mesh').images
-        ply_path = tempfile.NamedTemporaryFile(suffix='.ply',
-                                               delete=False,
-                                               mode='w+b')
+        images = self.pipe(
+            prompt,
+            generator=generator,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_steps,
+            output_type="mesh",
+        ).images
+        ply_path = tempfile.NamedTemporaryFile(suffix=".ply", delete=False, mode="w+b")
         export_to_ply(images[0], ply_path.name)
         return self.to_glb(ply_path.name)
 
-    def run_image(self,
-                  image: PIL.Image.Image,
-                  seed: int = 0,
-                  guidance_scale: float = 3.0,
-                  num_steps: int = 64) -> str:
+    def run_image(
+        self, image: PIL.Image.Image, seed: int = 0, guidance_scale: float = 3.0, num_steps: int = 64
+    ) -> str:
         generator = torch.Generator(device=self.device).manual_seed(seed)
-        images = self.pipe_img(image,
-                               generator=generator,
-                               guidance_scale=guidance_scale,
-                               num_inference_steps=num_steps,
-                               output_type='mesh').images
-        ply_path = tempfile.NamedTemporaryFile(suffix='.ply',
-                                               delete=False,
-                                               mode='w+b')
+        images = self.pipe_img(
+            image,
+            generator=generator,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_steps,
+            output_type="mesh",
+        ).images
+        ply_path = tempfile.NamedTemporaryFile(suffix=".ply", delete=False, mode="w+b")
         export_to_ply(images[0], ply_path.name)
         return self.to_glb(ply_path.name)
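For reference, a minimal usage sketch of the reformatted `Model` class. It assumes a CUDA device, since both pipelines are loaded in `torch.float16` (the app itself warns that the demo does not work on CPU):

```python
from model import Model

model = Model()

# Text conditioning: returns the path to a temporary .glb file suitable for gr.Model3D.
glb_path = model.run_text("A chair that looks like an avocado", seed=0, guidance_scale=15.0, num_steps=64)
print(glb_path)
```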
settings.py CHANGED
@@ -2,6 +2,6 @@ import os
 
 import numpy as np
 
-CACHE_EXAMPLES = os.getenv('CACHE_EXAMPLES') == '1'
+CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1"
 
 MAX_SEED = np.iinfo(np.int32).max