hysts HF staff committed on
Commit
8f9f9bd
1 Parent(s): 2a12c43

Migrate from yapf to black

Browse files
Files changed (4) hide show
  1. .pre-commit-config.yaml +26 -12
  2. .style.yapf +0 -5
  3. .vscode/settings.json +11 -8
  4. app.py +72 -58
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
- rev: v4.2.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
@@ -8,29 +8,43 @@ repos:
8
  - id: check-shebang-scripts-are-executable
9
  - id: check-toml
10
  - id: check-yaml
11
- - id: double-quote-string-fixer
12
  - id: end-of-file-fixer
13
  - id: mixed-line-ending
14
- args: ['--fix=lf']
15
  - id: requirements-txt-fixer
16
  - id: trailing-whitespace
17
  - repo: https://github.com/myint/docformatter
18
- rev: v1.4
19
  hooks:
20
  - id: docformatter
21
- args: ['--in-place']
22
  - repo: https://github.com/pycqa/isort
23
  rev: 5.12.0
24
  hooks:
25
  - id: isort
 
26
  - repo: https://github.com/pre-commit/mirrors-mypy
27
- rev: v0.991
28
  hooks:
29
  - id: mypy
30
- args: ['--ignore-missing-imports']
31
- additional_dependencies: ['types-python-slugify']
32
- - repo: https://github.com/google/yapf
33
- rev: v0.32.0
34
  hooks:
35
- - id: yapf
36
- args: ['--parallel', '--in-place']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.4.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
 
8
  - id: check-shebang-scripts-are-executable
9
  - id: check-toml
10
  - id: check-yaml
 
11
  - id: end-of-file-fixer
12
  - id: mixed-line-ending
13
+ args: ["--fix=lf"]
14
  - id: requirements-txt-fixer
15
  - id: trailing-whitespace
16
  - repo: https://github.com/myint/docformatter
17
+ rev: v1.7.5
18
  hooks:
19
  - id: docformatter
20
+ args: ["--in-place"]
21
  - repo: https://github.com/pycqa/isort
22
  rev: 5.12.0
23
  hooks:
24
  - id: isort
25
+ args: ["--profile", "black"]
26
  - repo: https://github.com/pre-commit/mirrors-mypy
27
+ rev: v1.5.1
28
  hooks:
29
  - id: mypy
30
+ args: ["--ignore-missing-imports"]
31
+ additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
32
+ - repo: https://github.com/psf/black
33
+ rev: 23.9.1
34
  hooks:
35
+ - id: black
36
+ language_version: python3.10
37
+ args: ["--line-length", "119"]
38
+ - repo: https://github.com/kynan/nbstripout
39
+ rev: 0.6.1
40
+ hooks:
41
+ - id: nbstripout
42
+ args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
43
+ - repo: https://github.com/nbQA-dev/nbQA
44
+ rev: 1.7.0
45
+ hooks:
46
+ - id: nbqa-black
47
+ - id: nbqa-pyupgrade
48
+ args: ["--py37-plus"]
49
+ - id: nbqa-isort
50
+ args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
1
- [style]
2
- based_on_style = pep8
3
- blank_line_before_nested_class_or_def = false
4
- spaces_before_comment = 2
5
- split_before_logical_operator = true
 
 
 
 
 
 
.vscode/settings.json CHANGED
@@ -1,18 +1,21 @@
1
  {
2
- "python.linting.enabled": true,
3
- "python.linting.flake8Enabled": true,
4
- "python.linting.pylintEnabled": false,
5
- "python.linting.lintOnSave": true,
6
- "python.formatting.provider": "yapf",
7
- "python.formatting.yapfArgs": [
8
- "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
9
- ],
10
  "[python]": {
 
11
  "editor.formatOnType": true,
12
  "editor.codeActionsOnSave": {
13
  "source.organizeImports": true
14
  }
15
  },
 
 
 
 
 
 
 
 
 
 
16
  "editor.formatOnSave": true,
17
  "files.insertFinalNewline": true
18
  }
 
1
  {
 
 
 
 
 
 
 
 
2
  "[python]": {
3
+ "editor.defaultFormatter": "ms-python.black-formatter",
4
  "editor.formatOnType": true,
5
  "editor.codeActionsOnSave": {
6
  "source.organizeImports": true
7
  }
8
  },
9
+ "black-formatter.args": [
10
+ "--line-length=119"
11
+ ],
12
+ "isort.args": ["--profile", "black"],
13
+ "flake8.args": [
14
+ "--max-line-length=119"
15
+ ],
16
+ "ruff.args": [
17
+ "--line-length=119"
18
+ ],
19
  "editor.formatOnSave": true,
20
  "files.insertFinalNewline": true
21
  }
app.py CHANGED
@@ -13,25 +13,22 @@ import spaces
13
  import torch
14
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
15
 
16
- DESCRIPTION = '# zeroscope v2'
17
  if not torch.cuda.is_available():
18
- DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
19
 
20
- MAX_NUM_FRAMES = int(os.getenv('MAX_NUM_FRAMES', '200'))
21
- DEFAULT_NUM_FRAMES = min(MAX_NUM_FRAMES,
22
- int(os.getenv('DEFAULT_NUM_FRAMES', '24')))
23
  MAX_SEED = np.iinfo(np.int32).max
24
- CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
25
- 'CACHE_EXAMPLES') == '1'
26
 
27
  if torch.cuda.is_available():
28
- pipe = DiffusionPipeline.from_pretrained('cerspense/zeroscope_v2_576w',
29
- torch_dtype=torch.float16)
30
  pipe.enable_model_cpu_offload()
 
31
  else:
32
- pipe = DiffusionPipeline.from_pretrained('cerspense/zeroscope_v2_576w')
33
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
34
- pipe.enable_vae_slicing()
35
 
36
 
37
  def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
@@ -41,8 +38,8 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
41
 
42
 
43
  def to_video(frames: list[np.ndarray], fps: int) -> str:
44
- out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
45
- writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
46
  for frame in frames:
47
  writer.append_data(frame)
48
  writer.close()
@@ -50,59 +47,72 @@ def to_video(frames: list[np.ndarray], fps: int) -> str:
50
 
51
 
52
  @spaces.GPU
53
- def generate(prompt: str, seed: int, num_frames: int,
54
- num_inference_steps: int) -> str:
 
 
 
 
55
  generator = torch.Generator().manual_seed(seed)
56
- frames = pipe(prompt,
57
- num_inference_steps=num_inference_steps,
58
- num_frames=num_frames,
59
- width=576,
60
- height=320,
61
- generator=generator).frames
 
 
62
  return to_video(frames, 8)
63
 
64
 
65
  examples = [
66
- ['An astronaut riding a horse', 0, 24, 25],
67
- ['A panda eating bamboo on a rock', 0, 24, 25],
68
- ['Spiderman is surfing', 0, 24, 25],
69
  ]
70
 
71
- with gr.Blocks(css='style.css') as demo:
72
  gr.Markdown(DESCRIPTION)
73
- gr.DuplicateButton(value='Duplicate Space for private use',
74
- elem_id='duplicate-button',
75
- visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
 
 
76
  with gr.Box():
77
  with gr.Row():
78
- prompt = gr.Text(label='Prompt',
79
- show_label=False,
80
- max_lines=1,
81
- placeholder='Enter your prompt',
82
- container=False)
83
- run_button = gr.Button('Generate video', scale=0)
84
- result = gr.Video(label='Result', show_label=False)
85
- with gr.Accordion('Advanced options', open=False):
86
- seed = gr.Slider(label='Seed',
87
- minimum=0,
88
- maximum=MAX_SEED,
89
- step=1,
90
- value=0)
91
- randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
 
 
 
 
92
  num_frames = gr.Slider(
93
- label='Number of frames',
94
  minimum=24,
95
  maximum=MAX_NUM_FRAMES,
96
  step=1,
97
  value=24,
98
- info=
99
- 'Note that the content of the video also changes when you change the number of frames.'
 
 
 
 
 
 
100
  )
101
- num_inference_steps = gr.Slider(label='Number of inference steps',
102
- minimum=10,
103
- maximum=50,
104
- step=1,
105
- value=25)
106
 
107
  inputs = [
108
  prompt,
@@ -110,11 +120,13 @@ with gr.Blocks(css='style.css') as demo:
110
  num_frames,
111
  num_inference_steps,
112
  ]
113
- gr.Examples(examples=examples,
114
- inputs=inputs,
115
- outputs=result,
116
- fn=generate,
117
- cache_examples=CACHE_EXAMPLES)
 
 
118
 
119
  prompt.submit(
120
  fn=randomize_seed_fn,
@@ -126,7 +138,7 @@ with gr.Blocks(css='style.css') as demo:
126
  fn=generate,
127
  inputs=inputs,
128
  outputs=result,
129
- api_name='run',
130
  )
131
  run_button.click(
132
  fn=randomize_seed_fn,
@@ -140,4 +152,6 @@ with gr.Blocks(css='style.css') as demo:
140
  outputs=result,
141
  api_name=False,
142
  )
143
- demo.queue(max_size=10).launch()
 
 
 
13
  import torch
14
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
15
 
16
+ DESCRIPTION = "# zeroscope v2"
17
  if not torch.cuda.is_available():
18
+ DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
19
 
20
+ MAX_NUM_FRAMES = int(os.getenv("MAX_NUM_FRAMES", "200"))
21
+ DEFAULT_NUM_FRAMES = min(MAX_NUM_FRAMES, int(os.getenv("DEFAULT_NUM_FRAMES", "24")))
 
22
  MAX_SEED = np.iinfo(np.int32).max
23
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 
24
 
25
  if torch.cuda.is_available():
26
+ pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
27
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
28
  pipe.enable_model_cpu_offload()
29
+ pipe.enable_vae_slicing()
30
  else:
31
+ pipe = None
 
 
32
 
33
 
34
  def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 
38
 
39
 
40
  def to_video(frames: list[np.ndarray], fps: int) -> str:
41
+ out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
42
+ writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
43
  for frame in frames:
44
  writer.append_data(frame)
45
  writer.close()
 
47
 
48
 
49
@spaces.GPU
def generate(
    prompt: str,
    seed: int,
    num_frames: int,
    num_inference_steps: int,
) -> str:
    """Run the text-to-video pipeline for *prompt* and return a path to the mp4.

    The seed pins the torch RNG so identical inputs reproduce the same clip.
    Resolution is fixed at 576x320 (the native size of zeroscope_v2_576w) and
    the resulting frames are encoded at 8 fps.
    """
    # Seed a dedicated generator rather than the global RNG for reproducibility.
    rng = torch.Generator().manual_seed(seed)
    output = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        num_frames=num_frames,
        width=576,
        height=320,
        generator=rng,
    )
    return to_video(output.frames, 8)
66
 
67
 
68
  examples = [
69
+ ["An astronaut riding a horse", 0, 24, 25],
70
+ ["A panda eating bamboo on a rock", 0, 24, 25],
71
+ ["Spiderman is surfing", 0, 24, 25],
72
  ]
73
 
74
+ with gr.Blocks(css="style.css") as demo:
75
  gr.Markdown(DESCRIPTION)
76
+ gr.DuplicateButton(
77
+ value="Duplicate Space for private use",
78
+ elem_id="duplicate-button",
79
+ visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
80
+ )
81
  with gr.Box():
82
  with gr.Row():
83
+ prompt = gr.Text(
84
+ label="Prompt",
85
+ show_label=False,
86
+ max_lines=1,
87
+ placeholder="Enter your prompt",
88
+ container=False,
89
+ )
90
+ run_button = gr.Button("Generate video", scale=0)
91
+ result = gr.Video(label="Result", show_label=False)
92
+ with gr.Accordion("Advanced options", open=False):
93
+ seed = gr.Slider(
94
+ label="Seed",
95
+ minimum=0,
96
+ maximum=MAX_SEED,
97
+ step=1,
98
+ value=0,
99
+ )
100
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
101
  num_frames = gr.Slider(
102
+ label="Number of frames",
103
  minimum=24,
104
  maximum=MAX_NUM_FRAMES,
105
  step=1,
106
  value=24,
107
+ info="Note that the content of the video also changes when you change the number of frames.",
108
+ )
109
+ num_inference_steps = gr.Slider(
110
+ label="Number of inference steps",
111
+ minimum=10,
112
+ maximum=50,
113
+ step=1,
114
+ value=25,
115
  )
 
 
 
 
 
116
 
117
  inputs = [
118
  prompt,
 
120
  num_frames,
121
  num_inference_steps,
122
  ]
123
+ gr.Examples(
124
+ examples=examples,
125
+ inputs=inputs,
126
+ outputs=result,
127
+ fn=generate,
128
+ cache_examples=CACHE_EXAMPLES,
129
+ )
130
 
131
  prompt.submit(
132
  fn=randomize_seed_fn,
 
138
  fn=generate,
139
  inputs=inputs,
140
  outputs=result,
141
+ api_name="run",
142
  )
143
  run_button.click(
144
  fn=randomize_seed_fn,
 
152
  outputs=result,
153
  api_name=False,
154
  )
155
+
156
+ if __name__ == "__main__":
157
+ demo.queue(max_size=10).launch()