hysts (HF staff) committed
Commit 9735cfa
Parent: b5f45ec
Files changed (6):
  1. .pre-commit-config.yaml +2 -2
  2. README.md +2 -1
  3. app.py +138 -176
  4. dualstylegan.py +24 -21
  5. requirements.txt +7 -7
  6. style.css +2 -4
.pre-commit-config.yaml CHANGED
@@ -20,11 +20,11 @@ repos:
       - id: docformatter
         args: ['--in-place']
   - repo: https://github.com/pycqa/isort
-    rev: 5.10.1
+    rev: 5.12.0
     hooks:
       - id: isort
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.812
+    rev: v0.991
     hooks:
       - id: mypy
         args: ['--ignore-missing-imports']
README.md CHANGED
@@ -4,9 +4,10 @@ emoji: 😻
 colorFrom: purple
 colorTo: red
 sdk: gradio
-sdk_version: 3.0.15
+sdk_version: 3.36.1
 app_file: app.py
 pinned: false
+suggested_hardware: t4-small
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py CHANGED
@@ -2,30 +2,16 @@
 
 from __future__ import annotations
 
-import argparse
 import pathlib
 
 import gradio as gr
 
 from dualstylegan import Model
 
-DESCRIPTION = '''# Portrait Style Transfer with <a href="https://github.com/williamyang1991/DualStyleGAN">DualStyleGAN</a>
+DESCRIPTION = '''# Portrait Style Transfer with [DualStyleGAN](https://github.com/williamyang1991/DualStyleGAN)
 
 <img id="overview" alt="overview" src="https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images/overview.jpg" />
 '''
-FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.dualstylegan" />'
-
-
-def parse_args() -> argparse.Namespace:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--theme', type=str)
-    parser.add_argument('--share', action='store_true')
-    parser.add_argument('--port', type=int)
-    parser.add_argument('--disable-queue',
-                        dest='enable_queue',
-                        action='store_false')
-    return parser.parse_args()
 
 
 def get_style_image_url(style_name: str) -> str:
@@ -57,186 +43,162 @@ def update_slider(choice: str) -> dict:
         'pixar': 121,
         'slamdunk': 119,
     }
-    return gr.Slider.update(maximum=max_vals[choice])
+    return gr.update(maximum=max_vals[choice])
 
 
 def update_style_image(style_name: str) -> dict:
     text = get_style_image_markdown_text(style_name)
-    return gr.Markdown.update(value=text)
-
-
-def set_example_image(example: list) -> dict:
-    return gr.Image.update(value=example[0])
-
-
-def set_example_styles(example: list) -> list[dict]:
-    return [
-        gr.Radio.update(value=example[0]),
-        gr.Slider.update(value=example[1]),
-    ]
-
-
-def set_example_weights(example: list) -> list[dict]:
-    return [
-        gr.Slider.update(value=example[0]),
-        gr.Slider.update(value=example[1]),
-    ]
-
-
-def main():
-    args = parse_args()
-    model = Model(device=args.device)
-
-    with gr.Blocks(theme=args.theme, css='style.css') as demo:
-        gr.Markdown(DESCRIPTION)
-
-        with gr.Box():
-            gr.Markdown('''## Step 1 (Preprocess Input Image)
+    return gr.update(value=text)
+
+
+model = Model()
+
+with gr.Blocks(css='style.css') as demo:
+    gr.Markdown(DESCRIPTION)
+
+    with gr.Box():
+        gr.Markdown('''## Step 1 (Preprocess Input Image)
 
 - Drop an image containing a near-frontal face to the **Input Image**.
-- If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand.
+- If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand.
 - Hit the **Preprocess** button.
-- Choose the encoder version. Default is Z+ encoder which has better stylization performance. W+ encoder better reconstructs the input image to preserve more details.
-- The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image.
+- Choose the encoder version. Default is Z+ encoder which has better stylization performance. W+ encoder better reconstructs the input image to preserve more details.
+- The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image.
 ''')
-            with gr.Row():
-                encoder_type = gr.Radio(choices=['Z+ encoder (better stylization)', 'W+ encoder (better reconstruction)'],
-                                        value='Z+ encoder (better stylization)',
-                                        label='Encoder Type')
+        with gr.Row():
+            encoder_type = gr.Radio(label='Encoder Type',
+                                    choices=[
+                                        'Z+ encoder (better stylization)',
+                                        'W+ encoder (better reconstruction)'
+                                    ],
+                                    value='Z+ encoder (better stylization)')
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        input_image = gr.Image(label='Input Image',
-                                               type='file')
-                    with gr.Row():
-                        preprocess_button = gr.Button('Preprocess')
-                with gr.Column():
-                    with gr.Row():
-                        aligned_face = gr.Image(label='Aligned Face',
-                                                type='numpy',
-                                                interactive=False)
-                with gr.Column():
-                    reconstructed_face = gr.Image(label='Reconstructed Face',
-                                                  type='numpy')
-                    instyle = gr.Variable()
-
-            with gr.Row():
-                paths = sorted(pathlib.Path('images').glob('*.jpg'))
-                example_images = gr.Dataset(components=[input_image],
-                                            samples=[[path.as_posix()]
-                                                     for path in paths])
-
-        with gr.Box():
-            gr.Markdown('''## Step 2 (Select Style Image)
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    input_image = gr.Image(label='Input Image',
+                                           type='filepath')
+                with gr.Row():
+                    preprocess_button = gr.Button('Preprocess')
+            with gr.Column():
+                with gr.Row():
+                    aligned_face = gr.Image(label='Aligned Face',
+                                            type='numpy',
+                                            interactive=False)
+            with gr.Column():
+                reconstructed_face = gr.Image(label='Reconstructed Face',
+                                              type='numpy')
+                instyle = gr.State()
+
+        with gr.Row():
+            paths = sorted(pathlib.Path('images').glob('*.jpg'))
+            gr.Examples(examples=[[path.as_posix()] for path in paths],
+                        inputs=input_image)
+
+    with gr.Box():
+        gr.Markdown('''## Step 2 (Select Style Image)
 
 - Select **Style Type**.
 - Select **Style Image Index** from the image table below.
 ''')
-            with gr.Row():
-                with gr.Column():
-                    style_type = gr.Radio(model.style_types,
-                                          label='Style Type')
-                    text = get_style_image_markdown_text('cartoon')
-                    style_image = gr.Markdown(value=text)
-                    style_index = gr.Slider(0,
-                                            316,
-                                            value=26,
-                                            step=1,
-                                            label='Style Image Index')
-
-            with gr.Row():
-                example_styles = gr.Dataset(
-                    components=[style_type, style_index],
-                    samples=[
-                        ['cartoon', 26],
-                        ['caricature', 65],
-                        ['arcane', 63],
-                        ['pixar', 80],
-                    ])
-
-        with gr.Box():
-            gr.Markdown('''## Step 3 (Generate Style Transferred Image)
+        with gr.Row():
+            with gr.Column():
+                style_type = gr.Radio(label='Style Type',
+                                      choices=model.style_types,
+                                      value=model.style_types[0])
+                text = get_style_image_markdown_text('cartoon')
+                style_image = gr.Markdown(value=text)
+                style_index = gr.Slider(label='Style Image Index',
+                                        minimum=0,
+                                        maximum=316,
+                                        step=1,
+                                        value=26)
+
+        with gr.Row():
+            gr.Examples(
+                examples=[
+                    ['cartoon', 26],
+                    ['caricature', 65],
+                    ['arcane', 63],
+                    ['pixar', 80],
+                ],
+                inputs=[style_type, style_index],
+            )
+
+    with gr.Box():
+        gr.Markdown('''## Step 3 (Generate Style Transferred Image)
 
 - Adjust **Structure Weight** and **Color Weight**.
-- These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image.
-- Tips: For W+ encoder, better way of (Structure Only) is to uncheck (Structure Only) and set Color weight to 0.
+- These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image.
+- Tips: For W+ encoder, better way of (Structure Only) is to uncheck (Structure Only) and set Color weight to 0.
 - Hit the **Generate** button.
 ''')
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        structure_weight = gr.Slider(0,
-                                                     1,
-                                                     value=0.6,
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    structure_weight = gr.Slider(label='Structure Weight',
+                                                 minimum=0,
+                                                 maximum=1,
                                                  step=0.1,
-                                                     label='Structure Weight')
+                                                 value=0.6)
-                    with gr.Row():
-                        color_weight = gr.Slider(0,
-                                                 1,
-                                                 value=1,
-                                                 step=0.1,
-                                                 label='Color Weight')
+                with gr.Row():
+                    color_weight = gr.Slider(label='Color Weight',
+                                             minimum=0,
+                                             maximum=1,
+                                             step=0.1,
+                                             value=1)
-                    with gr.Row():
-                        structure_only = gr.Checkbox(label='Structure Only')
+                with gr.Row():
+                    structure_only = gr.Checkbox(label='Structure Only',
+                                                 value=False)
-                    with gr.Row():
-                        generate_button = gr.Button('Generate')
-
-                with gr.Column():
-                    result = gr.Image(label='Result')
-
-            with gr.Row():
-                example_weights = gr.Dataset(
-                    components=[structure_weight, color_weight],
-                    samples=[
-                        [0.6, 1.0],
-                        [0.3, 1.0],
-                        [0.0, 1.0],
-                        [1.0, 0.0],
-                    ])
+                with gr.Row():
+                    generate_button = gr.Button('Generate')
+
+            with gr.Column():
+                result = gr.Image(label='Result')
+
+        with gr.Row():
+            gr.Examples(
+                examples=[
+                    [0.6, 1.0],
+                    [0.3, 1.0],
+                    [0.0, 1.0],
+                    [1.0, 0.0],
+                ],
+                inputs=[structure_weight, color_weight],
+            )
-
-        gr.Markdown(FOOTER)
-
-        preprocess_button.click(fn=model.detect_and_align_face,
-                                inputs=[input_image],
-                                outputs=aligned_face)
-        aligned_face.change(fn=model.reconstruct_face,
-                            inputs=[aligned_face, encoder_type],
-                            outputs=[
-                                reconstructed_face,
-                                instyle,
-                            ])
-        style_type.change(fn=update_slider,
-                          inputs=style_type,
-                          outputs=style_index)
-        style_type.change(fn=update_style_image,
-                          inputs=style_type,
-                          outputs=style_image)
-        generate_button.click(fn=model.generate,
-                              inputs=[
-                                  style_type,
-                                  style_index,
-                                  structure_weight,
-                                  color_weight,
-                                  structure_only,
-                                  instyle,
-                              ],
-                              outputs=result)
-        example_images.click(fn=set_example_image,
-                             inputs=example_images,
-                             outputs=example_images.components)
-        example_styles.click(fn=set_example_styles,
-                             inputs=example_styles,
-                             outputs=example_styles.components)
-        example_weights.click(fn=set_example_weights,
-                              inputs=example_weights,
-                              outputs=example_weights.components)
-
-    demo.launch(
-        enable_queue=args.enable_queue,
-        server_port=args.port,
-        share=args.share,
+
+    preprocess_button.click(
+        fn=model.detect_and_align_face,
+        inputs=[input_image],
+        outputs=aligned_face,
     )
-
-
-if __name__ == '__main__':
-    main()
+    aligned_face.change(
+        fn=model.reconstruct_face,
+        inputs=[aligned_face, encoder_type],
+        outputs=[
+            reconstructed_face,
+            instyle,
+        ],
+    )
+    style_type.change(
+        fn=update_slider,
+        inputs=style_type,
+        outputs=style_index,
+    )
+    style_type.change(
+        fn=update_style_image,
+        inputs=style_type,
+        outputs=style_image,
+    )
+    generate_button.click(
+        fn=model.generate,
+        inputs=[
+            style_type,
+            style_index,
+            structure_weight,
+            color_weight,
+            structure_only,
+            instyle,
+        ],
+        outputs=result,
+    )
+demo.queue(max_size=20).launch()
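
For reference, a minimal sketch of the Gradio 3.x idioms app.py migrates to (component names and values here are illustrative, not from the repo): the generic gr.update() in place of per-component helpers such as gr.Slider.update(), gr.State in place of the removed gr.Variable, gr.Examples in place of hand-wired gr.Dataset click handlers, and demo.queue(...).launch() in place of launch(enable_queue=...).

import gradio as gr


def update_slider(choice: str) -> dict:
    # gr.update() is applied to whichever component is bound as the output,
    # so the per-class helpers like gr.Slider.update() are no longer needed.
    max_vals = {'cartoon': 316, 'caricature': 198}
    return gr.update(maximum=max_vals[choice])


with gr.Blocks() as demo:
    style_type = gr.Radio(label='Style Type',
                          choices=['cartoon', 'caricature'],
                          value='cartoon')
    style_index = gr.Slider(label='Style Image Index',
                            minimum=0,
                            maximum=316,
                            step=1,
                            value=26)
    latent = gr.State()  # replaces the removed gr.Variable
    # gr.Examples wires the sample rows to its inputs by itself, so the manual
    # gr.Dataset + .click() plumbing from the old code is unnecessary.
    gr.Examples(examples=[['cartoon', 26], ['caricature', 65]],
                inputs=[style_type, style_index])
    style_type.change(fn=update_slider, inputs=style_type, outputs=style_index)

if __name__ == '__main__':
    # queue() replaces the removed launch(enable_queue=...) flag.
    demo.queue(max_size=20).launch()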
dualstylegan.py CHANGED
@@ -31,8 +31,9 @@ MODEL_REPO = 'CVPR/DualStyleGAN'
 
 
 class Model:
-    def __init__(self, device: torch.device | str):
-        self.device = torch.device(device)
+    def __init__(self):
+        self.device = torch.device(
+            'cuda:0' if torch.cuda.is_available() else 'cpu')
         self.landmark_model = self._create_dlib_landmark_model()
         self.encoder_dict = self._load_encoder()
         self.transform = self._create_transform()
@@ -77,14 +78,14 @@ class Model:
         model = pSp(opts)
         model.to(self.device)
         model.eval()
-
+
         ckpt_path = huggingface_hub.hf_hub_download(MODEL_REPO,
                                                     'models/encoder_wplus.pt')
         ckpt = torch.load(ckpt_path, map_location='cpu')
         opts = ckpt['opts']
         opts['device'] = self.device.type
         opts['checkpoint_path'] = ckpt_path
-        opts['output_size'] = 1024
+        opts['output_size'] = 1024
         opts = argparse.Namespace(**opts)
         model2 = pSp(opts)
         model2.to(self.device)
@@ -123,11 +124,13 @@ class Model:
         exstyles = np.load(path, allow_pickle=True).item()
         return exstyles
 
-    def detect_and_align_face(self, image) -> np.ndarray:
-        image = align_face(filepath=image.name, predictor=self.landmark_model)
+    def detect_and_align_face(self, image_path) -> np.ndarray:
+        image = align_face(filepath=image_path, predictor=self.landmark_model)
         x, y = np.random.randint(255), np.random.randint(255)
         r, g, b = image.getpixel((x, y))
-        image.putpixel((x, y), (r, g+1, b))  # trick to make sure run reconstruct_face() once any input setting changes
+        image.putpixel(
+            (x, y), (r, g + 1, b)
+        )  # trick to make sure run reconstruct_face() once any input setting changes
         return image
 
     @staticmethod
@@ -139,24 +142,25 @@ class Model:
         return tensor.cpu().numpy().transpose(1, 2, 0)
 
     @torch.inference_mode()
-    def reconstruct_face(self,
-                         image: np.ndarray, encoder_type: str) -> tuple[np.ndarray, torch.Tensor]:
+    def reconstruct_face(self, image: np.ndarray,
+                         encoder_type: str) -> tuple[np.ndarray, torch.Tensor]:
         if encoder_type == 'Z+ encoder (better stylization)':
             self.encoder_type = 'z+'
             z_plus_latent = True
-            return_z_plus_latent = True
+            return_z_plus_latent = True
         else:
             self.encoder_type = 'w+'
             z_plus_latent = False
-            return_z_plus_latent = False
+            return_z_plus_latent = False
         image = PIL.Image.fromarray(image)
         input_data = self.transform(image).unsqueeze(0).to(self.device)
-        img_rec, instyle = self.encoder_dict[self.encoder_type](input_data,
-                                                                randomize_noise=False,
-                                                                return_latents=True,
-                                                                z_plus_latent=z_plus_latent,
-                                                                return_z_plus_latent=return_z_plus_latent,
-                                                                resize=False)
+        img_rec, instyle = self.encoder_dict[self.encoder_type](
+            input_data,
+            randomize_noise=False,
+            return_latents=True,
+            z_plus_latent=z_plus_latent,
+            return_z_plus_latent=return_z_plus_latent,
+            resize=False)
         img_rec = torch.clamp(img_rec.detach(), -1, 1)
         img_rec = self.postprocess(img_rec[0])
         return img_rec, instyle
@@ -166,13 +170,12 @@ class Model:
                 color_weight: float, structure_only: bool,
                 instyle: torch.Tensor) -> np.ndarray:
 
-
        if self.encoder_type == 'z+':
             z_plus_latent = True
             input_is_latent = False
         else:
             z_plus_latent = False
-            input_is_latent = True
+            input_is_latent = True
 
         generator = self.generator_dict[style_type]
         exstyles = self.exstyle_dict[style_type]
@@ -187,7 +190,7 @@ class Model:
                 latent.reshape(latent.shape[0] * latent.shape[1],
                                latent.shape[2])).reshape(latent.shape)
         if structure_only and self.encoder_type == 'w+':
-            exstyle[:,7:18] = instyle[:,7:18]
+            exstyle[:, 7:18] = instyle[:, 7:18]
 
         img_gen, _ = generator([instyle],
                                exstyle,
@@ -200,4 +203,4 @@ class Model:
                                [color_weight] * 11)
         img_gen = torch.clamp(img_gen.detach(), -1, 1)
         img_gen = self.postprocess(img_gen[0])
-        return img_gen
+        return img_gen
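
For reference, a minimal sketch (illustrative, not the repo's code) of the two behavioral changes in dualstylegan.py: the device is now auto-detected instead of being passed in via the removed --device flag, and detect_and_align_face() now receives a plain filepath string because the Gradio input component switched to type='filepath'.

import PIL.Image
import torch


class ModelSketch:
    def __init__(self) -> None:
        # Pick the GPU when the Space has one (e.g. the suggested t4-small),
        # otherwise fall back to CPU.
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')

    def detect_and_align_face(self, image_path: str) -> PIL.Image.Image:
        # With gr.Image(type='filepath') the callback gets a str path, so the
        # old tempfile-object access (image.name) no longer applies.
        return PIL.Image.open(image_path)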
requirements.txt CHANGED
@@ -1,7 +1,7 @@
-dlib==19.23.0
-numpy==1.22.3
-opencv-python-headless==4.5.5.62
-Pillow==9.0.1
-scipy==1.8.0
-torch==1.11.0
-torchvision==0.12.0
+dlib==19.24.2
+numpy==1.23.5
+opencv-python-headless==4.8.0.74
+Pillow==9.5.0
+scipy==1.11.1
+torch==2.0.1
+torchvision==0.15.2
style.css CHANGED
@@ -1,19 +1,17 @@
 h1 {
   text-align: center;
 }
+
 img#overview {
   max-width: 1000px;
   max-height: 600px;
   display: block;
   margin: auto;
 }
+
 img#style-image {
   max-width: 1000px;
   max-height: 600px;
   display: block;
   margin: auto;
 }
-img#visitor-badge {
-  display: block;
-  margin: auto;
-}