fffiloni committed on
Commit
3bddfce
•
1 Parent(s): 70e88e5

Update app.py

Files changed (1)
  1. app.py +186 -177
app.py CHANGED
@@ -1,14 +1,15 @@
1
  import gradio as gr
2
- import torch
3
  import whisper
4
  from datetime import datetime
5
  from PIL import Image
6
-
7
  import os
8
- MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
9
 
10
- from diffusers import StableDiffusionPipeline
11
 
 
12
  ### ————————————————————————————————————————
13
 
14
  title="Whisper to Stable Diffusion"
@@ -17,55 +18,62 @@ title="Whisper to Stable Diffusion"
17
 
18
  whisper_model = whisper.load_model("small")
19
 
20
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
21
 
22
- pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=MY_SECRET_TOKEN)
23
- pipe.to(device)
24
 
25
  ### ————————————————————————————————————————
26
 
 
 
 
 
 
27
  def magic_whisper_to_sd(audio, guidance_scale, nb_iterations, seed):
28
 
29
  whisper_results = translate(audio)
30
  prompt = whisper_results[2]
31
- images = diffuse(prompt, guidance_scale, nb_iterations, seed)
32
 
33
  return whisper_results[0], whisper_results[1], whisper_results[2], images
34
 
35
- def diffuse(prompt, guidance_scale, nb_iterations, seed):
36
-
37
- generator = torch.Generator(device=device).manual_seed(int(seed))
38
-
39
- print("""
40
- —
41
- Sending prompt to Stable Diffusion ...
42
- —
43
- """)
44
- print("prompt: " + prompt)
45
- print("guidance scale: " + str(guidance_scale))
46
- print("inference steps: " + str(nb_iterations))
47
- print("seed: " + str(seed))
48
-
49
- images_list = pipe(
50
- [prompt] * 2,
51
- guidance_scale=guidance_scale,
52
- num_inference_steps=nb_iterations,
53
- generator=generator
54
- )
55
-
56
- images = []
57
- safe_image = Image.open(r"unsafe.png")
58
-
59
- for i, image in enumerate(images_list["sample"]):
60
- if(images_list["nsfw_content_detected"][i]):
61
- images.append(safe_image)
62
- else:
63
- images.append(image)
64
-
65
- print("Stable Diffusion has finished")
66
- print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")
67
-
68
- return images
 
 
69
 
70
  def translate(audio):
71
  print("""
@@ -96,14 +104,17 @@ def translate(audio):
96
  print("transcript: " + transcription.text)
97
  print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")
98
  print("translated: " + translation.text)
99
-
100
- return transcription.language, transcription.text, translation.text
 
 
 
101
 
102
  ### ————————————————————————————————————————
103
 
104
  css = """
105
  .container {
106
- max-width: 1280px;
107
  margin: auto;
108
  padding-top: 1.5rem;
109
  }
@@ -260,140 +271,141 @@ css = """
260
  ### ————————————————————————————————————————
261
 
262
  with gr.Blocks(css=css) as demo:
263
- gr.HTML('''
264
- <h1>
265
- Whisper to Stable Diffusion
266
- </h1>
267
- <p style='text-align: center;'>
268
- Ask stable diffusion for images by speaking (or singing 🤗) in your native language ! Try it in French 😉
269
- </p>
270
-
271
- <p style='text-align: center;'>
272
- This demo is running on 🐢 CPU • Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
273
- —
274
- </p>
275
-
276
- ''')
277
- with gr.Row(elem_id="w2sd_container"):
278
- with gr.Column():
279
-
280
- gr.Markdown(
281
- """
282
-
283
- ## 1. Record audio or Upload an audio file:
284
- """
285
- )
286
-
287
- with gr.Tab(label="Record audio input", elem_id="record_tab"):
288
- with gr.Column():
289
- record_input = gr.Audio(
290
- source="microphone",
291
- type="filepath",
292
- show_label=False,
293
- elem_id="record_btn"
294
- )
295
- with gr.Row():
296
- audio_r_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_1")
297
- audio_r_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_1")
298
 
299
- with gr.Tab(label="Upload audio input", elem_id="upload_tab"):
300
- with gr.Column():
301
- upload_input = gr.Audio(
302
- source="upload",
303
- type="filepath",
304
- show_label=False,
305
- elem_id="upload_area"
306
- )
307
- with gr.Row():
308
- audio_u_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_2")
309
- audio_u_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_2")
310
 
311
- with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
  with gr.Row():
313
- guidance_scale = gr.Slider(2, 15, value = 7, label = 'Guidance Scale')
314
- nb_iterations = gr.Slider(10, 50, value = 25, step = 1, label = 'Steps')
315
- seed = gr.Slider(label = "Seed", minimum = 0, maximum = 2147483647, step = 1, randomize = True)
 
 
 
 
 
 
 
 
 
 
 
 
 
316
 
317
- gr.Markdown(
318
- """
319
- ## 2. Check Whisper output, correct it if necessary:
320
- """
321
- )
 
322
 
 
 
 
 
 
 
323
  with gr.Row():
 
 
 
 
 
 
324
 
325
- transcripted_output = gr.Textbox(
326
- label="Transcription in your detected spoken language",
327
- lines=3,
328
- elem_id="transcripted"
329
- )
330
- language_detected_output = gr.Textbox(label="Native language", elem_id="spoken_lang",lines=3)
331
 
332
- with gr.Column():
333
- translated_output = gr.Textbox(
334
- label="Transcript translated in English by Whisper",
335
- lines=4,
336
- elem_id="translated"
337
- )
338
- with gr.Row():
339
- clear_btn = gr.Button(value="Clear")
340
- diffuse_btn = gr.Button(value="OK, Diffuse this prompt !", elem_id="diffuse_btn")
341
 
342
- clear_btn.click(fn=lambda value: gr.update(value=""), inputs=clear_btn, outputs=translated_output)
 
 
 
 
 
 
 
 
343
 
344
-
345
-
346
-
347
 
348
- with gr.Column():
349
-
350
-
351
 
352
- gr.Markdown("""
353
- ## 3. Wait for Stable Diffusion Results ☕️
354
- Inference time is about ~5-10 minutes, when it's your turn 😬
355
- """
356
- )
357
- sd_output = gr.Gallery().style(grid=2, height="auto")
358
-
359
-
360
-
361
- gr.Markdown("""
362
- ### 📌 About the models
363
- <p style='font-size: 1em;line-height: 1.5em;'>
364
- <strong>Whisper</strong> is a general-purpose speech recognition model.<br /><br />
365
- It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. <br />
366
- —
367
- </p>
368
- <p style='font-size: 1em;line-height: 1.5em;'>
369
- <strong>Stable Diffusion</strong> is a state of the art text-to-image model that generates images from text.
370
- </p>
371
- <div id="notice">
372
- <div>
373
- LICENSE
374
- <p style='font-size: 0.8em;'>
375
- The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">CreativeML Open RAIL-M</a> license.</p>
376
- <p style='font-size: 0.8em;'>
377
- The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license.</p>
378
- <p style='font-size: 0.8em;'>
379
- The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups.</p>
380
- <p style='font-size: 0.8em;'>
381
- For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" target="_blank">read the license</a>.
382
- </p>
383
- </div>
384
- <div>
385
- Biases and content acknowledgment
386
- <p style='font-size: 0.8em;'>
387
- Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence.</p>
388
- <p style='font-size: 0.8em;'>
389
- The model was trained on the <a href="https://laion.ai/blog/laion-5b/" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes.</p>
390
- <p style='font-size: 0.8em;'> You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" target="_blank">model card</a>.
391
- </p>
392
- </div>
393
  </div>
 
 
 
 
 
 
 
 
 
 
394
 
395
- """, elem_id="about")
396
-
397
  audio_r_translate.click(translate,
398
  inputs = record_input,
399
  outputs = [
@@ -438,21 +450,18 @@ with gr.Blocks(css=css) as demo:
438
  sd_output
439
  ])
440
 
441
- diffuse_btn.click(diffuse,
442
  inputs = [
443
- translated_output,
444
- guidance_scale,
445
- nb_iterations,
446
- seed
447
- ],
448
  outputs = sd_output
449
  )
450
- gr.HTML('''
451
- <div class="footer">
452
- <p>Whisper by <a href="https://github.com/openai/whisper" target="_blank">OpenAI</a> - Stable Diffusion by <a href="https://huggingface.co/CompVis" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" target="_blank">Stability AI</a>
453
- </p>
454
- </div>
455
- ''')
456
 
457
 
458
  if __name__ == "__main__":
1
  import gradio as gr
2
+ #import torch
3
  import whisper
4
  from datetime import datetime
5
  from PIL import Image
6
+ import flag
7
  import os
8
+ #MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
9
 
10
+ #from diffusers import StableDiffusionPipeline
11
 
12
+ stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
13
  ### ————————————————————————————————————————
14
 
15
  title="Whisper to Stable Diffusion"
18
 
19
  whisper_model = whisper.load_model("small")
20
 
21
+ #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
22
 
23
+ #pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=MY_SECRET_TOKEN)
24
+ #pipe.to(device)
25
 
26
  ### ————————————————————————————————————————
27
 
28
+ def get_images(prompt):
29
+ gallery_dir = stable_diffusion(prompt, fn_index=2)
30
+ return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
31
+
32
+
33
  def magic_whisper_to_sd(audio, guidance_scale, nb_iterations, seed):
34
 
35
  whisper_results = translate(audio)
36
  prompt = whisper_results[2]
37
+ images = get_images(prompt)
38
 
39
  return whisper_results[0], whisper_results[1], whisper_results[2], images
40
 
41
+ #def diffuse(prompt, guidance_scale, nb_iterations, seed):
42
+ #
43
+ # generator = torch.Generator(device=device).manual_seed(int(seed))
44
+ #
45
+ # print("""
46
+ # —
47
+ # Sending prompt to Stable Diffusion ...
48
+ # —
49
+ # """)
50
+ # print("prompt: " + prompt)
51
+ # print("guidance scale: " + str(guidance_scale))
52
+ # print("inference steps: " + str(nb_iterations))
53
+ # print("seed: " + str(seed))
54
+ #
55
+ # images_list = pipe(
56
+ # [prompt] * 2,
57
+ # guidance_scale=guidance_scale,
58
+ # num_inference_steps=nb_iterations,
59
+ # generator=generator
60
+ # )
61
+ #
62
+ # images = []
63
+ #
64
+ # safe_image = Image.open(r"unsafe.png")
65
+ #
66
+ # for i, image in enumerate(images_list["sample"]):
67
+ # if(images_list["nsfw_content_detected"][i]):
68
+ # images.append(safe_image)
69
+ # else:
70
+ # images.append(image)
71
+ #
72
+ #
73
+ # print("Stable Diffusion has finished")
74
+ # print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")
75
+ #
76
+ # return images
77
 
78
  def translate(audio):
79
  print("""
104
  print("transcript: " + transcription.text)
105
  print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")
106
  print("translated: " + translation.text)
107
+ if transcription.language == "en":
108
+ tr_flag = flag.flag('GB')
109
+ else:
110
+ tr_flag = flag.flag(transcription.language)
111
+ return tr_flag, transcription.text, translation.text
112
 
113
  ### ————————————————————————————————————————
114
 
115
  css = """
116
  .container {
117
+ max-width: 880px;
118
  margin: auto;
119
  padding-top: 1.5rem;
120
  }
271
  ### ————————————————————————————————————————
272
 
273
  with gr.Blocks(css=css) as demo:
274
+ with gr.Column():
275
+ gr.HTML('''
276
+ <h1>
277
+ Whisper to Stable Diffusion
278
+ </h1>
279
+ <p style='text-align: center;'>
280
+ Ask stable diffusion for images by speaking (or singing 🤗) in your native language ! Try it in French 😉
281
+ </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
 
283
+ <p style='text-align: center;'>
284
+ This demo is wired to the official SD Space • Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
285
+ —
286
+ </p>
287
+
288
+ ''')
289
+ # with gr.Row(elem_id="w2sd_container"):
290
+ # with gr.Column():
 
 
 
291
 
292
+ gr.Markdown(
293
+ """
294
+
295
+ ## 1. Record audio or Upload an audio file:
296
+ """
297
+ )
298
+
299
+ with gr.Tab(label="Record audio input", elem_id="record_tab"):
300
+ with gr.Column():
301
+ record_input = gr.Audio(
302
+ source="microphone",
303
+ type="filepath",
304
+ show_label=False,
305
+ elem_id="record_btn"
306
+ )
307
+ with gr.Row():
308
+ audio_r_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_1")
309
+ audio_r_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_1")
310
+
311
+ with gr.Tab(label="Upload audio input", elem_id="upload_tab"):
312
+ with gr.Column():
313
+ upload_input = gr.Audio(
314
+ source="upload",
315
+ type="filepath",
316
+ show_label=False,
317
+ elem_id="upload_area"
318
+ )
319
  with gr.Row():
320
+ audio_u_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_2")
321
+ audio_u_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_2")
322
+
323
+ with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False):
324
+ with gr.Row():
325
+ guidance_scale = gr.Slider(2, 15, value = 7, label = 'Guidance Scale')
326
+ nb_iterations = gr.Slider(10, 50, value = 25, step = 1, label = 'Steps')
327
+ seed = gr.Slider(label = "Seed", minimum = 0, maximum = 2147483647, step = 1, randomize = True)
328
+
329
+ gr.Markdown(
330
+ """
331
+ ## 2. Check Whisper output, correct it if necessary:
332
+ """
333
+ )
334
+
335
+ with gr.Row():
336
 
337
+ transcripted_output = gr.Textbox(
338
+ label="Transcription in your detected spoken language",
339
+ lines=3,
340
+ elem_id="transcripted"
341
+ )
342
+ language_detected_output = gr.Textbox(label="Native language", elem_id="spoken_lang",lines=3)
343
 
344
+ with gr.Column():
345
+ translated_output = gr.Textbox(
346
+ label="Transcript translated in English by Whisper",
347
+ lines=4,
348
+ elem_id="translated"
349
+ )
350
  with gr.Row():
351
+ clear_btn = gr.Button(value="Clear")
352
+ diffuse_btn = gr.Button(value="OK, Diffuse this prompt !", elem_id="diffuse_btn")
353
+
354
+ clear_btn.click(fn=lambda value: gr.update(value=""), inputs=clear_btn, outputs=translated_output)
355
+
356
+
357
 
 
 
 
 
 
 
358
 
 
 
 
 
 
 
 
 
 
359
 
360
+ # with gr.Column():
361
+
362
+
363
+
364
+ gr.Markdown("""
365
+ ## 3. Wait for Stable Diffusion Results ☕️
366
+ Inference time is about ~20-30 seconds, when it's your turn 😬
367
+ """
368
+ )
369
 
370
+ sd_output = gr.Gallery().style(grid=2, height="auto")
 
 
371
 
 
 
 
372
 
373
+ gr.Markdown("""
374
+ ### 📌 About the models
375
+ <p style='font-size: 1em;line-height: 1.5em;'>
376
+ <strong>Whisper</strong> is a general-purpose speech recognition model.<br /><br />
377
+ It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. <br />
378
+ —
379
+ </p>
380
+ <p style='font-size: 1em;line-height: 1.5em;'>
381
+ <strong>Stable Diffusion</strong> is a state of the art text-to-image model that generates images from text.
382
+ </p>
383
+ <div id="notice">
384
+ <div>
385
+ LICENSE
386
+ <p style='font-size: 0.8em;'>
387
+ The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">CreativeML Open RAIL-M</a> license.</p>
388
+ <p style='font-size: 0.8em;'>
389
+ The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license.</p>
390
+ <p style='font-size: 0.8em;'>
391
+ The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups.</p>
392
+ <p style='font-size: 0.8em;'>
393
+ For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" target="_blank">read the license</a>.
394
+ </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
395
  </div>
396
+ <div>
397
+ Biases and content acknowledgment
398
+ <p style='font-size: 0.8em;'>
399
+ Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence.</p>
400
+ <p style='font-size: 0.8em;'>
401
+ The model was trained on the <a href="https://laion.ai/blog/laion-5b/" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes.</p>
402
+ <p style='font-size: 0.8em;'> You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" target="_blank">model card</a>.
403
+ </p>
404
+ </div>
405
+ </div>
406
 
407
+ """, elem_id="about")
408
+
409
  audio_r_translate.click(translate,
410
  inputs = record_input,
411
  outputs = [
450
  sd_output
451
  ])
452
 
453
+ diffuse_btn.click(get_images,
454
  inputs = [
455
+ translated_output
456
+ ],
 
 
 
457
  outputs = sd_output
458
  )
459
+ gr.HTML('''
460
+ <div class="footer">
461
+ <p>Whisper by <a href="https://github.com/openai/whisper" target="_blank">OpenAI</a> - Stable Diffusion by <a href="https://huggingface.co/CompVis" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" target="_blank">Stability AI</a>
462
+ </p>
463
+ </div>
464
+ ''')
465
 
466
 
467
  if __name__ == "__main__":
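
For context on the new call path: the committed code no longer runs a local diffusers pipeline behind an auth token; it loads the hosted stabilityai/stable-diffusion Space with gr.Blocks.load and lists the files in the gallery directory a call to it returns. Below is a minimal, standalone sketch of that path; the fn_index=2 mapping and the directory-style return value are taken from the committed code and depend on that Space's API at the time, so treat them as assumptions rather than a stable interface.

import os
import gradio as gr

# Load the public Stable Diffusion Space as a callable object
# (gr.Blocks.load is the Gradio 3.x API used in this commit).
stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")

def get_images(prompt):
    # fn_index=2 selects the Space's text-to-image function (ordering assumed
    # from the committed code); the call returns a directory of generated images.
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]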
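
The first output box now shows an emoji flag instead of the raw language code. A small sketch of that helper, assuming the flag import is the emoji-country-flag package (suggested by the flag.flag() calls in the commit); English is mapped to the GB flag because "en" is not a country code:

import flag  # emoji-country-flag package (assumed from the commit's flag.flag() calls)

def language_flag(lang_code):
    # Whisper reports ISO 639-1 language codes; many coincide with country
    # codes, but "en" does not, so the committed code falls back to the GB flag.
    if lang_code == "en":
        return flag.flag("GB")
    return flag.flag(lang_code)

# e.g. language_flag("fr") -> the French flag, language_flag("en") -> the GB flag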