jaekookang committed
Commit 68d2539 • 1 Parent(s): afb22d5

update minor
.gitignore ADDED
@@ -0,0 +1,4 @@
+ models
+ *.log
+ *.nohup
+ *.db
README.md CHANGED
@@ -8,6 +8,13 @@ app_file: gradio_imagecompletion.py
  pinned: false
  ---
 
+ # Note
+
+ - An AI demo that erases the bottom half of a given image and redraws it from scratch
+ - Uses ImageGPT
+ - Paper: https://arxiv.org/abs/2109.10282
+ - Code: https://huggingface.co/spaces/nielsr/imagegpt-completion
+
  # Configuration
 
  `title`: _string_
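The note above summarizes the demo's core loop: encode the image as ImageGPT color-cluster tokens, keep the top half as a primer, and sample the bottom half autoregressively. Below is a minimal sketch of that flow for a single image; it is not the repo's exact code, and the helper name complete_bottom_half, the hard-coded 32x32 resolution, and the single-image batch are assumptions based on the diff and standard transformers ImageGPT usage.

# Minimal sketch (assumed, not the repo's exact code): bottom-half completion with ImageGPT.
import numpy as np
import torch
from PIL import Image
from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling

feature_extractor = ImageGPTFeatureExtractor.from_pretrained("openai/imagegpt-medium")
model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-medium")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def complete_bottom_half(image: Image.Image) -> Image.Image:
    n_px = 32  # imagegpt-medium works on a 32x32 grid of color-cluster tokens (assumed here)
    clusters = np.array(feature_extractor.clusters)  # (512, 3) cluster centroids in [-1, 1]

    # Encode the image into a sequence of color-cluster token ids.
    input_ids = feature_extractor(images=image, return_tensors="np")["input_ids"]
    primer = input_ids[:, : n_px * (n_px // 2)]  # keep only the top half as the primer

    # Prepend the SOS token (vocab_size - 1) and sample the remaining tokens (no beam search).
    sos = np.full((1, 1), model.config.vocab_size - 1)
    context = torch.tensor(np.concatenate((sos, primer), axis=1)).to(device)
    output = model.generate(input_ids=context, max_length=n_px * n_px + 1,
                            temperature=1.0, do_sample=True, top_k=40)

    # Map cluster tokens back to RGB pixels and rebuild a 32x32 PIL image.
    tokens = output[0, 1:].cpu().numpy()
    pixels = np.rint(127.5 * (clusters[tokens] + 1.0)).astype(np.uint8)
    return Image.fromarray(pixels.reshape(n_px, n_px, 3))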
examples/eiffel.jpg ADDED
examples/land1.jpg DELETED
Binary file (29.9 kB)
 
examples/land2.jpg DELETED
Binary file (215 kB)
 
examples/land3.jpg DELETED
Binary file (227 kB)
 
examples/plot1.jpg CHANGED
examples/plot2.jpg DELETED
Binary file (127 kB)
 
examples/shrek.jpg ADDED
examples/squid.jpg ADDED
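The commit also swaps the example images shown in the UI (eiffel, shrek, squid in; land1-3, plot2 out). The app's diff imports glob and passes an examples list to gr.Interface, so the folder is presumably scanned at startup; a hedged sketch of that wiring, with the examples/*.jpg pattern as an assumption:

# Assumed wiring (not shown in the diff): collect the example images for the Gradio UI.
from glob import glob

examples = sorted(glob('examples/*.jpg'))  # e.g. ['examples/eiffel.jpg', 'examples/plot1.jpg', ...]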
gradio_imagecompletion.py CHANGED
@@ -13,7 +13,6 @@ import matplotlib.pyplot as plt
 
  import os
  import numpy as np
- import requests
  from glob import glob
  import gradio as gr
  from loguru import logger
@@ -30,13 +29,20 @@ logger.add('app.log', mode='a')
  logger.info('===== APP RESTARTED =====')
 
  # ========== Models ==========
+ # MODEL_DIR = 'models'
+ # os.environ['TORCH_HOME'] = MODEL_DIR
+ # os.environ['TF_HOME'] = MODEL_DIR
  feature_extractor = ImageGPTFeatureExtractor.from_pretrained(
-     "openai/imagegpt-medium")
+     "openai/imagegpt-medium",
+     # cache_dir=MODEL_DIR
+ )
  model = ImageGPTForCausalImageModeling.from_pretrained(
-     "openai/imagegpt-medium")
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model.to(device)
-
+     "openai/imagegpt-medium",
+     # cache_dir=MODEL_DIR
+ )
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(DEVICE)
+ logger.info(f'model loaded (DEVICE:{DEVICE})')
 
  def process_image(image):
      logger.info('--- image file received')
@@ -56,7 +62,7 @@ def process_image(image):
 
      # generate (no beam search)
      context = np.concatenate((np.full((batch_size, 1), model.config.vocab_size - 1), primers), axis=1)
-     context = torch.tensor(context).to(device)
+     context = torch.tensor(context).to(DEVICE)
      output = model.generate(input_ids=context, max_length=n_px*n_px + 1, temperature=1.0, do_sample=True, top_k=40)
      # decode back to images (convert color cluster tokens back to pixels)
      samples = output[:,1:].cpu().detach().numpy()
@@ -71,13 +77,14 @@ def process_image(image):
 
      # return as PIL Image
      completion = Image.fromarray(result)
+     logger.info('--- image generated')
      return completion
 
 
  iface = gr.Interface(
      process_image,
      title="An Image Completion demo that erases half of an image and fills it back in (ImageGPT)",
-     description='The AI fills in the bottom half of the given image',
+     description='The AI fills in the bottom half of the given image (takes about 100 seconds on CPU)',
      inputs=gr.inputs.Image(type="pil", label='Input image'),
      outputs=gr.outputs.Image(type="pil", label='Result drawn by the AI'),
      examples=examples,
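The commented-out MODEL_DIR lines in the hunk above, together with the new "models" entry in .gitignore, point at caching the downloaded weights inside a local folder of the Space. A minimal sketch of that idea follows; it assumes the standard cache_dir argument of from_pretrained and the TORCH_HOME environment variable, and leaves out TF_HOME since its effect here is unclear.

# Sketch of the cache-redirect idea left commented out above (assumed usage, not the repo's code).
import os
from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling

MODEL_DIR = 'models'                  # kept out of git by the new .gitignore entry
os.makedirs(MODEL_DIR, exist_ok=True)
os.environ['TORCH_HOME'] = MODEL_DIR  # torch hub/checkpoint cache, as hinted in the diff

feature_extractor = ImageGPTFeatureExtractor.from_pretrained(
    'openai/imagegpt-medium', cache_dir=MODEL_DIR)
model = ImageGPTForCausalImageModeling.from_pretrained(
    'openai/imagegpt-medium', cache_dir=MODEL_DIR)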
igpt-xl-miscellaneous-2-orig.png DELETED
Binary file (2.64 kB)
 
igpt-xl-miscellaneous-29-orig.png DELETED
Binary file (2.49 kB)