Badr AlKhamissi committed
Commit e169248
1 Parent(s): 35c104c

config bug fix

Files changed (3)
  1. app.py +2 -1
  2. code/config/base.yaml +2 -0
  3. code/losses.py +2 -8
app.py CHANGED
@@ -56,7 +56,7 @@ Please select a semantic concept word and a letter you wish to generate, it will
 
 DESCRIPTION="""This demo builds on the [Word-As-Image for Semantic Typography](https://wordasimage.github.io/Word-As-Image-Page/) work to support Arabic fonts and morphing whole words into semantic concepts."""
 
-DESCRIPTION += '\n<p>This demo is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"> Creative Commons Attribution-ShareAlike 4.0 International License</a>.</p>'
+# DESCRIPTION += '\n<p>This demo is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"> Creative Commons Attribution-ShareAlike 4.0 International License</a>.</p>'
 
 if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
     DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
@@ -94,6 +94,7 @@ def set_config(semantic_concept, word, prompt, font_name, num_steps):
     cfg.font = font_name
     cfg.seed = 0
     cfg.num_iter = num_steps
+    cfg.batch_size = 1
 
     if ' ' in cfg.word:
         raise gr.Error(f'should be only one word')
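
For context on the set_config change above: the handler builds the run config and now pins cfg.batch_size = 1 so that downstream code reading that field (see the SDSLoss change below) finds it even when the loaded YAML omits it. A minimal sketch of that pattern, assuming an EasyDict-style config loaded from the demo section of base.yaml (the loading code and file path here are illustrative, not taken from the repo):

    import yaml
    from easydict import EasyDict  # EasyDict is already imported by code/losses.py

    # Hypothetical loading of the demo config; the real app may also merge parent_config sections
    with open("code/config/base.yaml") as f:
        cfg = EasyDict(yaml.safe_load(f)["demo"])

    # Mirror the fix: guarantee the field exists before SDSLoss reads it
    cfg.batch_size = 1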
code/config/base.yaml CHANGED
@@ -45,6 +45,8 @@ conformal_0.5_dist_pixel_100_kernel201:
 
 demo:
   parent_config: baseline
+  batch_size: 1
+  token: false
   level_of_cc: 1
   num_iter: 500
   loss:
code/losses.py CHANGED
@@ -7,11 +7,6 @@ from torch.nn import functional as nnf
 from easydict import EasyDict
 from shapely.geometry import Point
 from shapely.geometry.polygon import Polygon
-from torchvision import transforms
-from PIL import Image
-from transformers import CLIPProcessor, CLIPModel
-
-from diffusers import StableDiffusionPipeline
 
 class SDSLoss(nn.Module):
     def __init__(self, cfg, device, model):
@@ -19,7 +14,6 @@ class SDSLoss(nn.Module):
         self.cfg = cfg
         self.device = device
         self.pipe = model
-        self.pipe = self.pipe.to(self.device)
 
         # self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(self.device)
         # self.clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
@@ -47,8 +41,8 @@ class SDSLoss(nn.Module):
 
         self.text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
         self.text_embeddings = self.text_embeddings.repeat_interleave(self.cfg.batch_size, 0)
-        del self.pipe.tokenizer
-        del self.pipe.text_encoder
+        # del self.pipe.tokenizer
+        # del self.pipe.text_encoder
 
 
     def forward(self, x_aug):
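
The batch_size addition to base.yaml matters because SDSLoss repeat-interleaves the concatenated unconditional/conditional text embeddings by cfg.batch_size. A minimal sketch of that dependency, with stand-in tensors (the (1, 77, 768) shape is assumed for illustration; only cfg.batch_size and the cat/repeat_interleave calls come from the code above):

    import torch
    from easydict import EasyDict

    cfg = EasyDict(batch_size=1)  # the field added to the demo config

    # Stand-in uncond/text embeddings; the real ones come from the pipeline's text encoder
    uncond_embeddings = torch.zeros(1, 77, 768)
    text_embeddings = torch.zeros(1, 77, 768)

    embeddings = torch.cat([uncond_embeddings, text_embeddings])   # (2, 77, 768)
    embeddings = embeddings.repeat_interleave(cfg.batch_size, 0)   # (2 * batch_size, 77, 768)

With batch_size: 1 the repeat is a no-op, but the attribute must exist on the config for SDSLoss to be constructed without error.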