Logan Zoellner committed on
Commit e3d0d75
1 Parent(s): 2ab02d6
Files changed (1)
  1. app.py +10 -12
app.py CHANGED
@@ -1,5 +1,5 @@
 
-
+import gradio as gr
 
 import os
 os.system("git clone https://huggingface.co/Cene655/ImagenT5-3B")
@@ -48,7 +48,7 @@ from gfpgan import GFPGANer
 has_cuda = th.cuda.is_available()
 device = th.device('cpu' if not has_cuda else 'cuda')
 
-Setting Up
+#Setting Up
 
 def model_fn(x_t, ts, **kwargs):
     guidance_scale = 5
@@ -97,17 +97,17 @@ model.to(device)
 model.load_state_dict(_fix_path('/content/ImagenT5-3B/model.pt'))
 print('total base parameters', sum(x.numel() for x in model.parameters()))
 
-total base parameters 1550556742
+#total base parameters 1550556742
 
 num_params = sum(param.numel() for param in model.parameters())
 num_params
 
-1550556742
+#1550556742
 
 realesrgan_model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64,
                            num_block=23, num_grow_ch=32, scale=4)
 
-netscale = 4
+#netscale = 4
 
 upsampler = RealESRGANer(
     scale=netscale,
@@ -129,12 +129,7 @@ face_enhancer = GFPGANer(
 
 tokenizer = AutoTokenizer.from_pretrained(options['t5_name'])
 
-/usr/local/lib/python3.7/dist-packages/transformers/models/t5/tokenization_t5_fast.py:161: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.
-For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.
-- Be aware that you SHOULD NOT rely on t5-3b automatically truncating your input to 512 when padding/encoding.
-- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.
-- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.
-FutureWarning,
+
 
 #@title What do you want to generate?
 
@@ -174,7 +169,7 @@ def gen_img(prompt):
     model_kwargs["mask"] = th.cat((cond_attention_mask,
                                    uncond_attention_mask)).to(device)
 
-    Generation
+    #Generation
 
     model.del_cache()
     sample = diffusion.p_sample_loop(
@@ -189,6 +184,9 @@ def gen_img(prompt):
 
     return sample
 
+
+
+
 demo = gr.Blocks()
 
 with demo:
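Note on the `@@ -97,17 +97,17 @@` hunk: it comments out `netscale = 4`, yet the unchanged context line `scale=netscale` still reads that name, so the module will raise a NameError on import unless `netscale` is defined somewhere outside the hunks shown here. A minimal sketch that keeps the upsampler construction self-consistent, using the public `basicsr`/`realesrgan` APIs; the `model_path` value is a hypothetical placeholder, not taken from this commit:

from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

# Same RRDBNet arguments as the context lines in this hunk.
realesrgan_model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64,
                           num_block=23, num_grow_ch=32, scale=4)

netscale = 4  # keep defined: RealESRGANer(scale=netscale, ...) reads it below

upsampler = RealESRGANer(
    scale=netscale,
    model_path='weights/RealESRGAN_x4plus.pth',  # hypothetical path, not from the diff
    model=realesrgan_model,
)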
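The six lines deleted in the `@@ -129,12 +129,7 @@` hunk are a `transformers` FutureWarning that had been pasted into the file as plain text (another notebook artifact). The warning itself names the remedy: instantiate the tokenizer with `model_max_length`. A minimal sketch of that remedy, assuming `options['t5_name']` resolves to a T5 checkpoint such as `t5-3b` (the actual value never appears in this diff):

from transformers import AutoTokenizer

# Passing model_max_length explicitly avoids the FutureWarning quoted above.
tokenizer = AutoTokenizer.from_pretrained('t5-3b', model_max_length=512)

# Per-call truncation also works, as the warning suggests:
ids = tokenizer('a painting of a fox', max_length=128,
                truncation=True, return_tensors='pt')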
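The first hunk adds `import gradio as gr`, which the pre-existing `demo = gr.Blocks()` at the bottom of the file depends on. A minimal sketch of the Blocks wiring that import enables; the component names, labels, and the `demo.launch()` call are illustrative assumptions, since the diff shows only `demo = gr.Blocks()` and `with demo:`:

import gradio as gr  # the import this commit adds near the top of app.py

def gen_img(prompt):
    # Stand-in for app.py's real pipeline: diffusion.p_sample_loop(...)
    # followed by Real-ESRGAN upscaling and GFPGAN face enhancement.
    raise NotImplementedError('wire the diffusion sampler in here')

demo = gr.Blocks()
with demo:
    prompt_box = gr.Textbox(label='What do you want to generate?')
    output = gr.Image()
    generate = gr.Button('Generate')
    generate.click(fn=gen_img, inputs=prompt_box, outputs=output)

demo.launch()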