PommesPeter committed
Commit e5e92a0 • 1 Parent(s): e872738

Update app.py

Files changed (1):
  1. app.py (+5 -7)
app.py CHANGED

@@ -39,12 +39,10 @@ description = """
 
 Demo current model: `Lumina-Next-T2I`
 
-### <span style='color: red;'>Due to the high volume of access, we have temporarily disabled the resolution extrapolation functionality.
-
-### Additionally, we offer three alternative links for Lumina-T2X access. Try to visit other demo sites. [[demo1](http://106.14.2.150:10022/)] [[demo2](http://106.14.2.150:10023/)]
-
 """
 
+hf_token = os.environ['HF_TOKEN']
+
 examples = [
     ["👽🤖👹👻"],
     ["孤舟蓑笠翁"],
@@ -159,7 +157,7 @@ def model_main(args, master_port, rank, request_queue, response_queue):
 
     text_encoder = (
         AutoModelForCausalLM.from_pretrained(
-            "google/gemma-2b", torch_dtype=dtype, device_map="cuda"
+            "google/gemma-2b", torch_dtype=dtype, device_map="cuda", token=hf_token,
         )
         .get_decoder()
         .eval()
@@ -169,7 +167,7 @@ def model_main(args, master_port, rank, request_queue, response_queue):
169
  raise NotImplementedError("Inference with >1 GPUs not yet supported")
170
 
171
  tokenizer = AutoTokenizer.from_pretrained(
172
- "google/gemma-2b", add_bos_token=True, add_eos_token=True
173
  )
174
  tokenizer.padding_side = "right"
175
 
@@ -182,7 +180,7 @@ def model_main(args, master_port, rank, request_queue, response_queue):
     if dist.get_rank() == 0:
         print(f"Creating DiT: Next-DiT")
     # latent_size = train_args.image_size // 8
-    model = models.__dict__["DiT_Llama_2B_patch2"](
+    model = models.__dict__["Next-DiT"](
         qk_norm=train_args.qk_norm,
         cap_feat_dim=cap_feat_dim,
     )
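The functional change in this commit is the token plumbing: a Hugging Face access token is read from the HF_TOKEN environment variable (on a Space, typically configured as a repository secret) and passed to both from_pretrained calls, since google/gemma-2b is a gated repository that cannot be downloaded anonymously. Below is a minimal standalone sketch of the same pattern, assuming torch.bfloat16 in place of the dtype variable that app.py computes elsewhere:

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: HF_TOKEN is set in the environment; indexing os.environ
# directly makes a missing secret fail at startup rather than mid-request.
hf_token = os.environ["HF_TOKEN"]

# token= authenticates the hub download of the gated Gemma repo.
tokenizer = AutoTokenizer.from_pretrained(
    "google/gemma-2b", add_bos_token=True, add_eos_token=True, token=hf_token
)
tokenizer.padding_side = "right"

text_encoder = (
    AutoModelForCausalLM.from_pretrained(
        "google/gemma-2b",
        torch_dtype=torch.bfloat16,  # app.py passes its own dtype here
        device_map="cuda",
        token=hf_token,
    )
    .get_decoder()  # only the decoder stack is kept as the text encoder
    .eval()
)

An os.environ.get("HF_TOKEN") fallback would also work, but it defers the failure to the first authenticated download, which is harder to diagnose on a deployed Space.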
  ["ε­€θˆŸθ“‘η¬ ηΏ"],
 
157
 
158
  text_encoder = (
159
  AutoModelForCausalLM.from_pretrained(
160
+ "google/gemma-2b", torch_dtype=dtype, device_map="cuda", token=hf_token,
161
  )
162
  .get_decoder()
163
  .eval()
 
167
  raise NotImplementedError("Inference with >1 GPUs not yet supported")
168
 
169
  tokenizer = AutoTokenizer.from_pretrained(
170
+ "google/gemma-2b", add_bos_token=True, add_eos_token=True, token=hf_token,
171
  )
172
  tokenizer.padding_side = "right"
173
 
 
180
  if dist.get_rank() == 0:
181
  print(f"Creating DiT: Next-DiT")
182
  # latent_size = train_args.image_size // 8
183
+ model = models.__dict__["Next-DiT"](
184
  qk_norm=train_args.qk_norm,
185
  cap_feat_dim=cap_feat_dim,
186
  )
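The last hunk changes the registry key for the diffusion backbone from DiT_Llama_2B_patch2 to Next-DiT. The lookup treats the models module namespace as a string-to-constructor registry via models.__dict__; because "Next-DiT" contains a hyphen, it is not a valid Python identifier and the real models module must insert that key explicitly rather than getting it for free from a class or def statement. A self-contained sketch of the pattern, with a hypothetical NextDiT placeholder and assumed constructor arguments:

import types

# Stand-in for the project's `models` module (the real code does `import models`).
models = types.ModuleType("models")

class NextDiT:
    """Hypothetical placeholder for the Next-DiT backbone."""
    def __init__(self, qk_norm: bool, cap_feat_dim: int):
        self.qk_norm = qk_norm
        self.cap_feat_dim = cap_feat_dim

# A hyphenated name cannot be defined with class/def syntax, so it has to
# be registered into the module namespace by hand.
models.__dict__["Next-DiT"] = NextDiT

# The lookup pattern used in app.py:
model = models.__dict__["Next-DiT"](qk_norm=True, cap_feat_dim=2048)
assert isinstance(model, NextDiT)

Keying construction off checkpoint metadata this way keeps train_args decoupled from Python naming rules, at the cost of a bare KeyError when the string and the registry drift apart.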