ZhouZJ36DL committed
Commit 4a31fc7 · 1 Parent(s): 8e8c8d2

modified: src/flux/modules/conditioner.py

src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/flux/modules/conditioner.py CHANGED
@@ -41,6 +41,11 @@ class HFEmbedder(nn.Module):
         assert input_ids.shape[1] == self.max_length, f"Sequence length {input_ids.shape[1]} does not match max_length {self.max_length}"
         print(input_ids)
 
+        print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
+        print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
+        print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
+        print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
+
         outputs = self.hf_module(
             input_ids=input_ids.to(self.hf_module.device),
             attention_mask=batch_encoding["attention_mask"].to(self.hf_module.device),
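
For context: these prints compare the tokenizer's reported vocabulary against the size of the model's embedding table. If the tokenizer can emit a token id at or above hf_module.config.vocab_size, the embedding lookup fails (on CUDA this typically surfaces as an opaque device-side assert rather than a readable error). Below is a minimal, hypothetical sketch of the same check as a standalone helper, assuming only the HFEmbedder attributes visible in the diff (self.tokenizer, self.hf_module, self.max_length); the name check_vocab_alignment is an illustration, not part of the repository.

def check_vocab_alignment(embedder, text: list[str]) -> None:
    # Hypothetical helper mirroring the debug prints in the commit:
    # tokenize once, then verify every id fits the embedding table.
    batch_encoding = embedder.tokenizer(
        text,
        truncation=True,
        max_length=embedder.max_length,
        padding="max_length",
        return_tensors="pt",
    )
    input_ids = batch_encoding["input_ids"]

    # tokenizer.vocab_size counts the base vocabulary only (added
    # tokens are excluded); config.vocab_size is the embedding-table size.
    tok_vocab = embedder.tokenizer.vocab_size
    model_vocab = embedder.hf_module.config.vocab_size
    print(f"tokenizer.vocab_size: {tok_vocab}")
    print(f"hf_module.config.vocab_size: {model_vocab}")

    # Any id >= model_vocab would index past the embedding table.
    max_id = int(input_ids.max())
    assert max_id < model_vocab, (
        f"token id {max_id} out of range for embedding table of size {model_vocab}"
    )

Note that the two numbers need not match exactly: stock T5 checkpoints, for example, report tokenizer.vocab_size = 32100 while config.vocab_size = 32128 (the table is padded), which is harmless as long as no emitted id reaches the table size. A mismatch in the other direction is what these debug prints would expose.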