
License for Gemma 2B is Google's Gemma license.
License for the Lumina-Next-T2I 2B checkpoints is Apache-2.0.

In this repo you will find FP32 (original, unchanged), BF16, and FP16 .pth files, plus FP32, BF16, and FP16 safetensors files, for the Lumina-Next-T2I 2B text-to-image model, along with the same set for the EMA variant. The BF16 .pth file works fine; I plan to check the rest later. Note that the safetensors format only stores tensors, so any non-tensor data in the original checkpoints (e.g. pickled code or metadata) is dropped during conversion. If you try any of the files, let me know how they work.
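
A quick way to check a converted file is to load it back and inspect the tensors. A minimal sketch (the filename matches the output of the conversion scripts below):

from safetensors.torch import load_file

state = load_file("consolidated.00-of-01_bf16.safetensors")

# A flat state dict: print the number of tensors and the dtypes present.
print(len(state), "tensors")
print("dtypes:", {str(t.dtype) for t in state.values()})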

You can also find un-gated files for Gemma 2B in 4-bit (bnb) and 16-bit. Both are simply copies of the files from the unsloth/aplindale repos.
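
If you want to load the 4-bit files through transformers and bitsandbytes, something like the following should work; this is a sketch, and the repo id is a placeholder for wherever the files actually live.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Placeholder repo id -- point this at the actual location of the files.
repo_id = "your-name/gemma-2b-bnb-4bit"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)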

Script used for converting the non-EMA checkpoints:

import torch
from safetensors.torch import save_file

# Load the FP32 checkpoint onto the CPU (expected to be a state dict of tensors)
fp32_model_path = "consolidated.00-of-01.pth"
fp32_model = torch.load(fp32_model_path, map_location='cpu')

# Convert the model to BF16
bf16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        bf16_model[key] = value.to(torch.bfloat16)
    elif isinstance(value, dict):
        bf16_model[key] = {k: v.to(torch.bfloat16) if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        bf16_model[key] = value

# Convert the model to FP16
fp16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        fp16_model[key] = value.half()
    elif isinstance(value, dict):
        fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        fp16_model[key] = value

# Save the FP32 model in safetensors format. Note that save_file expects
# a flat dict of tensors; nested or non-tensor entries would raise an error.
fp32_safetensors_path = "consolidated.00-of-01_fp32.safetensors"
save_file(fp32_model, fp32_safetensors_path)

# Save the BF16 model in safetensors format
bf16_safetensors_path = "consolidated.00-of-01_bf16.safetensors"
save_file(bf16_model, bf16_safetensors_path)

# Save the FP16 model in safetensors format
fp16_safetensors_path = "consolidated.00-of-01_fp16.safetensors"
save_file(fp16_model, fp16_safetensors_path)

# Save the BF16 model in .pth format
bf16_model_path = "consolidated.00-of-01_bf16.pth"
torch.save(bf16_model, bf16_model_path)

# Save the FP16 model in .pth format
fp16_model_path = "consolidated.00-of-01_fp16.pth"
torch.save(fp16_model, fp16_model_path)
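
A quick sanity check after conversion (not part of the original script): key sets should round-trip and every tensor should have the expected dtype, assuming the checkpoint is a flat dict of tensors.

import torch
from safetensors.torch import load_file

original = torch.load("consolidated.00-of-01.pth", map_location="cpu")
converted = load_file("consolidated.00-of-01_bf16.safetensors")

# Assumes the .pth checkpoint is a flat tensor dict; nested or
# non-tensor entries would not survive the safetensors round trip.
assert set(original) == set(converted)
assert all(t.dtype == torch.bfloat16 for t in converted.values())
print("round trip OK:", len(converted), "tensors")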

Script used for converting the EMA checkpoints (identical to the above except for the filenames):

import torch
from safetensors.torch import save_file

# Load the FP32 model
fp32_model_path = "consolidated_ema.00-of-01.pth"
fp32_model = torch.load(fp32_model_path, map_location='cpu')

# Convert the model to BF16
bf16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        bf16_model[key] = value.to(torch.bfloat16)
    elif isinstance(value, dict):
        bf16_model[key] = {k: v.to(torch.bfloat16) if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        bf16_model[key] = value

# Convert the model to FP16
fp16_model = {}
for key, value in fp32_model.items():
    if isinstance(value, torch.Tensor):
        fp16_model[key] = value.half()
    elif isinstance(value, dict):
        fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
    else:
        fp16_model[key] = value

# Save the FP32 model in safetensors format
fp32_safetensors_path = "consolidated_ema.00-of-01_fp32.safetensors"
save_file(fp32_model, fp32_safetensors_path)

# Save the BF16 model in safetensors format
bf16_safetensors_path = "consolidated_ema.00-of-01_bf16.safetensors"
save_file(bf16_model, bf16_safetensors_path)

# Save the FP16 model in safetensors format
fp16_safetensors_path = "consolidated_ema.00-of-01_fp16.safetensors"
save_file(fp16_model, fp16_safetensors_path)

# Save the BF16 model in .pth format
bf16_model_path = "consolidated_ema.00-of-01_bf16.pth"
torch.save(bf16_model, bf16_model_path)

# Save the FP16 model in .pth format
fp16_model_path = "consolidated_ema.00-of-01_fp16.pth"
torch.save(fp16_model, fp16_model_path)
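
Since the two scripts differ only in the filename stem, they could be collapsed into one parameterized function. A sketch (not the script that was actually run):

import torch
from safetensors.torch import save_file

def convert_checkpoint(stem):
    # Load the FP32 checkpoint onto the CPU.
    fp32 = torch.load(f"{stem}.pth", map_location="cpu")

    def cast(obj, dtype):
        # Recursively cast tensors, leaving anything else untouched.
        if isinstance(obj, torch.Tensor):
            return obj.to(dtype)
        if isinstance(obj, dict):
            return {k: cast(v, dtype) for k, v in obj.items()}
        return obj

    bf16 = cast(fp32, torch.bfloat16)
    fp16 = cast(fp32, torch.float16)

    # save_file only accepts a flat dict of tensors, so this assumes
    # the checkpoint has no nested or non-tensor entries.
    for tag, state in (("fp32", fp32), ("bf16", bf16), ("fp16", fp16)):
        save_file(state, f"{stem}_{tag}.safetensors")
    torch.save(bf16, f"{stem}_bf16.pth")
    torch.save(fp16, f"{stem}_fp16.pth")

convert_checkpoint("consolidated.00-of-01")
convert_checkpoint("consolidated_ema.00-of-01")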