adamo1139 commited on
Commit
80c1f0b
1 Parent(s): 0ef3374

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +101 -1
README.md CHANGED
@@ -9,4 +9,104 @@ License for Lumina-Next-T2I 2B checkpoints is Apache-2.
9
  In this repo, you will find FP32 (original, un-changed), BF16 and FP16 PTH and FP32, BF16, FP16 safetensor files for Lumina T2I 2B text-to-image model. You will also find the same for EMA variant.
10
  The BF16 .pth file works fine; I plan to check the rest later. Some code may have been removed from the safetensors files during conversion — I don't know. If you try to run any of the files, let me know how they work.
11
 
12
- You can also find un-gated files for Gamma 2B 4-bit (bnb) and 16-bit. Both are simply copies of those files from unsloth/aplindale repos.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  In this repo, you will find FP32 (original, un-changed), BF16 and FP16 PTH and FP32, BF16, FP16 safetensor files for Lumina T2I 2B text-to-image model. You will also find the same for EMA variant.
10
  The BF16 .pth file works fine; I plan to check the rest later. Some code may have been removed from the safetensors files during conversion — I don't know. If you try to run any of the files, let me know how they work.
11
 
12
+ You can also find un-gated files for Gemma 2B 4-bit (bnb) and 16-bit. Both are simply copies of those files from the unsloth/alpindale repos.
13
+
14
+ Script used for converting non-EMA checkpoints.
15
+
16
+ ```
17
+ import torch
18
+ from safetensors.torch import save_file, load_file
19
+
20
+ # Load the FP32 model
21
+ fp32_model_path = "consolidated.00-of-01.pth"
22
+ fp32_model = torch.load(fp32_model_path, map_location='cpu')
23
+
24
+ # Convert the model to BF16
25
+ bf16_model = {}
26
+ for key, value in fp32_model.items():
27
+ if isinstance(value, torch.Tensor):
28
+ bf16_model[key] = value.to(torch.bfloat16)
29
+ elif isinstance(value, dict):
30
+ bf16_model[key] = {k: v.to(torch.bfloat16) if isinstance(v, torch.Tensor) else v for k, v in value.items()}
31
+ else:
32
+ bf16_model[key] = value
33
+
34
+ # Convert the model to FP16
35
+ fp16_model = {}
36
+ for key, value in fp32_model.items():
37
+ if isinstance(value, torch.Tensor):
38
+ fp16_model[key] = value.half()
39
+ elif isinstance(value, dict):
40
+ fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
41
+ else:
42
+ fp16_model[key] = value
43
+
44
+ # Save the FP32 model in safetensors format
45
+ fp32_safetensors_path = "consolidated.00-of-01_fp32.safetensors"
46
+ save_file(fp32_model, fp32_safetensors_path)
47
+
48
+ # Save the BF16 model in safetensors format
49
+ bf16_safetensors_path = "consolidated.00-of-01_bf16.safetensors"
50
+ save_file(bf16_model, bf16_safetensors_path)
51
+
52
+ # Save the FP16 model in safetensors format
53
+ fp16_safetensors_path = "consolidated.00-of-01_fp16.safetensors"
54
+ save_file(fp16_model, fp16_safetensors_path)
55
+
56
+ # Save the BF16 model in .pth format
57
+ bf16_model_path = "consolidated.00-of-01_bf16.pth"
58
+ torch.save(bf16_model, bf16_model_path)
59
+
60
+ fp16_model_path = "consolidated.00-of-01_fp16.pth"
61
+ torch.save(fp16_model, fp16_model_path)
62
+ ```
63
+
64
+ Script used for converting EMA checkpoints.
65
+
66
+ ```
67
+ import torch
68
+ from safetensors.torch import save_file, load_file
69
+
70
+ # Load the FP32 model
71
+ fp32_model_path = "consolidated_ema.00-of-01.pth"
72
+ fp32_model = torch.load(fp32_model_path, map_location='cpu')
73
+
74
+ # Convert the model to BF16
75
+ bf16_model = {}
76
+ for key, value in fp32_model.items():
77
+ if isinstance(value, torch.Tensor):
78
+ bf16_model[key] = value.to(torch.bfloat16)
79
+ elif isinstance(value, dict):
80
+ bf16_model[key] = {k: v.to(torch.bfloat16) if isinstance(v, torch.Tensor) else v for k, v in value.items()}
81
+ else:
82
+ bf16_model[key] = value
83
+
84
+ # Convert the model to FP16
85
+ fp16_model = {}
86
+ for key, value in fp32_model.items():
87
+ if isinstance(value, torch.Tensor):
88
+ fp16_model[key] = value.half()
89
+ elif isinstance(value, dict):
90
+ fp16_model[key] = {k: v.half() if isinstance(v, torch.Tensor) else v for k, v in value.items()}
91
+ else:
92
+ fp16_model[key] = value
93
+
94
+ # Save the FP32 model in safetensors format
95
+ fp32_safetensors_path = "consolidated_ema.00-of-01_fp32.safetensors"
96
+ save_file(fp32_model, fp32_safetensors_path)
97
+
98
+ # Save the BF16 model in safetensors format
99
+ bf16_safetensors_path = "consolidated_ema.00-of-01_bf16.safetensors"
100
+ save_file(bf16_model, bf16_safetensors_path)
101
+
102
+ # Save the FP16 model in safetensors format
103
+ fp16_safetensors_path = "consolidated_ema.00-of-01_fp16.safetensors"
104
+ save_file(fp16_model, fp16_safetensors_path)
105
+
106
+ # Save the BF16 model in .pth format
107
+ bf16_model_path = "consolidated_ema.00-of-01_bf16.pth"
108
+ torch.save(bf16_model, bf16_model_path)
109
+
110
+ fp16_model_path = "consolidated_ema.00-of-01_fp16.pth"
111
+ torch.save(fp16_model, fp16_model_path)
112
+ ```