import torch

# Path to the quantized checkpoint blob.
blob_path = "./llama-2-chat-7b_r0.8_g128.pth"

# The blob maps each layer name to a dict of quantization attributes.
blob = torch.load(blob_path)
for layer, attr in blob.items():
    # str() keeps the width specifiers valid for non-string values
    # such as torch.dtype and torch.Size.
    print(f"{layer:30} | q_dtype: {str(attr['q_dtype']):5} | "
          f"orig. shape: {str(attr['original_shape']):15} | "
          f"quantized_shape: {str(attr['q_weight'].shape):15}")
print("done.")
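For reference, here is a minimal sketch of the per-layer record layout the loop above expects. The field names (q_dtype, original_shape, q_weight) come from the snippet itself; the layer name, dtype tag, and shapes are illustrative placeholders, not values from a real checkpoint.

import torch

# Hypothetical blob with the structure the inspection script reads:
# one entry per layer, each holding its quantization attributes.
blob = {
    "model.layers.0.self_attn.q_proj.weight": {
        "q_dtype": "int8",                  # dtype tag (assumed to be a string)
        "original_shape": (64, 64),         # illustrative full-precision shape
        "q_weight": torch.zeros(64, 64, dtype=torch.int8),  # illustrative tensor
    },
}
torch.save(blob, "./llama-2-chat-7b_r0.8_g128.pth")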