#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch, os
from model import PiscesModel, PiscesConfig
from transformers import BitsAndBytesConfig

def quantize(checkpoint, save_path, bits=8, config_path="configs/0.5B.json"):
    """
    Load a PiscesModel from a checkpoint, replace its Linear weights with
    8-bit parameters, and save the resulting state dict.

    Args:
        checkpoint (str): Path to the checkpoint file; it must contain a
            'model' key holding the model state dict.
        save_path (str): Path where the quantized state dict will be saved.
        bits (int, optional): Quantization bit width. Only 8 is supported.
            Defaults to 8.
        config_path (str, optional): Path to the model config JSON.
            Defaults to "configs/0.5B.json".

    Raises:
        ValueError: If *bits* is not 8 (previously this silently saved an
            unquantized model while reporting success).
    """
    if bits != 8:
        # Fail loudly instead of silently writing an unquantized state dict.
        raise ValueError(f"Unsupported quantization bit width: {bits} (only 8 is supported)")

    # Build the model from its JSON config.
    cfg = PiscesConfig.from_json(config_path)
    model = PiscesModel(cfg)
    # map_location='cpu' lets the checkpoint load regardless of the device
    # it was saved from.
    model.load_state_dict(torch.load(checkpoint, map_location='cpu')['model'])

    # Lazy import so bitsandbytes is only required when this function runs.
    import bitsandbytes as bnb
    for m in model.modules():
        if isinstance(m, torch.nn.Linear):
            # Bug fix: bnb.nn.Params8bit does not exist — the 8-bit parameter
            # class is bnb.nn.Int8Params, which previously raised
            # AttributeError on every call.
            # NOTE(review): Int8Params performs the actual int8 conversion
            # when the parameter is moved to a CUDA device — confirm the
            # consumer of this state dict expects that.
            m.weight = bnb.nn.Int8Params(
                m.weight.data, requires_grad=False, has_fp16_weights=False
            )

    # dirname is "" for a bare filename; makedirs("") would raise.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print("✅\tQuantized model saved to", save_path)

if __name__ == "__main__":
    # argparse is only needed when the file is executed as a script.
    import argparse

    # Command-line interface: checkpoint in, quantized state dict out.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", required=True)
    parser.add_argument("--save", required=True)
    parser.add_argument("--bits", type=int, default=8)
    opts = parser.parse_args()

    # Run the quantization with the user-supplied paths and bit width.
    quantize(opts.ckpt, opts.save, opts.bits)