import os
import struct
import json

import numpy as np
import torch

from model import ModelArgs, Transformer

#---------- common utils ----------#

def serialize_fp32(file, tensor : torch.Tensor):
    """Write *tensor*'s elements to *file* as raw float32 bytes (native byte order)."""
    values = tensor.detach().cpu().view(-1).to(torch.float32).numpy()
    packed = struct.pack(f"{len(values)}f", *values)
    file.write(packed)
    
def serialize_int8(file, tensor : torch.Tensor):
    """Write *tensor*'s elements to *file* as raw int8 bytes.

    Mirrors serialize_fp32: flatten, convert, struct-pack, write.
    """
    d = tensor.detach().cpu().view(-1).numpy().astype(np.int8)
    b = struct.pack(f"{len(d)}b", *d)
    # BUG FIX: the packed bytes were built but never written, so callers
    # silently produced an empty/truncated output file.
    file.write(b)
    
def quantize_symmetry_int8(w : torch.Tensor, group_size : int):
    '''
    Symmetric per-group quantization into int8, range [-127, 127].
    https://mp.weixin.qq.com/s/EGPQNemfoyE1QnyJyjgsfg

    Args:
        w: tensor to quantize; numel must be divisible by group_size.
        group_size: number of consecutive elements sharing one scale.
    Returns:
        (int8val, scale, max_err) where int8val has shape
        (numel // group_size, group_size), scale has one float per group
        (0.0 for an all-zero group), and max_err is the worst absolute
        reconstruction error across all elements.
    '''
    assert w.numel() % group_size == 0
    w = w.float().reshape(-1, group_size)
    # max magnitude in each group
    wmax = torch.abs(w).max(dim=1).values
    # scaling factor that maps the group's max magnitude onto 127
    scale = wmax / 127.0
    # BUG FIX: an all-zero group has scale == 0, and 0/0 produced NaN
    # (round(NaN).to(int8) is undefined). Divide by 1 for those groups —
    # the values are all zero, so the quantized result is 0 either way —
    # while still returning the true (zero) scale.
    divisor = torch.where(scale > 0, scale, torch.ones_like(scale))
    # scale into [-127, 127] and round to nearest integer
    int8val = torch.round(w / divisor[:, None]).to(torch.int8)
    # dequantize by rescaling, then measure per-group reconstruction error
    fp32val = int8val.float() * scale[:, None]
    err = torch.abs(fp32val - w).max(dim=1).values
    # worst error across all groups
    max_err = err.max().item()
    return int8val, scale, max_err

def quantize_asymm_int8(w : torch.Tensor, group_size : int):
    """
    Asymmetric (affine) quantization into int8 using a single scale S and
    zero point Z for the whole tensor; group_size only shapes the per-group
    error report.

    Args:
        w: tensor to quantize; numel must be divisible by group_size.
        group_size: number of consecutive elements per error-report group.
    Returns:
        (quantized, S, Z, max_err) where quantized is int8 of shape
        (numel // group_size, group_size), S is the float scale, Z the
        integer zero point, and max_err the worst absolute reconstruction
        error.
    """
    quantized_data_type = torch.int8
    temp_w = w.float().detach().cpu().view(-1)
    Wmax = temp_w.max().item()
    Wmin = temp_w.min().item()
    # Get the Qmax and Qmin from quantized data type info
    Qmax = torch.iinfo(quantized_data_type).max
    Qmin = torch.iinfo(quantized_data_type).min
    S = (Wmax - Wmin) / (Qmax - Qmin)
    # BUG FIX: a constant tensor gives Wmax == Wmin, i.e. S == 0, which
    # crashed the Z computation with ZeroDivisionError. Any positive scale
    # represents a constant tensor exactly, so fall back to 1.0.
    if S == 0.0:
        S = 1.0
    # Zero point value, clamped into the representable range
    Z = Qmin - Wmin / S
    if Z < Qmin:
        Z = Qmin
    elif Z > Qmax:
        Z = Qmax
    else:
        Z = int(round(Z))
    quantized_temp_w = torch.clamp(torch.round(temp_w / S + Z), Qmin, Qmax)
    quantized_temp_w = quantized_temp_w.to(quantized_data_type)
    quantized_temp_w = quantized_temp_w.view(-1, group_size)
    temp_w = temp_w.view(-1, group_size)
    # BUG FIX: dequantize in float32. (q - Z) computed in int8 can exceed
    # the int8 range (e.g. 127 - (-128) = 255) and wraps around, which
    # corrupted the reported max_err.
    dequantized_temp_w = (quantized_temp_w.float() - Z) * S
    err = torch.abs(dequantized_temp_w - temp_w).max(dim=1).values
    max_err = err.max().item()
    return quantized_temp_w, S, Z, max_err

