import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time


# Simulated original-weight-matrix generation
def generate_weight_matrix(rows, cols):
    """
    Generate a random weight matrix with elements drawn uniformly from [-1, 1].

    :param rows: number of rows of the matrix
    :param cols: number of columns of the matrix
    :return:
        weight: the generated weight matrix (torch.Tensor, float32)
    """
    # BUG FIX: the original `torch.zeros(rows, cols) * 2 - 1` produced a
    # constant all -1 matrix. torch.rand samples U[0, 1), so the affine map
    # below yields the documented uniform range [-1, 1).
    weight = torch.rand(rows, cols) * 2 - 1
    return weight

# int8 quantization function
def quantize_int8(weight):
    """
    Quantize a weight matrix to a symmetric per-tensor int8 representation.

    :param weight: the float matrix to quantize
    :return:
        quantized: int8 tensor with values clamped to [-127, 127]
        scale: quantization scale factor (0-dim torch.Tensor)
    """
    max_val = torch.max(torch.abs(weight))
    # Guard against an all-zero matrix: the original code divided by a zero
    # scale and produced NaNs. An all-zero input quantizes to all zeros; the
    # scale of 1.0 makes dequantization a no-op.
    if max_val == 0:
        return torch.zeros_like(weight, dtype=torch.int8), torch.tensor(1.0)
    scale = max_val / 127
    quantized = torch.clamp(torch.round(weight / scale), -127, 127).to(torch.int8)
    return quantized, scale

# Dequantization function
def dequantized_int8(quantized, scale):
    """
    Recover a floating-point matrix from its int8 quantized form.

    :param quantized: the int8 quantized matrix
    :param scale: the quantization scale factor
    :return:
        dequantized: the dequantized matrix (torch.Tensor, float32)
    """
    # Cast back to float32 first, then undo the quantization scaling.
    as_float = quantized.to(torch.float32)
    return as_float * scale

# Low-rank adaptation module
class LoRAModule(nn.Module):
    """
    LoRA low-rank compensation: holds two small trainable factors
    A (original_dim x rank) and B (rank x out_dim) whose product is a
    low-rank correction matrix.
    """

    def __init__(self, original_dim, rank=8, out_dim=None):
        """
        :param original_dim: number of rows of the compensation matrix
        :param rank: inner rank of the low-rank factorization
        :param out_dim: number of columns of the compensation matrix;
                        defaults to original_dim (square — the original
                        behavior, so existing callers are unaffected)
        """
        super(LoRAModule, self).__init__()
        if out_dim is None:
            out_dim = original_dim
        # Small init (std 0.01) so the initial compensation is near zero.
        self.lora_a = nn.Parameter(torch.randn(original_dim, rank) * 0.01)
        self.lora_b = nn.Parameter(torch.randn(rank, out_dim) * 0.01)

    def forward(self, x):
        """
        Return the low-rank compensation matrix A @ B.

        NOTE(review): `x` is currently unused — the compensation does not
        depend on the input. Kept for interface compatibility; confirm
        whether input-dependent compensation was intended.
        :param x: ignored
        :return: compensation matrix of shape (original_dim, out_dim)
        """
        compensation = torch.matmul(self.lora_a, self.lora_b)
        return compensation
class QLoRAAdapter(nn.Module):
    """
    Simulated QLoRA adapter: stores an int8-quantized copy of a randomly
    generated weight matrix plus a LoRA module, and reconstructs a
    LoRA-compensated float weight on the forward pass.

    NOTE(review): the LoRA compensation is (cols x cols) while the weight is
    (rows x cols), so the addition in forward() only broadcasts correctly
    when rows == cols (or rows == 1) — confirm intended usage.
    """

    def __init__(self, rows, cols, rank=8):
        """
        :param rows: rows of the simulated weight matrix
        :param cols: columns of the simulated weight matrix
        :param rank: LoRA rank
        """
        super(QLoRAAdapter, self).__init__()
        self.rows = rows
        self.cols = cols
        # Generate the original (float) weight matrix.
        self.original_weight = generate_weight_matrix(rows, cols)
        # Quantize once up front; buffers travel with the module (device
        # moves, state_dict) but are not trained.
        quantized, scale = quantize_int8(self.original_weight)
        self.register_buffer('quantized_weight', quantized)
        # BUG FIX: `scale` is already a 0-dim tensor; re-wrapping it with
        # torch.tensor(...) raises a copy-construct UserWarning. Detach and
        # clone instead for a clean, graph-free buffer.
        self.register_buffer('scale', scale.detach().clone())

        self.lora = LoRAModule(cols, rank)

    def forward(self):
        """
        Dequantize the stored int8 weight and add the LoRA compensation.

        :return: compensated float32 weight matrix
        """
        dequantized = dequantized_int8(self.quantized_weight, self.scale.item())
        compensation = self.lora(dequantized)
        compensated_weight = dequantized + compensation
        return compensated_weight

    def simulate_qlora(self):
        # TODO: not implemented yet in SOURCE; left as a stub.
        pass



if __name__ == '__main__':
    # Smoke test: quantize a small matrix and inspect the round trip.
    demo_weight = generate_weight_matrix(2, 2)
    print(demo_weight)
    demo_quantized, demo_scale = quantize_int8(demo_weight)
    print(demo_quantized)
    restored = dequantized_int8(demo_quantized, demo_scale)
    print(restored)
