from enum import Enum
import torch
import torch.nn as nn
import torch.nn.functional as F

from .quant_utils import WeightQuantizer, ActivationQuantizer
from .trans_utils import kronecker_matmul

# Execution mode of the quantization wrappers: ORG = original full-precision
# pass-through, CALIB = calibration with live transforms, EVAL = frozen eval.
ForwardMode = Enum("ForwardMode", {"ORG": "org", "CALIB": "calib", "EVAL": "eval"})


class FakeQuantizedLinearConfig:
    """Bag of hyper-parameters for a fake-quantized linear layer.

    Attributes mirror the constructor arguments:
        w_bits / a_bits: bit-widths for weight / activation quantization.
        w_asym / a_asym: use asymmetric (True) or symmetric (False) quantization.
        lwc / lac: enable learnable weight / activation clipping.
        a_groupsize: activation quantization group size (-1 = per-tensor).
    """

    def __init__(self, w_bits=16, a_bits=16, w_asym=False, a_asym=False, lwc=False, lac=False, a_groupsize=-1):
        self.w_bits, self.a_bits = w_bits, a_bits
        self.w_asym, self.a_asym = w_asym, a_asym
        self.lwc, self.lac = lwc, lac
        self.a_groupsize = a_groupsize


class FakeQuantizedLinear(nn.Module):
    """nn.Linear wrapper that fake-quantizes activations and weights.

    Modes (see ForwardMode):
        ORG        - plain pass-through to the wrapped linear.
        CALIB/EVAL - inputs and weights are routed through the quantizers.
    """

    def __init__(self, config, linear: nn.Linear):
        """
        Args:
            config: FakeQuantizedLinearConfig-like object with quantization settings.
            linear: the pretrained nn.Linear module to wrap.
        """
        super().__init__()
        self.config = config
        self.linear = linear
        # Symmetric quantization by default; the *_asym flags flip it.
        self.weight_quantizer = WeightQuantizer(in_size=linear.weight.shape[1],
                                                out_size=linear.weight.shape[0],
                                                bits=config.w_bits,
                                                perchannel=True,
                                                sym=not config.w_asym,
                                                mse=False,
                                                lwc=config.lwc)
        self.act_quantizer = ActivationQuantizer(bits=config.a_bits,
                                                 sym=not config.a_asym,
                                                 lac=config.lac,
                                                 groupsize=config.a_groupsize)
        self._mode = ForwardMode.ORG

    @property
    def weight(self):
        """Underlying (full-precision) weight tensor of the wrapped linear."""
        return self.linear.weight

    @property
    def bias(self):
        """Underlying bias tensor of the wrapped linear (may be None)."""
        return self.linear.bias

    def _ori_forward(self, hidden_states):
        """Original full-precision forward of the wrapped linear."""
        return self.linear(hidden_states)

    def _fake_quant_forward(self, hidden_states, weight, bias):
        """Linear forward with fake-quantized activations and weights."""
        hidden_states = self.act_quantizer(hidden_states)
        weight = self.weight_quantizer(weight)
        return F.linear(hidden_states, weight, bias)

    def forward(self, hidden_states):
        if self._mode == ForwardMode.ORG:
            return self._ori_forward(hidden_states)
        return self._fake_quant_forward(hidden_states, self.linear.weight, self.linear.bias)

    def to_org_mode(self):
        self._mode = ForwardMode.ORG

    def to_calib_mode(self):
        self._mode = ForwardMode.CALIB

    def reparameterize(self, quant_weight=False):
        """Optionally bake the fake-quantized weight into the stored weight.

        Bug fix: the original assigned ``weight`` only inside the
        ``if quant_weight`` branch but wrote ``self.linear.weight.data = weight``
        unconditionally, raising UnboundLocalError for quant_weight=False
        (the default used by to_eval_mode). Now the write happens only when
        there is a quantized weight to write.
        """
        if self._mode != ForwardMode.EVAL and quant_weight:
            weight = self.weight_quantizer(self.linear.weight.data)
            # Disable the quantizer so forward() does not quantize twice.
            self.weight_quantizer.enable = False
            self.linear.weight.data = weight

    def to_eval_mode(self, quant_weight=False):
        """Switch to EVAL mode, reparameterizing once under no_grad."""
        if self._mode != ForwardMode.EVAL:
            with torch.no_grad():
                self.reparameterize(quant_weight=quant_weight)
            self._mode = ForwardMode.EVAL


class FlatQuantizedLinear(FakeQuantizedLinear):
    """FakeQuantizedLinear with optional quantization-adaptive transforms
    (FlatQuant-style) on the weight input side, weight output side, and the
    incoming activations. Transforms are applied live during CALIB and folded
    into the stored weight/bias by reparameterize() before EVAL.
    """

    def __init__(self, args, linear: nn.Linear):
        super().__init__(args, linear)
        # Injected later via set_trans(); None means identity (no transform).
        self.weight_in_trans = None
        self.weight_out_trans = None
        self.act_in_trans = None

    def set_trans(self, weight_in_trans=None, weight_out_trans=None, act_in_trans=None):
        """Attach transform modules; only non-None arguments overwrite."""
        if weight_in_trans is not None:
            self.weight_in_trans = weight_in_trans
        if weight_out_trans is not None:
            self.weight_out_trans = weight_out_trans
        if act_in_trans is not None:
            self.act_in_trans = act_in_trans

    def _apply_weight_trans(self, weight, weight_in_trans, weight_out_trans):
        """Shared transform pipeline used by both _calib_forward and
        reparameterize (previously duplicated). Returns the transformed weight.
        """
        # Quantization-adaptive input-side transform (inverse-transposed so it
        # cancels the activation-side transform).
        if weight_in_trans is not None:
            weight = weight_in_trans(weight, inv_t=True)
        # Learnable weight clipping.
        if self.weight_quantizer.lwc:
            weight = self.weight_quantizer.apply_wclip(weight)
        # Output-side transform acts along the out_features dimension.
        if weight_out_trans is not None:
            weight = weight_out_trans(weight.T).T
        return weight

    def _calib_forward(self, hidden_states, weight_in_trans=None, weight_out_trans=None, act_in_trans=None):
        """Calibration forward: transforms applied on the fly, then fake-quant."""
        if act_in_trans is not None:
            hidden_states = act_in_trans(hidden_states)
        weight = self._apply_weight_trans(self.linear.weight.data, weight_in_trans, weight_out_trans)
        # The output transform must also be applied to the bias.
        if weight_out_trans is not None and self.linear.bias is not None:
            bias = weight_out_trans(self.linear.bias.data)
        else:
            bias = self.linear.bias
        return self._fake_quant_forward(hidden_states, weight, bias)

    def _eval_forward(self, hidden_states, act_in_trans=None):
        """Eval forward: weight transforms are already folded in by
        reparameterize(); only the activation transform remains live."""
        if act_in_trans is not None:
            hidden_states = act_in_trans(hidden_states)
        return self._fake_quant_forward(hidden_states, self.linear.weight.data, self.linear.bias)

    def forward(self, hidden_states):
        if self._mode == ForwardMode.ORG:
            return self._ori_forward(hidden_states)
        if self._mode == ForwardMode.CALIB:
            return self._calib_forward(hidden_states,
                                       weight_in_trans=self.weight_in_trans,
                                       weight_out_trans=self.weight_out_trans,
                                       act_in_trans=self.act_in_trans)
        return self._eval_forward(hidden_states, act_in_trans=self.act_in_trans)

    # to_org_mode/to_calib_mode: redundant byte-identical overrides removed;
    # the inherited FakeQuantizedLinear implementations are used.

    def reparameterize(self, quant_weight=False):
        """Fold the weight-side transforms into linear.weight/bias (computed in
        float64 for numerical accuracy) and drop the transform handles.

        quant_weight keeps the signature compatible with the parent override
        (previously it silently narrowed the interface): when True, the folded
        weight is additionally fake-quantized in place via fake_quant_weight().
        """
        if self._mode != ForwardMode.EVAL:
            ori_dtype = self.linear.weight.dtype
            weight = self._apply_weight_trans(self.linear.weight.data.to(torch.float64),
                                              self.weight_in_trans, self.weight_out_trans)
            if self.weight_out_trans is not None and self.linear.bias is not None:
                self.linear.bias.data = self.weight_out_trans(self.linear.bias.data)
            self.linear.weight.data = weight.to(ori_dtype)
            # Transforms are now baked in; forget them so EVAL skips them.
            self.weight_in_trans = None
            self.weight_out_trans = None
            if quant_weight:
                self.fake_quant_weight()

    def to_eval_mode(self, quant_weight=False):
        """Switch to EVAL mode, folding transforms once under no_grad."""
        if self._mode != ForwardMode.EVAL:
            with torch.no_grad():
                self.reparameterize(quant_weight=quant_weight)
            self._mode = ForwardMode.EVAL

    def fake_quant_weight(self):
        """Permanently replace the weight with its fake-quantized version and
        disable the weight quantizer so forward() does not quantize again."""
        self.linear.weight.data = self.weight_quantizer(self.linear.weight.data)
        self.weight_quantizer.enable = False

class FlatNormWrapper(nn.Module):
    """Wrap a normalization layer; outside ORG mode its output is fed through
    an optional transform module."""

    def __init__(self, norm: nn.LayerNorm, trans: nn.Module = None):
        super().__init__()
        self.norm = norm
        self.trans = trans
        self._mode = ForwardMode.ORG

    @property
    def weight(self):
        """Weight of the wrapped norm layer."""
        return self.norm.weight

    @property
    def bias(self):
        """Bias of the wrapped norm layer."""
        return self.norm.bias

    def forward(self, hidden_states):
        if self._mode is ForwardMode.ORG:
            return self._ori_forward(hidden_states)
        return self._calib_eval_forward(hidden_states)

    def _ori_forward(self, hidden_states):
        """Plain normalization, no transform."""
        return self.norm(hidden_states)

    def _calib_eval_forward(self, hidden_states):
        """Normalization followed by the transform, when one is attached."""
        out = self.norm(hidden_states)
        return out if self.trans is None else self.trans(out)

    def to_org_mode(self):
        self._mode = ForwardMode.ORG

    def to_calib_mode(self):
        self._mode = ForwardMode.CALIB

    def to_eval_mode(self):
        self.reparameterize()
        self._mode = ForwardMode.EVAL

    def reparameterize(self):
        """Currently only flips the mode flag to EVAL; folding the transform's
        diagonal scale into the norm weight is intentionally left disabled."""
        if self._mode is not ForwardMode.EVAL:
            self._mode = ForwardMode.EVAL