import torch
import torch.nn as nn

from flat_quant.utils.function_utils import get_init_weight, get_inverse


def kronecker_matmul(x, hadL, hadR):
    """Right-multiply ``x`` by ``kron(hadL, hadR)`` without materializing
    the Kronecker product.

    Equivalent to::

        had = torch.kron(hadL, hadR)
        x = x.reshape(-1, had.shape[0])
        x = x.matmul(had).reshape(init_shape)
    """
    orig_shape = x.shape
    # View each row of x as an (L, R) block; for row-major flattening,
    # flat(X) @ (A kron B) == flat(A.T @ X @ B).
    blocks = x.reshape(-1, hadL.shape[0], hadR.shape[0])
    out = hadL.T @ (blocks @ hadR)
    return out.reshape(orig_shape)

# ---------- transformation version of singular value decomposition ----------


class SingleTransMatrix(nn.Module):
    """Base class for a single square learnable transform applied to one
    Kronecker factor of the input's last dimension.

    Subclasses implement ``get_matrix`` and ``reparameterize``.  The
    ``deriction`` argument (sic — spelling kept for caller compatibility)
    selects whether the matrix acts as the "left" or "right" factor.
    """

    def __init__(self, size, deriction="right"):
        super(SingleTransMatrix, self).__init__()
        self.size = size               # side length of the square transform
        self._eval_mode = False        # True once reparameterize() has run
        self.matrix = None             # cached matrix (populated in eval mode)
        self.matrix_inv_t = None       # cached inverse-transpose (eval mode)
        self.deriction = deriction

    def get_matrix(self, inv_t=False):
        """Return the transform matrix (or its inverse-transpose if ``inv_t``)."""
        raise NotImplementedError

    def reparameterize(self):
        """Fold learnable factors into fixed ``matrix`` / ``matrix_inv_t``."""
        raise NotImplementedError

    def to_eval_mode(self):
        # Idempotent: reparameterize once, without tracking gradients.
        if not self._eval_mode:
            with torch.no_grad():
                self.reparameterize()
            self._eval_mode = True

    def __repr__(self):
        res = f"{self.__class__.__name__}(eval_mode={self._eval_mode}"
        res += f", matrix.shape={self.size})"
        return res

    def forward(self, inp, inv_t=False):
        if self.deriction == "right":
            init_shape = inp.shape
            matrix = self.get_matrix(inv_t=inv_t).to(inp)
            inp = inp.reshape(-1, matrix.shape[0])
            return inp.matmul(matrix).reshape(init_shape)
        elif self.deriction == "left":
            init_shape = inp.shape
            # NOTE(review): assumes init_shape[-1] is divisible by self.size
            # (left factor of a Kronecker-structured last dim) — confirm at callers.
            matrix = self.get_matrix(inv_t=inv_t).T.to(inp)
            inp = inp.reshape(-1, self.size, init_shape[-1] // self.size)
            output = matrix @ inp
            return output.reshape(init_shape)
        else:
            # Fix: the original f-string had no placeholder; include the
            # offending value so misconfiguration is debuggable.
            raise ValueError(f"Invalid deriction: {self.deriction!r}")


class SVDSingleTransMatrix(SingleTransMatrix):
    """SingleTransMatrix parameterized as ``U @ diag(d) @ V^T`` with
    orthogonal U, V (Cayley parametrization), so the inverse-transpose is
    cheap: ``(U diag(d) V^T)^{-T} = U diag(1/d) V^T``.
    """

    def __init__(self, size, deriction="right", diag_relu=False):
        super(SVDSingleTransMatrix, self).__init__(size, deriction)
        self.linear_u = nn.Linear(size, size, bias=False)
        self.linear_u.weight.data = get_init_weight(size).to(self.linear_u.weight)
        self.linear_u = nn.utils.parametrizations.orthogonal(self.linear_u, orthogonal_map="cayley", use_trivialization=False)
        self.linear_v = nn.Linear(size, size, bias=False)
        self.linear_v.weight.data = get_init_weight(size).to(self.linear_v.weight)
        self.linear_v = nn.utils.parametrizations.orthogonal(self.linear_v, orthogonal_map="cayley", use_trivialization=False)
        if diag_relu:
            # Softplus keeps the diagonal strictly positive; initialize the raw
            # parameter so that softplus(init_diag) == 1 at the start.
            beta = 1
            init_diag = torch.log(torch.exp(torch.tensor(beta)) - 1.0) / beta
            self.linear_diag = torch.nn.Parameter(init_diag * torch.ones(size), requires_grad=True)
            self._diag_relu = nn.Softplus(beta=beta, threshold=20)
        else:
            self.linear_diag = torch.nn.Parameter(torch.ones(size), requires_grad=True)
            self._diag_relu = None
        self.matrix = None
        self.matrix_inv_t = None

    def get_diag(self):
        """Return the (optionally softplus-activated) diagonal values."""
        if self._diag_relu is not None:
            return self._diag_relu(self.linear_diag)
        else:
            return self.linear_diag

    def get_matrix(self, inv_t=False):
        if not self._eval_mode:
            orthog_u, orthog_v = self.linear_u.weight, self.linear_v.weight
            linear_diag = self.get_diag()
            if inv_t:
                # Inverse-transpose of U D V^T is U D^{-1} V^T (U, V orthogonal).
                linear_diag = 1 / linear_diag
            return orthog_u @ torch.diag(linear_diag) @ orthog_v.t()
        else:
            if inv_t:
                return self.matrix_inv_t
            return self.matrix

    def reparameterize(self):
        # Fix: guard against double invocation (consistent with
        # InvSingleTransMatrix) — the factor modules are deleted below, so a
        # second direct call would otherwise fail on missing attributes.
        if self._eval_mode:
            return
        linear_diag = self.get_diag()
        matrix = self.linear_u.weight @ torch.diag(linear_diag) @ self.linear_v.weight.t()
        matrix_inv_t = self.linear_u.weight @ torch.diag(1 / linear_diag) @ self.linear_v.weight.t()
        self.matrix = nn.Parameter(matrix, requires_grad=False)
        self.matrix_inv_t = nn.Parameter(matrix_inv_t, requires_grad=False)
        self._eval_mode = True
        # Drop the factored parameters; only the folded matrices remain.
        del self.linear_u, self.linear_diag, self.linear_v


class InvSingleTransMatrix(SingleTransMatrix):
    """SingleTransMatrix parameterized by a dense (assumed invertible) matrix;
    the inverse-transpose is computed on demand via ``get_inverse``.

    ``diag_relu`` is accepted and ignored so that both transform flavors can
    be constructed with the same keyword arguments — ``GeneralMatrixTrans``
    passes ``diag_relu=`` unconditionally, which previously raised a
    ``TypeError`` for ``tran_type != "svd"``.
    """

    def __init__(self, size, deriction="right", diag_relu=False):
        super(InvSingleTransMatrix, self).__init__(size, deriction)
        linear = nn.Linear(size, size, bias=False)
        linear.weight.data = get_init_weight(size).to(linear.weight)
        self.linear = linear
        self.matrix = None
        self.matrix_inv_t = None

    def get_matrix(self, inv_t=False):
        if not self._eval_mode:
            matrix = self.linear.weight
            if inv_t:
                matrix = get_inverse(matrix).T
            return matrix
        else:
            if inv_t:
                return self.matrix_inv_t
            return self.matrix

    def reparameterize(self):
        # Idempotent: the linear module is deleted below, so re-entry is guarded.
        if not self._eval_mode:
            matrix = self.linear.weight
            matrix_inv_t = get_inverse(matrix).T
            self.matrix = nn.Parameter(matrix, requires_grad=False)
            self.matrix_inv_t = nn.Parameter(matrix_inv_t, requires_grad=False)
            self._eval_mode = True
            del self.linear


class DiagonalTransMatrix(nn.Module):
    """Element-wise (diagonal) scaling transform.

    After ``reparameterize``/``to_eval_mode`` the scale parameter is dropped
    and the module acts as the identity.
    """

    def __init__(self, size, init_para=None):
        super(DiagonalTransMatrix, self).__init__()
        initial = torch.ones((size)) if init_para is None else init_para
        self.diag_scale = torch.nn.Parameter(initial, requires_grad=True)

    def forward(self, inp, inv_t=False):
        if self.diag_scale is None:
            # Eval mode: scale has been folded away elsewhere.
            return inp
        scale = self.diag_scale.to(inp)
        return inp / scale if inv_t else inp * scale

    def reparameterize(self):
        # Dropping the parameter turns forward() into the identity.
        self.diag_scale = None

    def to_eval_mode(self):
        self.reparameterize()


class GeneralMatrixTrans(nn.Module):
    """Kronecker-factored transform: an optional per-element diagonal scale
    followed by a right-factor and a left-factor square transform.

    ``tran_type`` selects the SVD-parameterized flavor ("svd") or the direct
    dense/inverse flavor (anything else).
    """

    def __init__(self, left_size,
                 right_size,
                 add_diag=False,
                 diag_init_para=None,
                 tran_type="svd",
                 diag_relu=False):
        super(GeneralMatrixTrans, self).__init__()
        if tran_type == "svd":
            self.left_trans = SVDSingleTransMatrix(left_size, deriction="left", diag_relu=diag_relu)
            self.right_trans = SVDSingleTransMatrix(right_size, deriction="right", diag_relu=diag_relu)
        else:
            # Fix: only the SVD flavor has a diag_relu option; the original
            # passed diag_relu= to InvSingleTransMatrix, whose __init__ does
            # not accept it, raising TypeError for tran_type != "svd".
            self.left_trans = InvSingleTransMatrix(left_size, deriction="left")
            self.right_trans = InvSingleTransMatrix(right_size, deriction="right")

        if add_diag:
            # Diagonal acts on the full flattened last dim (left * right).
            self.diag_trans = DiagonalTransMatrix(left_size * right_size, diag_init_para)
        else:
            self.diag_trans = None

    def forward(self, inp, inv_t=False):
        # Order: diagonal scale, then right factor, then left factor.
        if self.diag_trans is not None:
            inp = self.diag_trans(inp, inv_t=inv_t)
        if self.right_trans is not None:
            inp = self.right_trans(inp, inv_t=inv_t)
        if self.left_trans is not None:
            inp = self.left_trans(inp, inv_t=inv_t)
        return inp

    def to_eval_mode(self):
        self.left_trans.to_eval_mode()
        self.right_trans.to_eval_mode()
        if self.diag_trans is not None:
            self.diag_trans.to_eval_mode()


# class SVDDecomposeTransMatrix(nn.Module):
#     def __init__(self, left_size, right_size, add_diag=False, diag_init_para=None):
#         super(SVDDecomposeTransMatrix, self).__init__()
#         self.linear_u_left = nn.Linear(left_size, left_size, bias=False)
#         self.linear_u_left.weight.data = get_init_weight(left_size).to(self.linear_u_left.weight)
#         self.linear_u_left = nn.utils.parametrizations.orthogonal(self.linear_u_left, orthogonal_map="cayley", use_trivialization=False)
#         self.linear_v_left = nn.Linear(left_size, left_size, bias=False)
#         self.linear_v_left.weight.data = get_init_weight(left_size).to(self.linear_v_left.weight)
#         self.linear_v_left = nn.utils.parametrizations.orthogonal(self.linear_v_left, orthogonal_map="cayley", use_trivialization=False)
#         self.linear_diag_left = torch.nn.Parameter(torch.ones(left_size), requires_grad=True)

#         self.linear_u_right = nn.Linear(right_size, right_size, bias=False)
#         self.linear_u_right.weight.data = get_init_weight(right_size).to(self.linear_u_right.weight)
#         self.linear_u_right = nn.utils.parametrizations.orthogonal(self.linear_u_right, orthogonal_map="cayley", use_trivialization=False)
#         self.linear_v_right = nn.Linear(right_size, right_size, bias=False)
#         self.linear_v_right.weight.data = get_init_weight(right_size).to(self.linear_v_right.weight)
#         self.linear_v_right = nn.utils.parametrizations.orthogonal(self.linear_v_right, orthogonal_map="cayley", use_trivialization=False)
#         self.linear_diag_right = torch.nn.Parameter(torch.ones(right_size), requires_grad=True)

#         self.add_diag = add_diag
#         self.use_diag = True
#         # smooth scale
#         if self.add_diag:
#             if diag_init_para is None:
#                 self.diag_scale = torch.nn.Parameter(torch.ones((left_size * right_size)), requires_grad=True)
#             else:
#                 self.diag_scale = torch.nn.Parameter(diag_init_para, requires_grad=True)
#         self._eval_mode = False

#     def forward(self, inp, inv_t=False):
#         if self.add_diag and self.use_diag:
#             diag_scale = self.diag_scale
#             if inv_t:
#                 inp = inp / diag_scale.to(inp)
#             else:
#                 inp = inp * self.diag_scale.to(inp)
#         if not self._eval_mode:
#             matrix_u_left, matrix_u_right = self.linear_u_left.weight, self.linear_u_right.weight
#             matrix_v_left, matrix_v_right = self.linear_v_left.weight, self.linear_v_right.weight
#             linear_diag_left, linear_diag_right = self.linear_diag_left,  self.linear_diag_right
#             if inv_t:
#                 linear_diag_left, linear_diag_right = 1 / linear_diag_left, 1 / linear_diag_right
#         else:
#             matrix_left, matrix_right = self.matrix_left, self.matrix_right
#             if inv_t:
#                 matrix_left, matrix_right = self.matrix_left_inv, self.matrix_right_inv

#             return kronecker_matmul(inp, matrix_left.to(inp), matrix_right.to(inp))
#         matrix_left, matrix_right = matrix_u_left @ torch.diag(linear_diag_left) @ matrix_v_left.t(), matrix_u_right @ torch.diag(linear_diag_right) @ matrix_v_right.t()
#         return kronecker_matmul(inp, matrix_left.to(inp), matrix_right.to(inp))
    
#     def reparameterize(self):
#         matrix_left = self.linear_u_left.weight @ torch.diag(self.linear_diag_left) @ self.linear_v_left.weight.t()
#         matrix_right = self.linear_u_right.weight @ torch.diag(self.linear_diag_right) @ self.linear_v_right.weight.t()
#         matrix_left_inv = self.linear_u_left.weight @ torch.diag(1 / self.linear_diag_left) @ self.linear_v_left.weight.t()
#         matrix_right_inv = self.linear_u_right.weight @ torch.diag(1 / self.linear_diag_right) @ self.linear_v_right.weight.t()
#         self.matrix_left = nn.Parameter(matrix_left, requires_grad=False)
#         self.matrix_right = nn.Parameter(matrix_right, requires_grad=False)
#         self.matrix_left_inv = nn.Parameter(matrix_left_inv, requires_grad=False)
#         self.matrix_right_inv = nn.Parameter(matrix_right_inv, requires_grad=False)
#         del self.linear_u_left, self.linear_diag_left, self.linear_v_left, self.linear_u_right, self.linear_diag_right, self.linear_v_right
            
#     def to_eval_mode(self):
#         if not self._eval_mode:
#             self.reparameterize()
#             self._eval_mode = True

#     def __repr__(self):
#         res = f"SVDDecomposeTransMatrix(_eval_mode={self._eval_mode}"
#         if hasattr(self, 'matrix_left'):
#             res += f", matrix.shape={self.matrix_left.shape}, matrix_right.shape={self.matrix_right.shape}, )"
#         else:
#             res += f", matrix.shape={self.linear_u_left.weight.shape}, linear_right.shape={self.linear_u_right.weight.shape}, )"
#         return res


# # ---------- transformation version of direct inverse ----------



# class InvDecomposeTransMatrix(nn.Module):
#     def __init__(self, left_size, right_size, add_diag=False, diag_init_para=None):
#         super(InvDecomposeTransMatrix, self).__init__()
#         linear_left = nn.Linear(left_size, left_size, bias=False)
#         linear_left.weight.data = get_init_weight(left_size).to(linear_left.weight)
#         self.linear_left = linear_left

#         linear_right = nn.Linear(right_size, right_size, bias=False)
#         linear_right.weight.data = get_init_weight(right_size).to(linear_right.weight)
#         self.linear_right = linear_right

#         self.add_diag = add_diag
#         self.use_diag = True
#         if self.add_diag:
#             if diag_init_para is None:
#                 self.diag_scale = torch.nn.Parameter(torch.ones((left_size * right_size)), requires_grad=True)
#             else:
#                 self.diag_scale = torch.nn.Parameter(diag_init_para, requires_grad=True)
#         self._eval_mode = False

#     def forward(self, inp, inv_t=False):
#         if self.add_diag and self.use_diag:
#             if inv_t:
#                 inp = inp / self.diag_scale.to(inp)
#             else:
#                 inp = inp * self.diag_scale.to(inp)
#         if not self._eval_mode:
#             matrix_left, matrix_right = self.linear_left.weight, self.linear_right.weight
#             if inv_t:
#                 matrix_left, matrix_right = get_inverse(matrix_left).T, get_inverse(matrix_right).T
#         else:
#             matrix_left, matrix_right = self.matrix_left, self.matrix_right
#             if inv_t:
#                 matrix_left, matrix_right = self.matrix_left_inv, self.matrix_right_inv
#         return kronecker_matmul(inp, matrix_left.to(inp), matrix_right.to(inp))

#     def to_eval_mode(self):
#         if not self._eval_mode:
#             self.matrix_left = nn.Parameter(self.linear_left.weight, requires_grad=False)
#             self.matrix_right = nn.Parameter(self.linear_right.weight, requires_grad=False)
#             self.matrix_left_inv = nn.Parameter(get_inverse(self.linear_left.weight).T, requires_grad=False)
#             self.matrix_right_inv = nn.Parameter(get_inverse(self.linear_right.weight).T, requires_grad=False)
#             del self.linear_left, self.linear_right
#             self._eval_mode = True

#     def __repr__(self):
#         res = f"InvDecomposeTransMatrix(_eval_mode={self._eval_mode}"
#         if hasattr(self, 'matrix_left'):
#             res += f", matrix.shape={self.matrix_left.shape}, matrix_right.shape={self.matrix_right.shape}, )"
#         else:
#             res += f", matrix.shape={self.linear_left.weight.shape}, linear_right.shape={self.linear_right.weight.shape}, )"
#         return res
