from utils import get_layers, count_params

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

def prune_singular_value(s, threshold, percent):
    '''
    Select which singular values to keep, pruning the smallest ones.

    Args:
        s: 1-D tensor of singular values
        threshold: threshold for pruning, in [0, 1]
        percent: if True, threshold is the fraction of singular values to be
            pruned; otherwise singular values are pruned while the cumulative
            squared sum of the discarded ones stays within
            threshold * total squared energy
    Returns:
        indices of singular values to be kept, ordered from largest to
        smallest singular value
    '''
    assert 0 <= threshold <= 1
    energy = s.pow(2)
    # ascending order of energy: the first entries are the weakest
    # singular values, i.e. the pruning candidates
    indices = torch.argsort(energy, descending=False)
    if percent:
        # bug fix: drop the weakest int(threshold * len(s)) values; the old
        # code kept indices[:len - pruned], i.e. kept the SMALLEST values
        # and pruned the largest, contradicting the energy branch below
        pruned = int(threshold * len(s))
        indices = indices[pruned:]
    else:
        # keep a value once the cumulative energy up to (and including) it
        # exceeds threshold of the total energy
        left_sum = threshold * energy.sum()
        indices = indices[energy[indices].cumsum(dim=0) > left_sum]
    return reversed(indices)


def prune_model(model, threshold, percent=False, 
                layers=None, global_prune=False, 
                score='l2', score_kwargs=None,
                target_params=0):
    '''
    Prune the singular values of every SVD_Module in the model.

    Args:
        model: nn.Module
        threshold: threshold for pruning
        percent: if True, threshold is the percentage of singular values to be pruned
        layers: if given, only these layers are pruned; the returned model is
            an nn.Sequential over them (modules are shared with the original
            model, so the original is pruned in place as well)
        global_prune: if True, rank singular values across all modules and
            prune globally instead of per-module
        score: sensitivity score for global pruning ('l1', 'l2' or 'fisher')
        score_kwargs: extra keyword arguments for the score function
        target_params: target parameters (only take effect when global_prune is True)
    '''
    # bug fix: score_kwargs={} was a shared mutable default argument
    if score_kwargs is None:
        score_kwargs = {}

    model.zero_grad(set_to_none=True)
    if layers is not None:
        model = nn.Sequential(*get_layers(model, layers))

    if global_prune:
        return _global_prune_model(model, threshold, percent,
                                   score=score, score_kwargs=score_kwargs,
                                   target_params=target_params)

    for m in model.modules():
        if isinstance(m, SVD_Module):
            m.prune(threshold, percent)
    return model


def _global_prune_model(model, threshold, percent,
                        score='l2', score_kwargs=None,
                        target_params=0):
    '''
    Globally prune the lowest-scoring fraction `threshold` of all singular
    values across every SVD_Module in the model.

    Args:
        model: nn.Module containing SVD_Module children
        threshold: fraction of all singular values to prune (requires percent=True)
        percent: must be True; only percentage-based global pruning is supported
        score: 'l1', 'l2' or 'fisher' sensitivity score
        score_kwargs: extra keyword arguments for the score function
            (e.g. train_loader for 'fisher')
        target_params: if > 0, stop pruning early once count_params(model)
            drops to this target
    '''
    assert percent
    assert score in ['l1', 'l2', 'fisher']
    # bug fix: score_kwargs={} was a shared mutable default argument
    if score_kwargs is None:
        score_kwargs = {}

    if score == 'l1':
        scores = _l1_score(model, **score_kwargs)
    elif score == 'l2':
        scores = _l2_score(model, **score_kwargs)
    elif score == 'fisher':
        scores = _fisher_score(model, **score_kwargs)
    else:
        raise NotImplementedError

    # (module name, singular value index, sensitivity) for every singular value
    sensitivity = []
    for name, m in model.named_modules():
        if isinstance(m, SVD_Module):
            s = scores[name]
            for i in range(len(s)):
                sensitivity.append((name, i, s[i]))

    sensitivity = sorted(sensitivity, key=lambda x: x[2])
    pruned_num = int(len(sensitivity) * threshold)

    # bug fix: each removal shifts the remaining columns of the same module
    # to the left, so an original index i must be remapped to its current
    # position before slicing; track removed indices per module
    removed = {}
    for name, i, _ in sensitivity[:pruned_num]:
        m = model
        for n in name.split('.'):
            m = getattr(m, n)

        done = removed.setdefault(name, [])
        # current position = original index minus the number of
        # already-removed indices that were smaller
        j = i - sum(1 for r in done if r < i)
        done.append(i)

        m.u.data = torch.cat([m.u.data[:, :j], m.u.data[:, j+1:]], dim=1)
        m.s.data = torch.cat([m.s.data[:j], m.s.data[j+1:]])
        m.v.data = torch.cat([m.v.data[:, :j], m.v.data[:, j+1:]], dim=1)

        if target_params > 0 and count_params(model) <= target_params:
            break
    return model


def _l1_score(model, **kwargs):
    '''
    Compute the normalized l1 sensitivity of every singular value.

    Args: model: nn.Module
    Returns: dict mapping module name to l1 score of each singular value
    '''
    return {
        name: module.s.abs() / module.s.abs().sum()
        for name, module in model.named_modules()
        if isinstance(module, SVD_Module)
    }


def _l2_score(model, **kwargs):
    '''
    Compute the normalized l2 (squared) sensitivity of every singular value.

    Args: model: nn.Module
    Returns: dict mapping module name to l2 score of each singular value
    '''
    return {
        name: module.s.pow(2) / module.s.pow(2).sum()
        for name, module in model.named_modules()
        if isinstance(module, SVD_Module)
    }


def _fisher_score(model, train_loader, **kwargs):
    '''
    Estimate a Fisher-information-style sensitivity for every singular value.

    For each batch, computes the first-order change of each rank-1 component
    s_i * u_i v_i^T under the current parameter gradients (product rule over
    du, ds, dv), squares it, averages over weight entries, and accumulates
    over the whole loader. Scores are normalized per module to sum to 1.

    Args:
        model: nn.Module containing SVD_Modules
        train_loader: loader yielding (input, label) batches
    Returns:
        fisher score of each singular value, keyed by module name
    '''
    def usv(u, s, v, i):
        # i-th rank-1 component of the weight matrix: s_i * u_i v_i^T
        return (u[:, i].reshape(-1, 1) @ v[:, i].reshape(1, -1)) * s[i]
    
    def get_fisher(u, s, v, du, ds, dv):
        # Product-rule expansion of d(usv)/d(u,s,v) applied to the gradient
        # increments (du, ds, dv), squared and averaged per singular value.
        grad = [(usv(u, s, dv, i) + usv(u, ds, v, i) + usv(du, s, v, i)).pow(2).mean()
                for i in range(s.size(0))]
        fisher = torch.Tensor(grad)
        return fisher
    
    fisher_score = {}
    def update_fisher():
        # accumulate this batch's per-module fisher estimate into fisher_score
        for name, m in model.named_modules():
            if isinstance(m, SVD_Module):
                if name not in fisher_score:
                    fisher_score[name] = 0
                fisher = get_fisher(m.u.data, m.s.data, m.v.data,
                                    m.u.grad.data, m.s.grad.data, m.v.grad.data)
                fisher_score[name] += fisher

    model.zero_grad(set_to_none=True)
    model.eval()

    # NOTE(review): assumes CUDA is available and the model already lives on
    # the GPU — confirm against callers before running CPU-only.
    for x, y in train_loader:
        x = x.cuda()
        y = y.cuda()
        out = model(x)
        loss = F.cross_entropy(out, y)
        loss.backward()

        update_fisher()
        model.zero_grad()

    # normalize each module's accumulated scores so they sum to 1
    for key in fisher_score.keys():
        fisher_score[key] = fisher_score[key] / fisher_score[key].sum()
    return fisher_score


def init_svd_model_from_normal(svd, normal):
    '''
    Copy the weights of a normal model into its SVD counterpart.

    Args:
        svd: svd model (same architecture, SVD layers in place of normal ones)
        normal: normal model
    Returns: svd model
    '''
    module_pairs = zip(svd.named_modules(), normal.named_modules())
    for (svd_name, svd_mod), (normal_name, normal_mod) in module_pairs:
        assert svd_name == normal_name
        if svd_name == '':
            # skip the root module itself
            continue
        if isinstance(svd_mod, SVD_Module):
            svd_mod.init_from_normal_module(normal_mod)
        elif not list(normal_mod.children()):
            # leaf modules that were not decomposed: copy weights directly
            svd_mod.load_state_dict(normal_mod.state_dict())
    return svd


def init_svd_model_from_state_dict(svd, state_dict):
    '''
    Load parameters and buffers of the svd model from a state dict.

    Prints a warning for every model tensor missing from state_dict, and
    asserts that every entry of state_dict was consumed.
    '''
    loaded = set()

    def copy_from_state_dict(named_tensors):
        # assign matching entries in place; warn about misses
        for name, tensor in named_tensors:
            if name in state_dict:
                tensor.data = state_dict[name]
                loaded.add(name)
            else:
                print('Warning: {} not found in state_dict'.format(name))

    copy_from_state_dict(svd.named_parameters())
    copy_from_state_dict(svd.named_buffers())

    # check if all parameters are loaded
    assert loaded == set(state_dict.keys())

    return svd


def svd_penalty(model, lambd, mu, svr_method='l1'):
    '''
    Sum the regularization penalties of every SVD_Module in the model.

    Args:
        model: nn.Module
        lambd: coefficient of orthogonal regularization
        mu: coefficient of singular value regularization
        svr_method: method for singular value regularization
    '''
    return sum(module.penalty(lambd, mu, svr_method)
               for module in model.modules()
               if isinstance(module, SVD_Module))


def orthogonal_regularization(u, rank=None):
    '''
    Frobenius-norm deviation of u from semi-orthogonality (u^T u = I).

    Args:
        u: matrix with at least as many rows as columns
        rank: if given, the penalty is divided by rank ** 2
    '''
    rows, cols = u.shape
    assert rows >= cols
    gram = u.t().matmul(u)
    penalty = torch.linalg.matrix_norm(gram - torch.eye(cols, device=u.device))
    if rank is not None:
        penalty = penalty / rank ** 2

    if penalty.isnan():
        print("Warning: orthogonal regularization is nan")
    return penalty


def singular_value_regularization(s, method='l1'):
    '''
    Sparsity-inducing penalty on the singular values.

    Args:
        s: 1-D tensor of singular values
        method: 'l1', 'hoyer' (l1/l2 ratio) or 'hoyer-square'
    Raises:
        NotImplementedError: for an unknown method
    '''
    if method == 'l1':
        penalty = torch.linalg.norm(s, 1)
    elif method == 'hoyer':
        penalty = torch.linalg.norm(s, 1) / torch.linalg.norm(s, 2)
    elif method == 'hoyer-square':
        penalty = torch.linalg.norm(s, 1) ** 2 / s.pow(2).sum() - 1
    else:
        raise NotImplementedError

    if penalty.isnan():
        print("Warning: singular value regularization is nan")
    return penalty


class SVD_Module(nn.Module):
    '''
    Base class for layers whose weight is stored as an SVD factorization
    (u, s, v). Subclasses allocate the factors and implement forward().
    '''

    def __init__(self) -> None:
        super().__init__()
        # factors of the decomposed weight; allocated by subclasses
        self.u = None
        self.s = None
        self.v = None
        # fixed_rank > 0 means the rank is pinned and prune() is a no-op
        self.fixed_rank = 0


    def init_from_normal_module(self, module):
        '''Initialize the factors from an ordinary nn.Module.'''
        raise NotImplementedError


    @torch.no_grad()
    def prune(self, threshold, percent):
        '''Drop singular values (and matching u/v columns) selected by prune_singular_value.'''
        if self.fixed_rank > 0:
            return
        keep = prune_singular_value(self.s, threshold, percent)
        self.u.data = self.u[:, keep].contiguous()
        self.s.data = self.s[keep].contiguous()
        self.v.data = self.v[:, keep].contiguous()


    def penalty(self, lambd, mu, svr_method='l1'):
        '''
        Args:
            lambd: coefficient of orthogonal regularization
            mu: coefficient of singular value regularization
            svr_method: method for singular value regularization
        '''
        orth = orthogonal_regularization(self.u) + orthogonal_regularization(self.v)
        svr = singular_value_regularization(self.s, svr_method)
        return lambd * orth + mu * svr



class SVD_Conv2d(SVD_Module):
    '''
    2-D convolution whose kernel is stored as a (low-rank) SVD factorization
    and applied as two consecutive convolutions.

    Args:
        in_channels, out_channels, kernel_size, stride, padding, bias: same as nn.Conv2d
        fixed_rank: if fixed_rank > 0, use fixed rank decomposition, else use full rank decomposition
        decomposition_mode: 'channel' or 'spatial'

    NOTE(review): kernel_size, stride and padding are treated as ints
    (square kernels only) throughout — tuples are not supported.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, 
                 fixed_rank=0, decomposition_mode='channel'):
        super(SVD_Conv2d, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.fixed_rank = fixed_rank
        self.decomposition_mode = decomposition_mode

        # original:
        #     K.shape = (out_channels, in_channels, kernel_size, kernel_size)
        #
        # channel-wise decomposition (flatten all but the output channel axis):
        #     K.shape = (out_channels, in_channels * kernel_size * kernel_size)
        #     => U.shape = (out_channels, rank)
        #        S.shape = (rank, rank)
        #        V.shape = (in_channels * kernel_size * kernel_size, rank)
        #
        # spatial-wise decomposition (split the kernel between the two factors):
        #     K.shape = (out_channels * kernel_size, in_channels * kernel_size)
        #     => U.shape = (out_channels * kernel_size, rank)
        #        S.shape = (rank, rank)
        #        V.shape = (in_channels * kernel_size, rank)
        #
        # if fixed_rank is not set by user, use full rank

        if decomposition_mode == 'channel':
            self.full_rank = min(out_channels, in_channels * kernel_size * kernel_size)
            rank = fixed_rank if fixed_rank > 0 else self.full_rank
            # S is stored as a vector; torch.diag expands it where needed
            self.u = nn.Parameter(torch.empty(out_channels, rank))
            self.s = nn.Parameter(torch.empty(rank))
            self.v = nn.Parameter(torch.empty(in_channels * kernel_size * kernel_size, rank))
        elif decomposition_mode == 'spatial':
            self.full_rank = min(out_channels * kernel_size, in_channels * kernel_size)
            rank = fixed_rank if fixed_rank > 0 else self.full_rank
            self.u = nn.Parameter(torch.empty(out_channels * kernel_size, rank))
            self.s = nn.Parameter(torch.empty(rank))
            self.v = nn.Parameter(torch.empty(in_channels * kernel_size, rank))
        else:
            raise ValueError('decomposition_mode must be channel or spatial')
        
        if bias:
            self.bias = nn.Parameter(torch.empty(out_channels))
        else:
            self.bias = None
        
        # initialize parameters
        self.reset_parameters()


    def reset_parameters(self):
        '''Initialize u/v as semi-orthogonal matrices, s as standard normal.'''
        # initialize U and V as semi orthogonal matrix
        # when U.shape[0] >= U.shape[1], U^T * U = I
        nn.init.orthogonal_(self.u)
        nn.init.normal_(self.s)
        nn.init.orthogonal_(self.v)

        if self.bias is not None:
            # reconstruct the flat weight to derive the same fan-in based
            # bias bound that nn.Conv2d uses
            w = self.u.matmul(torch.diag(self.s)).matmul(self.v.t())
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(w)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)


    def forward(self, x):
        '''Apply the factorized convolution as two stacked conv2d calls.'''
        rank = self.s.shape[0]
        if self.decomposition_mode == 'channel':
            # V = CHW * R
            # K1 = R * C * H * W  (full-kernel conv into rank channels)
            weight_conv1 = self.v\
                .view(self.in_channels, self.kernel_size, self.kernel_size, rank)\
                .permute(3, 0, 1, 2)

            # U @ S = N * R
            # K2 = N * R * 1 * 1  (1x1 conv back to out_channels)
            weight_conv2 = self.u\
                .matmul(torch.diag(self.s))\
                .view(self.out_channels, rank, 1, 1)

            # stride/padding applied in the first conv; bias in the second
            x = F.conv2d(x, weight_conv1, None, self.stride, self.padding)
            x = F.conv2d(x, weight_conv2, self.bias, 1, 0)

        else: # spatial-wise decomposition
            # V = CW * R
            # K1 = R * C * 1 * W  (horizontal 1xW conv)
            weight_conv1 = self.v\
                .view(self.in_channels, 1, self.kernel_size, rank)\
                .permute(3, 0, 1, 2)

            # U @ S = NH * R
            # K2 = N * R * H * 1  (vertical Hx1 conv)
            weight_conv2 = self.u\
                .matmul(torch.diag(self.s))\
                .view(self.out_channels, self.kernel_size, rank, 1)\
                .permute(0, 2, 1, 3)
            
            # stride/padding split per axis so the composition matches a
            # single conv with square stride/padding
            x = F.conv2d(x, weight_conv1, None, (1, self.stride), (0, self.padding))
            x = F.conv2d(x, weight_conv2, self.bias, (self.stride, 1), (self.padding, 0))
        
        return x


    def init_from_normal_module(self, module):
        '''Initialize factors from a pretrained nn.Conv2d via SVD of its flattened weight.'''
        assert isinstance(module, nn.Conv2d)
        assert module.in_channels == self.in_channels
        assert module.out_channels == self.out_channels
        assert module.kernel_size == (self.kernel_size, self.kernel_size)
        assert module.stride == (self.stride, self.stride)
        assert module.padding == (self.padding, self.padding)
        if self.bias is None: assert module.bias is None
        if self.bias is not None: assert module.bias is not None
        
        if self.decomposition_mode == 'channel':
            weight = module.weight\
                .view(self.out_channels, self.in_channels * self.kernel_size * self.kernel_size)
        else: # spatial-wise decomposition
            # (N, C, H, W) -> (N, H, C, W) -> (N*H, C*W) to match the factor layout
            weight = module.weight\
                .permute(0, 2, 1, 3)\
                .reshape(self.out_channels * self.kernel_size, self.in_channels * self.kernel_size)

        # NOTE: torch.svd returns v (not v^T); truncate to the configured rank
        u, s, v = torch.svd(weight)
        rank = self.fixed_rank if self.fixed_rank > 0 else self.full_rank
        self.u.data = u[:, :rank]
        self.s.data = s[:rank]
        self.v.data = v[:, :rank]

        if self.bias is not None:
            self.bias.data = module.bias.data

        return self




class SVD_Linear(SVD_Module):
    '''
    Linear layer whose weight is stored as a (low-rank) SVD factorization.

    Args:
        in_features, out_features, bias: same as nn.Linear
        fixed_rank: if fixed_rank > 0, use fixed rank decomposition, else use full rank decomposition
    '''
    def __init__(self, in_features, out_features, bias=True, fixed_rank=0):
        super(SVD_Linear, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.full_rank = min(in_features, out_features)
        self.fixed_rank = fixed_rank

        # W.shape = (out_features, in_features)
        # => U.shape = (out_features, rank)
        #    S.shape = (rank, rank)  (stored as a vector, expanded via torch.diag)
        #    V.shape = (in_features, rank)
        #
        # if fixed_rank is not set by user, use full rank

        rank = fixed_rank if fixed_rank > 0 else self.full_rank
        self.u = nn.Parameter(torch.empty(out_features, rank))
        self.s = nn.Parameter(torch.empty(rank))
        self.v = nn.Parameter(torch.empty(in_features, rank))

        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            self.bias = None

        # initialize parameters
        self.reset_parameters()


    def reset_parameters(self):
        '''Initialize u/v as semi-orthogonal matrices, s as standard normal.'''
        nn.init.orthogonal_(self.u)
        nn.init.normal_(self.s)
        nn.init.orthogonal_(self.v)

        if self.bias is not None:
            # reconstruct the weight to derive the same fan-in based bias
            # bound that nn.Linear uses
            w = self.u.matmul(torch.diag(self.s)).matmul(self.v.t())
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(w)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(self.bias, -bound, bound)


    def forward(self, x):
        # output = U * S * V^T * x + b, applied as three thin matmuls
        x = F.linear(x, self.v.t())
        x = F.linear(x, torch.diag(self.s))
        x = F.linear(x, self.u)
        if self.bias is not None:
            x = x + self.bias
        return x


    def init_from_normal_module(self, module):
        '''Initialize factors from a pretrained nn.Linear via SVD of its weight.'''
        assert isinstance(module, nn.Linear)
        assert module.in_features == self.in_features
        assert module.out_features == self.out_features
        if self.bias is None: assert module.bias is None
        if self.bias is not None: assert module.bias is not None

        w = module.weight.data
        # NOTE: torch.svd returns v (not v^T)
        u, s, v = torch.svd(w)
        # bug fix: truncate to the configured rank, consistent with
        # SVD_Conv2d.init_from_normal_module — previously fixed_rank was
        # ignored here, silently restoring full-rank parameter shapes
        rank = self.fixed_rank if self.fixed_rank > 0 else self.full_rank
        self.u.data = u[:, :rank]
        self.s.data = s[:rank]
        self.v.data = v[:, :rank]

        if self.bias is not None:
            self.bias.data = module.bias.data

        return self




###################################### tests #######################################
import unittest
import random
import time

class SVD_Test(unittest.TestCase):
    '''Sanity tests for the SVD layers and pruning utilities.'''

    def test_conv2d(self):
        '''SVD_Conv2d must produce the same output shape as nn.Conv2d.'''
        for i in range(10):
            in_channels = random.randint(1, 256)
            out_channels = random.randint(1, 256)
            kernel_size = random.randint(1, 10)
            stride = random.randint(1, 10)
            padding = random.randint(0, 10)
            fixed_rank = random.randint(0, 6)
            bias = random.choice([True, False])
            decomposition_mode = random.choice(['channel', 'spatial'])

            svd_conv = SVD_Conv2d(in_channels, out_channels, kernel_size,
                                  stride, padding, bias, fixed_rank, decomposition_mode)
            conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                             stride, padding, bias=bias)
            svd_conv.eval()
            conv.eval()

            for _ in range(10):
                H = random.randint(32, 448)
                W = random.randint(32, 448)
                batch_size = random.randint(1, 32)
                x = torch.randn(batch_size, in_channels, H, W)

                with torch.no_grad():
                    self.assertEqual(svd_conv(x).shape, conv(x).shape)

            print(f'test_conv2d: test {i} passed')
        print('test_conv2d: accepted')


    def test_linear(self):
        '''SVD_Linear must produce the same output shape as nn.Linear.'''
        for i in range(10):
            in_features = random.randint(1, 256)
            out_features = random.randint(1, 256)
            fixed_rank = random.randint(0, 6)
            bias = random.choice([True, False])

            svd_linear = SVD_Linear(in_features, out_features, bias, fixed_rank)
            linear = nn.Linear(in_features, out_features, bias=bias)
            svd_linear.eval()
            linear.eval()

            for _ in range(10):
                batch_size = random.randint(1, 32)
                x = torch.randn(batch_size, in_features)

                with torch.no_grad():
                    self.assertEqual(svd_linear(x).shape, linear(x).shape)

            print(f'test_linear: test {i} passed')
        print('test_linear: accepted')


    def test_prune(self):
        '''Accuracy/params before and after pruning a pretrained resnet18.'''
        from resnet import resnet18
        from dataset import mini_imagenet
        from train_test import test_iter
        from utils import count_params

        dataset_path = r'D:\Documents\Python\Datasets\mini-imagenet'
        valid_loader = mini_imagenet(dataset_path, train=False, batch_size=4)

        model = resnet18()
        model.load_state_dict(torch.load('pretrained/resnet18-f37072fd.pth'))
        model.cuda()
        accuracy = test_iter(model, valid_loader, [1, 5])
        params = count_params(model)
        print(f'Top1 accuracy of original model: {accuracy[0]:.3f}')
        print(f'Top5 accuracy of original model: {accuracy[1]:.3f}')
        print(f'Params of original model: {params / 1e6:.3f}M')

        svd_model = resnet18(Conv2d=SVD_Conv2d, Linear=SVD_Linear)
        init_svd_model_from_normal(svd_model, model)
        svd_model.cuda()
        accuracy = test_iter(svd_model, valid_loader, [1, 5])
        params = count_params(svd_model)
        print(f'Top1 accuracy before pruning: {accuracy[0]:.3f}')
        print(f'Top5 accuracy before pruning: {accuracy[1]:.3f}')
        print(f'Params before pruning: {params / 1e6:.3f}M')

        pruned_model = prune_model(svd_model, 0.05)
        pruned_model.cuda()
        accuracy = test_iter(pruned_model, valid_loader, [1, 5])
        params = count_params(pruned_model)
        print(f'Top1 accuracy after pruning: {accuracy[0]:.3f}')
        print(f'Top5 accuracy after pruning: {accuracy[1]:.3f}')
        print(f'Params after pruning: {params / 1e6:.3f}M')

        print('test_prune: accepted')


    def test_prune2(self):
        '''A heavily pruned SVD resnet20 must keep the same output shape.'''
        from resnet import resnet20

        normal_model = resnet20().cuda()
        svd_model = resnet20(Conv2d=SVD_Conv2d, Linear=SVD_Linear)
        svd_model = prune_model(svd_model, 0.99)
        svd_model.cuda()

        data = torch.randn(4, 3, 32, 32).cuda()
        self.assertEqual(svd_model(data).shape, normal_model(data).shape)


    def test_prune3(self):
        '''Pruning a layer subset must change its ranks but keep output shapes.'''
        from utils import split_layers
        from resnet import resnet20
        from copy import deepcopy

        model = resnet20(Conv2d=SVD_Conv2d, Linear=SVD_Linear)
        teacher = deepcopy(model)

        layer_groups = split_layers(model)
        # bug fix: layers must be passed by keyword — positionally it lands
        # in the `percent` parameter and no layer restriction happens
        prune_model(model, 0.01, layers=layer_groups[0])
        assert teacher.conv1.s.shape != model.conv1.s.shape

        data = torch.randn(4, 3, 32, 32)
        self.assertEqual(teacher(data).shape, model(data).shape)


    def test_spatial(self):
        '''decomposition_mode must propagate to every SVD_Conv2d.'''
        from resnet import resnet18
        svd_model = resnet18(Conv2d=SVD_Conv2d, Linear=SVD_Linear, decomposition_mode='spatial')
        for m in svd_model.modules():
            if isinstance(m, SVD_Conv2d):
                self.assertEqual(m.decomposition_mode, 'spatial')
        print('test_spatial: accepted')

