import math
import torch.nn as nn
import torch.nn.functional as F
import torch


class LoRALinear(torch.nn.Module):
    """Linear layer with a trainable low-rank (LoRA) adapter on a frozen base.

    The frozen base weight ``W`` is augmented with a low-rank update
    ``B @ A`` scaled by ``lora_alpha / rank``, so the effective weight is
    ``W + (lora_alpha / rank) * (B @ A)``. ``B`` is zero-initialized, so at
    construction time the layer behaves exactly like the base ``nn.Linear``.

    Args:
        in_features: size of each input sample.
        out_features: size of each output sample.
        merge: if True, fold the LoRA update into the base weight each
            forward (single matmul); if False, compute the frozen base
            output and the LoRA branch separately and sum them.
        rank: rank of the low-rank decomposition; must be > 0.
        lora_alpha: scaling numerator; effective scale is lora_alpha / rank.
        dropout: dropout probability applied to the layer output
            (Identity when 0).

    Raises:
        ValueError: if ``rank`` is not positive.
    """

    def __init__(self, in_features, out_features, merge, rank=16, lora_alpha=16, dropout=0.5, **kwargs):
        super().__init__()

        # Validate with an exception, not `assert` (asserts vanish under -O).
        if rank <= 0:
            raise ValueError('rank should be greater than 0')

        self.in_features = in_features
        self.out_features = out_features
        self.merge = merge
        self.rank = rank
        self.lora_alpha = lora_alpha
        self.dropout_rate = dropout

        self.linear = nn.Linear(in_features, out_features)
        # LoRA factors: effective update is (out, in) = B (out, r) @ A (r, in).
        self.lora_b = torch.nn.Parameter(torch.zeros((out_features, rank)))
        self.lora_a = torch.nn.Parameter(torch.zeros((rank, in_features)))

        self.scaling = self.lora_alpha / self.rank
        # Only the adapter is trained; the base weight stays frozen.
        self.linear.weight.requires_grad = False

        self.dropout = nn.Dropout(self.dropout_rate) if self.dropout_rate > 0 else nn.Identity()

        self.initial_weights()

    def initial_weights(self):
        """Standard LoRA init: A ~ Kaiming-uniform, B = 0 (zero update at start)."""
        nn.init.kaiming_uniform_(self.lora_a, a=math.sqrt(5))
        nn.init.zeros_(self.lora_b)

    def forward(self, x):
        if self.merge:
            # Fold the low-rank update into the frozen weight: one matmul.
            merged_weight = self.linear.weight + self.scaling * (self.lora_b @ self.lora_a)
            return self.dropout(F.linear(x, merged_weight, self.linear.bias))
        # Unmerged path: frozen base output plus the trainable LoRA branch.
        # BUG FIX: the original returned only `self.linear(x)` here, so the
        # LoRA parameters never contributed and received no gradients.
        lora_out = F.linear(x, self.lora_b @ self.lora_a) * self.scaling
        return self.dropout(self.linear(x) + lora_out)


if __name__ == '__main__':
    # Smoke-test the LoRA layer on a random batch.
    x = torch.randn((10, 20))
    print(f'--- x.shape: {x.shape}')

    lora_layer = LoRALinear(20, 30, rank=4, merge=True)
    res = lora_layer(x)
    print(f'--- res.shape: {res.shape}')

    # Demonstrate that batched matmul distributes over a row-wise split:
    # splitting `a` along dim 1, multiplying each part by `b`, then
    # re-concatenating gives the same tensor as `a @ b`.
    a = torch.randn((2, 3, 4))
    b = torch.randn((2, 4, 5))

    i = -1
    a_head = a[:, :i, :]   # all rows but the last
    a_tail = a[:, i:, :]   # last row; slicing keeps the dim (no unsqueeze needed)
    print(a.shape, a_head.shape, a_tail.shape)

    res_1 = a @ b
    res_2 = torch.cat((a_head @ b, a_tail @ b), dim=1)

    # torch.equal checks exact element-wise equality; it holds here because
    # each slice performs the identical row-by-row matmul.
    is_equal = torch.equal(res_1, res_2)
    print(is_equal)

    # Same idea, splitting along the batch dim (dim 0).
    # NOTE: the original used `a[i: i+1]`, which for i = -1 is the empty
    # slice a[-1:0]; `a[i:]` selects the intended last batch element.
    batch_head = a[:i]
    batch_tail = a[i:]
    print(torch.equal(a @ b, torch.cat((batch_head @ b, batch_tail @ b), dim=0)))
