#  ------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
#  ------------------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F

import math
from typing import Optional, List
        

class Linear(nn.Module):
    """LoRA (Low-Rank Adaptation) update expressed as a dense layer.

    Holds only the low-rank factors A (r x in_features) and
    B (out_features x r); ``forward`` returns the scaled delta
    ``x @ A^T @ B^T * scaling``. B is zero-initialized, so the layer
    contributes exactly zero until trained.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        r: int = 0,
        lora_alpha: int = 1,
    ):
        """
        Args:
            in_features: size of the last dimension of the input.
            out_features: size of the last dimension of the output.
            r: LoRA rank; must be a positive integer.
            lora_alpha: scaling numerator; effective scale is lora_alpha / r.

        Raises:
            ValueError: if ``r`` is not positive (the default 0 would
                otherwise fail with an opaque ZeroDivisionError below).
        """
        super().__init__()
        if r <= 0:
            raise ValueError(f"LoRA rank r must be a positive integer, got {r}")
        # Low-rank factors of the weight delta: delta_W = B @ A,
        # with overall shape (out_features, in_features).
        self.lora_A = nn.Parameter(torch.empty(r, in_features), requires_grad=True)
        self.lora_B = nn.Parameter(torch.empty(out_features, r), requires_grad=True)
        # Divide by r so the update magnitude is independent of the rank chosen.
        self.scaling = lora_alpha / r

        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init A; zero B so the initial LoRA delta is exactly zero."""
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        nn.init.zeros_(self.lora_B)

    def forward(self, x):
        """Return the scaled low-rank update for ``x`` of shape (..., in_features)."""
        # F.linear(x, W) computes x @ W^T — equivalent to the explicit
        # x @ A.T @ B.T chain, using the already-imported functional API.
        return F.linear(F.linear(x, self.lora_A), self.lora_B) * self.scaling

