# --------------------------------------------------------
# References:
# https://github.com/jxhe/unify-parameter-efficient-tuning
# --------------------------------------------------------

import math
import torch
import torch.nn as nn
import argparse
from typing import Optional


class Lora(nn.Module):
    """Low-rank adaptation (LoRA) module.

    Projects the input down to a small bottleneck and back up, with an
    optional LayerNorm applied before ("in") or after ("out") the adapter.
    The up-projection is zero-initialized, so the adapter's output is all
    zeros at the start of training (an identity residual branch).

    Args:
        config: namespace providing ``_device`` plus fallback values
            ``embed_dim`` and ``attn_bn``.
        embed_dim: input/output feature dimension; defaults to
            ``config.embed_dim`` when ``None``.
        bottleneck: bottleneck (rank) dimension; defaults to
            ``config.attn_bn`` when ``None``.
        dropout: dropout probability applied after the activation.
        adapter_scalar: ``"learnable_scalar"`` for a trainable scaling
            parameter, otherwise a string/number convertible to ``float``.
        adapter_layernorm_option: ``"in"`` normalizes the input, ``"out"``
            normalizes the adapter output; any other value disables the
            LayerNorm entirely (no ``adapter_layer_norm`` attribute is made).
    """

    def __init__(
        self,
        config: argparse.Namespace,
        embed_dim: Optional[int] = None,
        bottleneck: Optional[int] = None,
        dropout: float = 0.0,
        adapter_scalar: str = "1.0",
        adapter_layernorm_option: str = "in",
    ):
        super().__init__()
        self._device = config._device
        self.n_embd = (
            config.embed_dim if embed_dim is None else embed_dim
        )
        self.down_size = (
            config.attn_bn if bottleneck is None else bottleneck
        )

        # Optional LayerNorm placement: "in" (pre) or "out" (post).
        self.adapter_layernorm_option = adapter_layernorm_option

        if adapter_layernorm_option in ("in", "out"):
            # nn.LayerNorm already initializes its parameters in __init__,
            # so no explicit reset_parameters() call is needed.
            self.adapter_layer_norm = nn.LayerNorm(self.n_embd)

        # Output scaling: a trainable scalar or a fixed float.
        if adapter_scalar == "learnable_scalar":
            self.scale = nn.Parameter(torch.ones(1))
        else:
            self.scale = float(adapter_scalar)

        self.down_proj = nn.Linear(self.n_embd, self.down_size, bias=False)
        self.act_func = nn.Identity()  # subclasses may override (see Adapter)
        self.up_proj = nn.Linear(self.down_size, self.n_embd, bias=False)

        self.dropout = dropout

        # LoRA-style init: random down-projection, zeroed up-projection,
        # so the adapter starts out contributing exactly zero.
        with torch.no_grad():
            nn.init.kaiming_uniform_(self.down_proj.weight, a=math.sqrt(5))
            nn.init.zeros_(self.up_proj.weight)

    def forward(self, x, add_residual=False, residual=None):
        """Apply the adapter to ``x``; optionally add a residual connection."""
        return self._forward(x, add_residual, residual)

    def _update(self):
        # Placeholder hook (e.g. for merging adapter weights); currently a no-op.
        print("Update Lora: Not Implemented")

    def _forward(self, x, add_residual=False, residual=None):
        """Core path: (LN) -> down -> act -> dropout -> up -> scale -> (LN).

        When ``residual`` is ``None`` the original input ``x`` is used as the
        residual; it is only added when ``add_residual`` is True.
        """
        residual = x if residual is None else residual
        if self.adapter_layernorm_option == "in":
            x = self.adapter_layer_norm(x)

        down = self.down_proj(x)
        down = self.act_func(down)
        down = nn.functional.dropout(
            down, p=self.dropout, training=self.training
        )
        up = self.up_proj(down)

        up = up * self.scale

        if self.adapter_layernorm_option == "out":
            up = self.adapter_layer_norm(up)

        if add_residual:
            output = up + residual
        else:
            output = up

        return output


class Adapter(Lora):
    """Bottleneck adapter: identical to :class:`Lora` except that a ReLU
    non-linearity is applied between the down- and up-projections."""

    def __init__(
        self,
        config: argparse.Namespace,
        embed_dim: Optional[int] = None,
        bottleneck: Optional[int] = None,
        dropout: float = 0.0,
        adapter_scalar="1.0",
        adapter_layernorm_option="in",
    ):
        # Build everything exactly as Lora does, then swap the identity
        # activation for a ReLU.
        super().__init__(
            config,
            embed_dim=embed_dim,
            bottleneck=bottleneck,
            dropout=dropout,
            adapter_scalar=adapter_scalar,
            adapter_layernorm_option=adapter_layernorm_option,
        )
        self.act_func = nn.ReLU()


# Registry mapping a PEFT method name to the module class implementing it.
_PEFT_dict = dict(
    lora=Lora,
    adapter=Adapter,
)
