import math
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

from .lora import LoRA

# Embedding dimension (feature channels) of each supported DINOv2 backbone.
DINOV2_ARCHS = {
    'dinov2_vits14': 384,
    'dinov2_vitb14': 768,
    'dinov2_vitl14': 1024,
    'dinov2_vitg14': 1536,
    'dinov2_vitb14_reg': 768,
}

class DINOV2EncoderLoRA(nn.Module):
    """DINOv2 ViT encoder with LoRA adapters injected into every attention block.

    The pretrained backbone is frozen; only the low-rank LoRA matrices
    (one A/B pair each for the query and value projections of every
    transformer block) remain trainable.
    """

    def __init__(
        self,
        model_name='dinov2_vitb14',
        r: int = 3,
    ):
        """
        Args:
            model_name: key into ``DINOV2_ARCHS`` selecting the backbone variant.
            r: LoRA rank; must be a positive integer.

        Raises:
            ValueError: if ``r`` is not positive.
            KeyError: if ``model_name`` is not a known architecture.
        """
        super().__init__()
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if r <= 0:
            raise ValueError(f"LoRA rank r must be positive, got {r}")

        self.num_channels = DINOV2_ARCHS[model_name]

        self.encoder = torch.hub.load('facebookresearch/dinov2', model_name)

        # Freeze the whole pretrained backbone; only LoRA weights will train.
        for param in self.encoder.parameters():
            param.requires_grad = False

        # Add LoRA layers to every transformer block of the encoder.
        self.lora_layers = list(range(len(self.encoder.blocks)))
        self.w_a = []
        self.w_b = []

        for i, block in enumerate(self.encoder.blocks):
            if i not in self.lora_layers:
                continue
            w_qkv_linear = block.attn.qkv
            dim = w_qkv_linear.in_features

            # Separate low-rank A/B pairs for the query and value projections.
            w_a_linear_q, w_b_linear_q = self._create_lora_layer(dim, r)
            w_a_linear_v, w_b_linear_v = self._create_lora_layer(dim, r)

            self.w_a.extend([w_a_linear_q, w_a_linear_v])
            self.w_b.extend([w_b_linear_q, w_b_linear_v])

            # Replace the block's qkv projection with a LoRA-wrapped version.
            block.attn.qkv = LoRA(
                w_qkv_linear,
                w_a_linear_q,
                w_b_linear_q,
                w_a_linear_v,
                w_b_linear_v,
            )
        self._reset_lora_parameters()

    def _create_lora_layer(self, dim: int, r: int):
        """Return an (A, B) pair of bias-free linear maps: dim -> r -> dim."""
        w_a = nn.Linear(dim, r, bias=False)
        w_b = nn.Linear(r, dim, bias=False)
        return w_a, w_b

    def _reset_lora_parameters(self) -> None:
        """Standard LoRA init: random A, zero B, so adapters start as a no-op."""
        for w_a in self.w_a:
            nn.init.kaiming_uniform_(w_a.weight, a=math.sqrt(5))
        for w_b in self.w_b:
            # Zero B makes the initial LoRA delta exactly zero.
            nn.init.zeros_(w_b.weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return dense patch features of shape (B, num_channels, H//14, W//14).

        Args:
            x: image batch of shape (B, C, H, W); H and W are expected to be
               multiples of the patch size 14 for the reshape below to hold.
        """
        B, C, H, W = x.shape
        x = self.encoder.prepare_tokens_with_masks(x)
        # Run every transformer block (with the LoRA-augmented qkv layers).
        for blk in self.encoder.blocks:
            x = blk(x)
        # Get the patch embeddings — exclude the CLS token.
        # NOTE(review): the ``_reg`` variant also prepends register tokens;
        # ``x[:, 1:]`` would then keep them and break the reshape below —
        # confirm token layout before using that variant.
        f = x[:, 1:]
        f = f.reshape((B, H // 14, W // 14, self.num_channels)).permute(0, 3, 1, 2)

        return f

    def save_parameters(self, filename: str) -> None:
        """Serialize only the LoRA A/B weights to ``filename``."""
        w_a = {f"w_a_{i:03d}": self.w_a[i].weight for i in range(len(self.w_a))}
        # Fixed: index by len(self.w_b) (was len(self.w_a)) so the two lists
        # are serialized independently of each other's length.
        w_b = {f"w_b_{i:03d}": self.w_b[i].weight for i in range(len(self.w_b))}

        torch.save({**w_a, **w_b}, filename)

    def load_parameters(self, filename: str) -> None:
        """Load LoRA A/B weights previously written by ``save_parameters``.

        Raises:
            KeyError: if the checkpoint is missing an expected entry.
        """
        state_dict = torch.load(filename)

        # Re-wrap each saved tensor as a Parameter on the matching layer.
        for i, w_a_linear in enumerate(self.w_a):
            w_a_linear.weight = nn.Parameter(state_dict[f"w_a_{i:03d}"])

        for i, w_b_linear in enumerate(self.w_b):
            w_b_linear.weight = nn.Parameter(state_dict[f"w_b_{i:03d}"])

