# Model initiation for PANTHER
import random

import torch
from torch import nn
import numpy as np
import math

from models.PANTHER.components import create_mlp_with_dropout
from models.PANTHER.layers import PANTHERBase
from utils.proto_utils import check_prototypes
import configs.prota_config as configs
import torch.nn.functional as F

from utils.file_utils import load_pkl


class ProTA(nn.Module):
    """
    Wrapper for the PANTHER model.

    Combines a learnable prototype bank, a prototypical cross-attention
    transformer that updates the prototypes from the slide patches, the
    PANTHER EM aggregation module, and a per-prototype MLP classifier head.

    Args:
        config: configuration namespace (emb_dim, heads, load_proto,
            fix_proto, em_iter, tau, out_type, ot_eps, fix_em_proto, ...).
        out_size: number of prototypes.
        proto_path: path to a pickled prototype file (used when
            config.load_proto is set; must end with 'pkl').
        num_classes: number of output classes for the classifier head.
    """

    def __init__(self, config, out_size, proto_path='.', num_classes=3):
        super(ProTA, self).__init__()

        self.config = config
        emb_dim = config.emb_dim

        self.emb_dim = emb_dim
        self.heads = config.heads
        self.out_size = out_size
        self.load_proto = config.load_proto

        # Bug fix: the original referenced `weights` unconditionally, raising
        # a NameError whenever load_proto was False or the path was not a
        # .pkl file. Now we fall back to random initialization / fail loudly.
        if config.load_proto:
            if not proto_path.endswith('pkl'):
                raise ValueError(
                    f"Unsupported prototype file format: {proto_path}")
            weights = load_pkl(proto_path)['prototypes'].squeeze()
            proto_init = torch.from_numpy(weights)
        else:
            proto_init = torch.randn(out_size, emb_dim)
        self.proto = nn.Parameter(proto_init,
                                  requires_grad=not config.fix_proto)

        self.transformer = PrototypicalTransformer(config)
        self.MLP = IndivMLPEmb(config, out_size, num_classes)

        check_prototypes(self.out_size, self.emb_dim, self.load_proto, proto_path)
        # This module contains the EM step
        self.panther = PANTHERBase(d=self.emb_dim, p=out_size, L=config.em_iter,
                                   tau=config.tau, out=config.out_type, ot_eps=config.ot_eps,
                                   load_proto=config.load_proto, proto_path=proto_path,
                                   fix_proto=config.fix_proto, fix_em_proto=config.fix_em_proto)

    def _encode(self, x):
        """Update prototypes via cross-attention, then run the EM step.

        Returns the (slide_representation, mixture_assignments) pair.
        """
        update_proto = self.transformer(x, self.proto)
        return self.panther(x, update_proto)

    def representation(self, x):
        """
        Construct unsupervised slide representation
        """
        slide, qqs = self._encode(x)
        return {'repr': slide, 'qq': qqs}

    def forward(self, x, label):
        """Full supervised pass: returns (logits, Y_prob, Y_hat, loss)."""
        slide, _ = self._encode(x)
        logits, Y_prob, Y_hat, loss = self.MLP(slide, label)
        return logits, Y_prob, Y_hat, loss

class Mlp(nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Projects from config.feature_dim to config.transformer["mlp_dim"] and back,
    with dropout rate config.transformer["dropout_rate"].
    """

    def __init__(self, config):
        super(Mlp, self).__init__()
        hidden_dim = config.transformer["mlp_dim"]
        self.fc1 = nn.Linear(config.feature_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, config.feature_dim)
        self.act_fn = nn.GELU()
        self.dropout = nn.Dropout(config.transformer["dropout_rate"])

    def forward(self, x):
        hidden = self.dropout(self.act_fn(self.fc1(x)))
        return self.dropout(self.fc2(hidden))
class PrototypicalTransformer(nn.Module):
    """Single cross-attention transformer layer that updates prototypes.

    Prototypes attend (as queries) over patch features (keys/values); the
    attended context is projected back to the feature dimension, added
    residually to the prototypes, and passed through a LayerNorm + FFN block.
    """

    def __init__(self, config):
        super(PrototypicalTransformer, self).__init__()

        self.feature_dim = config.feature_dim     # 1024
        self.embed_dim = config.attn_dim         # 512
        self.num_heads = config.num_heads        # 8
        self.head_dim = self.embed_dim // self.num_heads  # 64
        assert self.head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        # QKV projections (queries come from prototypes, keys/values from patches)
        self.q_proj = nn.Linear(self.feature_dim, self.embed_dim)
        self.k_proj = nn.Linear(self.feature_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.feature_dim, self.embed_dim)

        # Output projection
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

        # LayerNorm & FFN
        self.norm1 = nn.LayerNorm(self.feature_dim)
        self.norm2 = nn.LayerNorm(self.feature_dim)

        self.ffn = Mlp(config)

        # Dropout
        self.attn_dropout = nn.Dropout(config.attn_dropout_rate)
        self.proj_dropout = nn.Dropout(config.proj_dropout_rate)

        # Maps attention output back to the original feature dimension
        self.align_proj = nn.Linear(self.embed_dim, self.feature_dim)

    def forward(self, data, prototypes):
        """Return updated prototypes of shape [P, feature_dim].

        Args:
            data: patch features, shape [N, feature_dim].
            prototypes: prototype bank, shape [P, feature_dim].
        """
        batch = 1
        num_protos = prototypes.shape[0]

        feats = data.unsqueeze(0).float()           # [1, N, D]
        protos = prototypes.unsqueeze(0).float()    # [1, P, D]

        def split_heads(t):
            # [1, L, E] -> [1, H, L, head_dim]
            return t.view(batch, -1, self.num_heads, self.head_dim).transpose(1, 2)

        q = split_heads(self.q_proj(protos))  # [1, H, P, Hd]
        k = split_heads(self.k_proj(feats))   # [1, H, N, Hd]
        v = split_heads(self.v_proj(feats))   # [1, H, N, Hd]

        # Scaled dot-product attention over the patches
        scores = torch.matmul(q, k.transpose(-2, -1)) * (1.0 / math.sqrt(self.head_dim))
        weights = self.attn_dropout(F.softmax(scores, dim=-1))

        # Merge heads back: [1, H, P, Hd] -> [1, P, E]
        context = torch.matmul(weights, v).transpose(1, 2).contiguous()
        context = context.view(batch, num_protos, self.embed_dim)
        context = self.proj_dropout(self.out_proj(context))

        # Residual back in feature space, then LayerNorm + FFN sublayer
        out = self.norm1(self.align_proj(context) + protos)   # [1, P, 1024]
        out = self.norm2(out + self.ffn(out))

        return out.squeeze(0)  # [P, 1024]

# MLP per prototype
#
class IndivMLPEmb(nn.Module):
    """
    Comprised of three MLP (in sequence), each of which can be enabled/disabled and configured accordingly
    - Shared: Shared MLP across prototypes for feature dimension reduction
    - Indiv: Individual MLP per prototype
    - Post: Shared MLP across prototypes for final feature dimension reduction

    The concatenated per-prototype embeddings feed a bias-free linear
    classifier; `forward` also computes the cross-entropy loss.
    """

    def __init__(self, config, out_size, num_classes=3):
        """
        Args:
            config: configuration namespace with the *_mlp switches and the
                corresponding *_embed_dim / *_dropout / n_fc_layers fields.
            out_size: number of prototypes (one individual MLP each).
            num_classes: number of classification targets.
        """
        super().__init__()
        self.config = config
        self.n_classes = num_classes
        self.p = out_size
        self.loss_ce = nn.CrossEntropyLoss()
        mlp_func = create_mlp_with_dropout

        # Optional shared reduction applied to every prototype embedding.
        if config.shared_mlp:
            self.shared_mlp = mlp_func(in_dim=config.in_dim,
                                       hid_dims=[config.shared_embed_dim] *
                                                (config.n_fc_layers - 1),
                                       dropout=config.shared_dropout,
                                       out_dim=config.shared_embed_dim,
                                       end_with_fc=False)
            next_in_dim = config.shared_embed_dim
        else:
            self.shared_mlp = nn.Identity()
            next_in_dim = config.in_dim

        # Optional per-prototype MLPs (one module per prototype slot).
        if config.indiv_mlps:
            self.indiv_mlps = nn.ModuleList([mlp_func(in_dim=next_in_dim,
                                                      hid_dims=[config.indiv_embed_dim] *
                                                               (config.n_fc_layers - 1),
                                                      dropout=config.indiv_dropout,
                                                      out_dim=config.indiv_embed_dim,
                                                      end_with_fc=False) for _ in range(out_size)])
            next_in_dim = out_size * config.indiv_embed_dim
        else:
            self.indiv_mlps = nn.ModuleList([nn.Identity() for _ in range(out_size)])
            next_in_dim = out_size * next_in_dim

        # Optional shared MLP applied after concatenating all prototypes.
        if config.postcat_mlp:
            self.postcat_mlp = mlp_func(in_dim=next_in_dim,
                                        hid_dims=[config.postcat_embed_dim] *
                                                 (config.n_fc_layers - 1),
                                        dropout=config.postcat_dropout,
                                        out_dim=config.postcat_embed_dim,
                                        end_with_fc=False)
            next_in_dim = config.postcat_embed_dim
        else:
            self.postcat_mlp = nn.Identity()

        self.classifier = nn.Linear(next_in_dim,
                                    self.n_classes,
                                    bias=False)

    def forward(self, h, label, attn_mask=None):
        """Classify a slide embedding.

        Args:
            h: (n_samples, n_protos, in_dim) prototype-wise slide features.
            label: class indices for cross-entropy loss.
            attn_mask: unused; kept for interface compatibility.

        Returns:
            (logits, Y_prob, Y_hat, loss) tuple.

        Raises:
            ValueError: if the prototype axis of `h` does not match `out_size`.
        """
        h = self.shared_mlp(h)
        # Explicit raise instead of `assert`: asserts vanish under `python -O`,
        # and a shape mismatch here must never pass silently.
        if h.shape[1] != self.p:
            raise ValueError(f"Expected {self.p} prototypes, got {h.shape[1]}")
        h = torch.stack([self.indiv_mlps[idx](h[:, idx, :]) for idx in range(self.p)], dim=1)
        h = h.reshape(h.shape[0], -1)  # (n_samples, n_proto * config.indiv_embed_dim)
        h = self.postcat_mlp(h)
        logits = self.classifier(h)
        Y_prob = F.softmax(logits, dim=1)
        Y_hat = torch.topk(logits, 1)[1]
        loss = self.loss_ce(logits, label)
        return logits, Y_prob, Y_hat, loss


# Registry of named model configurations; 'prota' resolves to the default
# ProTA config from configs.prota_config.
CONFIGS = {
    'prota': configs.get_config()
}

if __name__ == '__main__':
    # Smoke test: build the model and run a single forward pass on random data.
    # Fix: the original called .cuda() unconditionally, crashing on CPU-only
    # machines; we now pick the device at runtime. The unused `proto` tensor
    # was removed (prototypes are loaded inside ProTA from proto_path).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ProTA(config=CONFIGS['prota'], out_size=16,
                  proto_path='/mnt/sda1/yxy_project/CLAM2024/cluster/proto/MSI_EXT_HCH_1e5_c16/0/prototypes_faiss_num_100000.pkl',
                  num_classes=3).to(device)
    input_s = torch.randn((2000, 1024), device=device)
    label = torch.LongTensor([random.randint(0, 2)]).to(device)
    logits, Y_prob, Y_hat, loss = model(input_s, label)
    print(logits, Y_prob, Y_hat, loss)