import numpy as np
import torch
from torch import nn
import torch.nn.functional as F

from .cross_channel_Transformer import WordRepresentationTransformer
from .channel_mask import channel_mask_generator
from .ECR import ECR


class NeuroMax(nn.Module):
    """VAE-based neural topic model.

    Combines:
      * a ProdLDA-style encoder with a logistic-normal prior (Laplace
        approximation of a symmetric Dirichlet),
      * embedding-based topic-word matching regularized by ECR
        (entropic optimal transport between topic and word embeddings),
      * an optional InfoNCE contrastive loss aligning topic proportions
        with contextual document embeddings (hard-negative mining),
      * an optional topic-cohesion loss pulling each topic embedding
        toward its top-N words.

    NOTE(review): `forward` only optimizes reconstruction + KL + ECR; the
    InfoNCE and topic-cohesion losses are defined here but not added in
    `forward` — presumably invoked by an external trainer. Confirm before
    relying on `use_infonce_loss` / `use_topic_cohesion_loss`.
    """

    def __init__(
        self,
        vocab_size,
        num_topics=50,
        num_groups=10,
        en_units=200,
        dropout=0.0,
        pretrained_WE=None,
        embed_size=200,
        beta_temp=0.2,
        weight_loss_ECR=250.0,
        alpha_ECR=20.0,
        sinkhorn_max_iter=1000,
        weight_loss_InfoNCE=10.0,
        use_topic_cohesion_loss=True,  # new
        weight_loss_topic_cohesion=1.0,
        top_n_words=10,
        num_hard_negatives=5,
        use_infonce_loss=True,  # new
    ):
        """Build the model.

        Args:
            vocab_size: size of the BoW vocabulary.
            num_topics: number of topics K.
            num_groups: number of topic groups (stored; not used in the
                visible code paths — TODO confirm against the trainer).
            en_units: encoder hidden width.
            dropout: dropout rate for encoder / projection heads.
            pretrained_WE: optional (vocab_size, embed_size) numpy array of
                pretrained word embeddings; random trunc-normal otherwise.
            embed_size: embedding dim when no pretrained embeddings given.
            beta_temp: softmax temperature for the topic-word distribution
                (also reused as the InfoNCE temperature).
            weight_loss_ECR / alpha_ECR / sinkhorn_max_iter: ECR settings.
            weight_loss_InfoNCE: weight of the InfoNCE loss.
            use_topic_cohesion_loss / weight_loss_topic_cohesion: cohesion
                loss switch and weight.
            top_n_words: words per topic used by the cohesion loss.
            num_hard_negatives: hard negatives per sample for InfoNCE.
            use_infonce_loss: switch for the InfoNCE loss.
        """
        super().__init__()

        self.num_topics = num_topics
        self.num_groups = num_groups
        self.beta_temp = beta_temp

        # Logistic-normal prior (Laplace approximation of Dirichlet(1)),
        # as in ProdLDA: fixed mean mu2 and variance var2 per topic.
        self.a = 1 * np.ones((1, num_topics)).astype(np.float32)
        self.mu2 = nn.Parameter(
            torch.as_tensor((np.log(self.a).T - np.mean(np.log(self.a), 1)).T)
        )
        self.var2 = nn.Parameter(
            torch.as_tensor(
                (
                    ((1.0 / self.a) * (1 - (2.0 / num_topics))).T
                    + (1.0 / (num_topics * num_topics)) * np.sum(1.0 / self.a, 1)
                ).T
            )
        )

        # Prior parameters are constants, never learned.
        self.mu2.requires_grad = False
        self.var2.requires_grad = False

        # Encoder: BoW -> hidden -> (mu, logvar) of the latent topic logits.
        self.fc11 = nn.Linear(vocab_size, en_units)
        self.fc12 = nn.Linear(en_units, en_units)
        self.fc21 = nn.Linear(en_units, num_topics)
        self.fc22 = nn.Linear(en_units, num_topics)
        self.fc1_dropout = nn.Dropout(dropout)
        self.theta_dropout = nn.Dropout(dropout)

        # BatchNorms with frozen scale (weight) — only shift/statistics adapt.
        self.mean_bn = nn.BatchNorm1d(num_topics)
        self.mean_bn.weight.requires_grad = False
        self.logvar_bn = nn.BatchNorm1d(num_topics)
        self.logvar_bn.weight.requires_grad = False
        self.decoder_bn = nn.BatchNorm1d(vocab_size, affine=True)
        self.decoder_bn.weight.requires_grad = False

        # Word embeddings: pretrained if provided, else trunc-normal init;
        # unit-normalized and learned as a Parameter either way.
        if pretrained_WE is not None:
            self.word_embeddings = torch.from_numpy(pretrained_WE).float()
        else:
            self.word_embeddings = nn.init.trunc_normal_(
                torch.empty(vocab_size, embed_size)
            )
        self.word_embeddings = nn.Parameter(F.normalize(self.word_embeddings))

        # Topic embeddings live in the same space as word embeddings.
        self.topic_embeddings = torch.empty((num_topics, self.word_embeddings.shape[1]))
        nn.init.trunc_normal_(self.topic_embeddings, std=0.1)
        self.topic_embeddings = nn.Parameter(F.normalize(self.topic_embeddings))

        self.ECR = ECR(weight_loss_ECR, alpha_ECR, sinkhorn_max_iter)

        # For a cosine-similarity doc-topic loss; not used in the visible
        # code paths (could be exposed as a hyperparameter).
        self.lambda_cos = 1.0
        self.topic_projection = nn.Linear(self.topic_embeddings.shape[1], en_units)

        # InfoNCE projection heads: theta -> 384-d to match the contextual
        # embedding; the contextual side is passed through unchanged.
        self.prj_rep = nn.Sequential(
            nn.Linear(self.num_topics, 384), nn.Dropout(dropout)
        )
        self.prj_bert = nn.Sequential()
        self.use_infonce_loss = use_infonce_loss
        self.weight_loss_InfoNCE = weight_loss_InfoNCE
        self.use_topic_cohesion_loss = use_topic_cohesion_loss
        self.weight_loss_topic_cohesion = weight_loss_topic_cohesion
        self.top_n_words = top_n_words
        self.num_hard_negatives = num_hard_negatives

        # Auxiliary modules; not referenced by the visible methods —
        # presumably consumed by an external trainer. TODO confirm.
        self.mask_generator = channel_mask_generator(
            word_embeddings=self.word_embeddings,
            threshold=0.4,  # could be exposed as a hyperparameter
        )

        self.frequency_transformer = WordRepresentationTransformer(
            vocab_size=vocab_size,
            embed_size=embed_size,
            num_layers=2,  # could be exposed as a hyperparameter
            nhead=4,  # could be exposed as a hyperparameter
            dropout=dropout,
        )

    def get_beta(self):
        """Topic-word matrix beta, shape (num_topics, vocab_size).

        Softmax over the topic dimension (dim=0) of negative squared
        topic-word distances scaled by `beta_temp`.
        """
        dist = self.pairwise_euclidean_distance(
            self.topic_embeddings, self.word_embeddings
        )
        beta = F.softmax(-dist / self.beta_temp, dim=0)
        return beta

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) during training; return mu in eval."""
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return mu + (eps * std)
        else:
            return mu

    def get_representation(self, input):
        """Encode a BoW batch into (theta, mu, logvar).

        theta is the softmax of the (possibly sampled) latent logits,
        i.e. the per-document topic proportions.
        """
        e1 = F.softplus(self.fc11(input))
        e1 = F.softplus(self.fc12(e1))
        e1 = self.fc1_dropout(e1)
        mu = self.mean_bn(self.fc21(e1))
        logvar = self.logvar_bn(self.fc22(e1))
        z = self.reparameterize(mu, logvar)
        theta = F.softmax(z, dim=1)
        return theta, mu, logvar

    def encode(self, input):
        """Return (theta, KL-divergence loss) for a BoW batch."""
        theta, mu, logvar = self.get_representation(input)
        loss_KL = self.compute_loss_KL(mu, logvar)
        return theta, loss_KL

    def get_theta(self, bow):
        """Return (theta, loss_KL) in training mode, theta alone in eval."""
        theta, loss_KL = self.encode(bow)
        if self.training:
            return theta, loss_KL
        else:
            return theta

    def sim(self, rep, bert):
        """Exponentiated cosine similarity between projected theta and
        contextual embeddings (element-wise over the batch)."""
        prep = self.prj_rep(rep)
        pbert = self.prj_bert(bert)
        return torch.exp(F.cosine_similarity(prep, pbert))

    def _compute_raw_similarity_matrix(self, rep, contextual_emb):
        """All-pairs cosine similarity between projected theta and
        contextual embeddings, shape (batch_size, batch_size)."""
        prep = self.prj_rep(rep)
        pbert = self.prj_bert(contextual_emb)
        sim_matrix = F.cosine_similarity(prep.unsqueeze(1), pbert.unsqueeze(0), dim=-1)
        return sim_matrix

    def compute_loss_InfoNCE(self, rep, contextual_emb):
        """InfoNCE loss with hard-negative mining.

        For each document, the positive is its own contextual embedding;
        negatives are the `num_hard_negatives` most similar other documents
        in the batch. Temperature is `beta_temp`. Returns 0.0 when disabled.
        """
        if not self.use_infonce_loss or self.weight_loss_InfoNCE <= 1e-6:
            return 0.0

        sim_matrix = self._compute_raw_similarity_matrix(rep, contextual_emb)
        batch_size = rep.size(0)

        positive_sims = sim_matrix.diag()

        # Cache the off-diagonal mask; rebuild when the batch size OR the
        # device changes (a stale mask on another device would crash the
        # masked indexing below).
        if (
            not hasattr(self, "cached_negative_mask")
            or self.cached_negative_mask.size(0) != batch_size
            or self.cached_negative_mask.device != rep.device
        ):
            self.cached_negative_mask = ~torch.eye(
                batch_size, device=rep.device, dtype=torch.bool
            )

        negative_sims = sim_matrix[self.cached_negative_mask].view(
            batch_size, batch_size - 1
        )

        # Take at most batch_size - 1 hard negatives per sample.
        num_neg_to_take = min(self.num_hard_negatives, batch_size - 1)
        hard_negative_sims, _ = torch.topk(
            negative_sims, num_neg_to_take, largest=True, dim=1
        )

        # Positive at column 0, hard negatives after it.
        all_sims = torch.cat([positive_sims.unsqueeze(1), hard_negative_sims], dim=1)

        log_probs = F.log_softmax(all_sims / self.beta_temp, dim=-1)

        # InfoNCE: negative log-probability of the positive (index 0).
        avg_loss = -log_probs[:, 0].mean()

        return avg_loss * self.weight_loss_InfoNCE

    def compute_loss_KL(self, mu, logvar):
        """KL divergence between N(mu, exp(logvar)) and the fixed
        logistic-normal prior N(mu2, var2), averaged over the batch."""
        var = logvar.exp()
        var_division = var / self.var2
        diff = mu - self.mu2
        diff_term = diff * diff / self.var2
        logvar_division = self.var2.log() - logvar
        # KLD: N*K
        KLD = 0.5 * (
            (var_division + diff_term + logvar_division).sum(axis=1) - self.num_topics
        )
        KLD = KLD.mean()
        return KLD

    def get_loss_ECR(self):
        """ECR regularizer over the topic-word transport cost matrix."""
        cost = self.pairwise_euclidean_distance(
            self.topic_embeddings, self.word_embeddings
        )
        loss_ECR = self.ECR(cost)
        return loss_ECR

    def pairwise_euclidean_distance(self, x, y):
        """Pairwise SQUARED Euclidean distances (no sqrt is taken).

        x: (m, d), y: (n, d) -> (m, n) matrix of ||x_i - y_j||^2.
        """
        cost = (
            torch.sum(x**2, axis=1, keepdim=True)
            + torch.sum(y**2, dim=1)
            - 2 * torch.matmul(x, y.t())
        )
        return cost

    def get_loss_topic_cohesion(self):
        """Cohesion loss: mean over topics of sum_i (1 - cos(topic, word_i))
        over each topic's top-N words under beta. Returns 0.0 when disabled."""
        if not self.use_topic_cohesion_loss or self.weight_loss_topic_cohesion <= 1e-6:
            return 0.0

        beta = self.get_beta()

        # top_indices: (num_topics, top_n_words)
        _, top_indices = torch.topk(beta, self.top_n_words, dim=1)

        # gathered_word_vecs: (num_topics, top_n_words, embed_size)
        gathered_word_vecs = self.word_embeddings[top_indices]

        # Unit-normalize both sides so the bmm below is cosine similarity.
        topic_embeddings_norm = F.normalize(self.topic_embeddings, p=2, dim=1)
        gathered_word_vecs_norm = F.normalize(gathered_word_vecs, p=2, dim=2)

        # (num_topics, top_n_words, embed) @ (num_topics, embed, 1)
        # -> (num_topics, top_n_words) after the squeeze.
        cos_sim = torch.bmm(
            gathered_word_vecs_norm, topic_embeddings_norm.unsqueeze(-1)
        ).squeeze(-1)

        # Per-topic cohesion: sum of (1 - cos) over the top words.
        cohesion_loss_per_topic = torch.sum(1 - cos_sim, dim=1)

        avg_cohesion_loss = cohesion_loss_per_topic.mean()

        return avg_cohesion_loss * self.weight_loss_topic_cohesion

    def forward(self, input, epoch_id=None):
        """Compute the training losses for one batch.

        Args:
            input: dict with
                "data": BoW counts, shape (batch_size, vocab_size).
                "contextual_embed": contextual document embeddings
                    (batch_size, 384 per the projection head above).
            epoch_id: unused here; kept for trainer compatibility.

        Returns:
            dict with "loss" (total), "loss_TM" (recon + KL), "loss_ECR".
        """
        bow = input["data"]  # [batch_size, vocab_size]
        contextual_emb = input[
            "contextual_embed"
        ]  # [batch_size, pre_trained_embed_size]

        theta, loss_KL = self.encode(bow)
        rep = theta
        beta = self.get_beta()

        # Reconstruction: expected word distribution under theta, then
        # negative log-likelihood of the observed counts.
        # NOTE(review): recon.log() can hit -inf if softmax underflows to 0
        # in float32 — consider (recon + eps).log() if NaNs appear.
        recon = F.softmax(self.decoder_bn(torch.matmul(theta, beta)), dim=-1)
        recon_loss = -(bow * recon.log()).sum(axis=1).mean()

        loss_TM = recon_loss + loss_KL

        loss_ECR = self.get_loss_ECR()

        # NOTE(review): contextual_emb / rep are not consumed here;
        # compute_loss_InfoNCE and get_loss_topic_cohesion are presumably
        # called by an external trainer — otherwise those flags are dead.
        loss = loss_TM + loss_ECR
        rst_dict = {
            "loss": loss,
            "loss_TM": loss_TM,
            "loss_ECR": loss_ECR,
        }

        return rst_dict
