# -*- coding: utf-8 -*-
import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast


def contrastive_loss(z1: torch.Tensor, z2: torch.Tensor, adj_sparse_cpu: torch.Tensor,
                     mean: bool = True, tau: float = 1.0, hidden_norm: bool = True,
                     block_size_for_gpu: int = 1024):
    """Symmetric neighbor-contrastive loss between two embedding views.

    Evaluates nei_con_loss in both directions (z1 -> z2 and z2 -> z1),
    averages the two per-node losses, then reduces by mean (default) or sum.

    Args:
        z1, z2: (N, d) embeddings of the two views, same node order.
        adj_sparse_cpu: (N, N) sparse COO adjacency on CPU; expected
            unweighted (values 1.0) and without self-loops.
        mean: reduce per-node losses by mean when True, sum otherwise.
        tau: softmax temperature forwarded to nei_con_loss.
        hidden_norm: L2-normalize embeddings before similarity when True.
        block_size_for_gpu: row-block size forwarded to nei_con_loss.

    Raises:
        ValueError: if adj_sparse_cpu is not a sparse COO tensor on CPU.
    """
    if not (adj_sparse_cpu.is_sparse and adj_sparse_cpu.device.type == 'cpu'):
        raise ValueError("adj_sparse_cpu must be a PyTorch sparse COO tensor on CPU.")

    forward = nei_con_loss(z1, z2, tau, adj_sparse_cpu, hidden_norm, block_size_for_gpu)
    backward = nei_con_loss(z2, z1, tau, adj_sparse_cpu, hidden_norm, block_size_for_gpu)
    per_node = 0.5 * (forward + backward)

    return per_node.mean() if mean else per_node.sum()


def multihead_contrastive_loss(heads: list, adj_sparse_cpu: torch.Tensor,
                               tau: float = 1.0, block_size_for_gpu: int = 1024):
    """Average contrastive loss between the first head and every other head.

    Args:
        heads: list of (N, d) per-head embedding tensors; all assumed on the
            same device (the first head's device hosts the accumulator).
        adj_sparse_cpu: (N, N) sparse COO adjacency on CPU (validated below).
        tau: softmax temperature forwarded to contrastive_loss.
        block_size_for_gpu: row-block size forwarded to contrastive_loss.

    Returns:
        Scalar float32 tensor; 0.0 when fewer than two heads are supplied.

    Raises:
        ValueError: if adj_sparse_cpu is not a sparse COO tensor on CPU.
    """
    if not adj_sparse_cpu.is_sparse or adj_sparse_cpu.device.type != 'cpu':
        raise ValueError("adj_sparse_cpu must be a PyTorch sparse COO tensor on CPU.")

    if not heads:
        # Missing input is suspicious but not fatal: route the report through
        # the warnings machinery (a bare print() was easy to miss in logs).
        import warnings
        device_to_use = torch.device('cpu')
        warnings.warn(
            f"'heads' list is empty in multihead_contrastive_loss. Returning 0 loss on device {device_to_use}",
            stacklevel=2)
        return torch.tensor(0.0, dtype=torch.float32, device=device_to_use)

    # With a single head there is nothing to contrast against.
    if len(heads) == 1:
        return torch.tensor(0.0, dtype=torch.float32, device=heads[0].device)

    total = torch.tensor(0.0, dtype=torch.float32, device=heads[0].device)
    for other in heads[1:]:
        total = total + contrastive_loss(
            heads[0], other, adj_sparse_cpu, tau=tau,
            hidden_norm=True,
            block_size_for_gpu=block_size_for_gpu
        )
    # Mean over the (len(heads) - 1) pairwise losses.
    return total / float(len(heads) - 1)


def sim(z1: torch.Tensor, z2: torch.Tensor, hidden_norm: bool = True):
    """Pairwise similarity matrix between rows of z1 and rows of z2.

    With hidden_norm=True the rows are L2-normalized first, so the result is
    cosine similarity; otherwise it is a raw dot-product Gram matrix.
    """
    if hidden_norm:
        z1, z2 = F.normalize(z1), F.normalize(z2)
    return z1 @ z2.t()


def nei_con_loss(z1_orig: torch.Tensor, z2_orig: torch.Tensor,
                 tau: float, adj_sparse_cpu: torch.Tensor,
                 hidden_norm: bool = True, block_size: int = 1024):
    '''
    Neighbor contrastive loss with block-wise GPU processing for large similarity matrices
    and a CPU-based sparse adjacency matrix to conserve GPU memory.

    Per node i the loss is -log( positives_i / (nei_count_i * denominator_i) ), where
    positives_i sums exp(sim/tau) over the inter-view self pair plus intra- and
    inter-view neighbor pairs, and denominator_i sums exp(sim/tau) over all intra-
    and inter-view pairs except the intra-view self pair.

    Args:
        z1_orig: (N, d) anchor embeddings; one loss value is produced per row.
        z2_orig: (N, d) embeddings of the other view, same node order as z1_orig.
        tau: softmax temperature applied as exp(sim / tau).
        adj_sparse_cpu: (N, N) sparse COO adjacency on CPU; assumed unweighted
            (values 1.0) and without self-loops, per the callers' contract.
        hidden_norm: if True, L2-normalize embeddings so sim is cosine similarity.
        block_size: number of anchor rows processed per device block.

    Returns:
        (N,) per-node loss tensor on the device of z1_orig.
    '''
    N = z1_orig.shape[0]
    original_device = z1_orig.device

    # --- Pre-calculate neighbor count on CPU using the sparse adjacency matrix ---
    # adj_sparse_cpu is assumed to have no self-loops and its values are 1.0.
    # NOTE: _nnz() is a private accessor; it works on uncoalesced tensors,
    # unlike the public .values()/.indices() pair.
    if adj_sparse_cpu._nnz() > 0:
        # NOTE(review): .squeeze() yields a 0-dim tensor when N == 1, but a
        # loopless 1-node graph has nnz == 0, so that case takes the else branch.
        degree_cpu = torch.sparse.sum(adj_sparse_cpu, dim=1).to_dense().squeeze()
    else:
        degree_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    # The number of positive pairs for each node is (intra-view neighbors + inter-view neighbors + self-pair)
    nei_count_cpu = (degree_cpu * 2 + 1.0).to(torch.float32)

    # Normalize embeddings once on the original device
    norm_z1_full_dev = F.normalize(z1_orig) if hidden_norm else z1_orig
    norm_z2_full_dev = F.normalize(z2_orig) if hidden_norm else z2_orig

    # Initialize result tensors on CPU; each holds one per-node scalar and is
    # filled block by block as the loop below streams over anchor rows.
    all_diag_inter_view_sim_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    all_sum_adj_intra_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    all_sum_adj_inter_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    all_sum_intra_view_sim_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    all_sum_inter_view_sim_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)
    all_diag_intra_view_sim_cpu = torch.zeros(N, device='cpu', dtype=torch.float32)

    # Temperature-scaled exponential applied to every similarity entry.
    exp_func = lambda x: torch.exp(x / tau)

    # Get indices and values of the sparse CPU adjacency matrix once
    # (private accessors; see NOTE above).
    adj_sparse_cpu_indices = adj_sparse_cpu._indices()
    adj_sparse_cpu_values = adj_sparse_cpu._values()

    # Process in blocks to avoid large intermediate similarity matrices on GPU
    for i in range(0, N, block_size):
        start_idx = i
        end_idx = min(i + block_size, N)
        current_block_actual_size = end_idx - start_idx
        if current_block_actual_size == 0: continue

        # Anchor rows for this block (a view, no copy).
        norm_z1_block_dev = norm_z1_full_dev[start_idx:end_idx, :]

        # --- Slice the sparse adjacency matrix for the current block ---
        # Keep only the nonzeros whose row falls in [start_idx, end_idx) and
        # shift those rows to block-local coordinates; columns stay global.
        if adj_sparse_cpu._nnz() > 0:
            row_indices_in_block_mask = (adj_sparse_cpu_indices[0, :] >= start_idx) & \
                                        (adj_sparse_cpu_indices[0, :] < end_idx)
            block_remapped_row_indices = adj_sparse_cpu_indices[0, row_indices_in_block_mask] - start_idx
            block_col_indices = adj_sparse_cpu_indices[1, row_indices_in_block_mask]
            block_new_indices = torch.stack([block_remapped_row_indices, block_col_indices], dim=0)
            adj_block_sparse_cpu = torch.sparse_coo_tensor(
                indices=block_new_indices,
                values=adj_sparse_cpu_values[row_indices_in_block_mask],
                size=torch.Size([current_block_actual_size, N]),
                device='cpu'
            ).coalesce()
        else:
            # Empty graph: an all-zero (block_size x N) sparse placeholder.
            adj_block_sparse_cpu = torch.sparse_coo_tensor(
                indices=torch.empty((2, 0), dtype=torch.long, device='cpu'),
                values=torch.empty(0, dtype=torch.float32, device='cpu'),
                size=torch.Size([current_block_actual_size, N]), device='cpu'
            )

        # Move the small dense block of adjacency matrix to GPU
        adj_block_dense_dev = adj_block_sparse_cpu.to_dense().to(original_device)

        # Calculate similarity matrices for the block on GPU:
        # intra-view = block rows of z1 vs all of z1; inter-view = vs all of z2.
        exp_intra_view_sim_block_dev = exp_func(torch.mm(norm_z1_block_dev, norm_z1_full_dev.t()))
        exp_inter_view_sim_block_dev = exp_func(torch.mm(norm_z1_block_dev, norm_z2_full_dev.t()))

        # --- Calculate components of the loss formula and move results to CPU ---
        # Diagonal of the block's own columns = inter-view self pairs (i, i).
        all_diag_inter_view_sim_cpu[start_idx:end_idx] = torch.diagonal(
            exp_inter_view_sim_block_dev[:, start_idx:end_idx]).to('cpu')
        # Masked sums pick out neighbor (positive) pairs in each view.
        all_sum_adj_intra_cpu[start_idx:end_idx] = (exp_intra_view_sim_block_dev * adj_block_dense_dev).sum(dim=1).to(
            'cpu')
        all_sum_adj_inter_cpu[start_idx:end_idx] = (exp_inter_view_sim_block_dev * adj_block_dense_dev).sum(dim=1).to(
            'cpu')
        # Full row sums feed the denominator.
        all_sum_intra_view_sim_cpu[start_idx:end_idx] = exp_intra_view_sim_block_dev.sum(dim=1).to('cpu')
        all_sum_inter_view_sim_cpu[start_idx:end_idx] = exp_inter_view_sim_block_dev.sum(dim=1).to('cpu')
        # Intra-view self pairs (i, i), subtracted from the denominator later.
        all_diag_intra_view_sim_cpu[start_idx:end_idx] = torch.diagonal(
            exp_intra_view_sim_block_dev[:, start_idx:end_idx]).to('cpu')

        # Free up GPU memory
        del adj_block_dense_dev, exp_intra_view_sim_block_dev, exp_inter_view_sim_block_dev
        if original_device.type == 'cuda':
            torch.cuda.empty_cache()

    # --- Assemble the loss on CPU ---
    # Numerator: Sum of similarities of positive pairs
    numerator_cpu = all_diag_inter_view_sim_cpu + all_sum_adj_intra_cpu + all_sum_adj_inter_cpu
    # Denominator: Sum of similarities of all pairs (excluding self-view, self-pair)
    denominator_cpu = (all_sum_intra_view_sim_cpu + all_sum_inter_view_sim_cpu - all_diag_intra_view_sim_cpu + 1e-8)

    loss_ratio_cpu = numerator_cpu / denominator_cpu

    # --- [CRITICAL FIX] Apply the missing normalization and log transformation ---
    # 1. Normalize by the number of positive pairs for each node.
    normalized_loss_cpu = loss_ratio_cpu / (nei_count_cpu + 1e-8)

    # 2. Compute the final negative log-likelihood. Add epsilon to prevent log(0).
    final_loss_per_node = -torch.log(normalized_loss_cpu + 1e-8)

    # Return the final loss tensor, moved back to the original device for backpropagation
    return final_loss_per_node.to(original_device)