# -*- coding: utf-8 -*-
import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import dgl
import numpy as np
import time
from sklearn import metrics
from sklearn.cluster import KMeans
import scanpy as sc
import pandas as pd
from sklearn.decomposition import PCA
import psutil
import resource

# Assuming these relative imports point to your actual project files
from .utils import Transfer_Data
from .utils import ZINB
from .utils import refine_label
from .process import set_seed
from .gat import GAT
from .loss import multihead_contrastive_loss


def get_memory_usage():
    """Return the resident memory usage of the current process in MB.

    Prefers psutil's RSS reading; falls back to ``resource.getrusage`` when
    psutil is unavailable or fails (note: ``ru_maxrss`` is the *peak* RSS and
    its unit is platform-dependent — KB on Linux, bytes on macOS).
    """
    try:
        process = psutil.Process(os.getpid())
        mem = process.memory_info().rss / (1024 ** 2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any psutil failure still falls back here.
        mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
    return mem


def print_gpu_memory(device, message=""):
    """Print allocated/reserved CUDA memory for *device*, or a CPU fallback note."""
    on_cuda = torch.cuda.is_available() and device.type == 'cuda'
    if not on_cuda:
        print(f"[GPU Memory] {message} No GPU available or device is CPU")
        return
    mb = 1024 ** 2
    allocated = torch.cuda.memory_allocated(device) / mb
    reserved = torch.cuda.memory_reserved(device) / mb
    print(f"[GPU Memory ({device})] {message} Allocated: {allocated:.2f} MB, Reserved: {reserved:.2f} MB")


def train(adata: sc.AnnData, k: int = 0, hidden_dims: int = 3000, n_epochs: int = 100,
          num_hidden: int = 100, lr: float = 0.00008, key_added: str = 'SpaGRA',
          a: float = 0.1, b: float = 1.0, c: float = 0.5, radius: int = 50,
          weight_decay: float = 0.0001, random_seed: int = 0, feat_drop: float = 0.01,
          attn_drop: float = 0.1, negative_slope: float = 0.01, heads: int = 4,
          method: str = "kmeans", reso: float = 1.0,
          block_size_for_loss: int = 1024,
          device: torch.device = None):
    """Train a multi-head GAT on spatial data and write cluster labels/embeddings back to `adata`.

    The model is optimized with a weighted sum of three losses:
    ``a * contrastive + b * reg + c * zinb``, where ``reg`` is an MSE between
    (max-normalized) pairwise distances in embedding space and in physical
    spot coordinates, and ``zinb`` is a ZINB reconstruction loss on the
    expression features. Pairwise-distance terms are computed in row blocks
    of ``block_size_for_loss`` to bound peak GPU memory.

    Parameters
    ----------
    adata : sc.AnnData
        Must contain ``uns['Spatial_Net']`` (from ``Cal_Spatial_Net``) and
        ``obsm['spatial']`` coordinates; ``var['highly_variable']`` is used
        to subset genes when present.
    k : int
        Number of KMeans clusters (only used when ``method == "kmeans"`` and ``k > 0``).
    hidden_dims, num_hidden, heads, feat_drop, attn_drop, negative_slope
        GAT architecture / dropout hyperparameters, forwarded to ``GAT``.
    n_epochs, lr, weight_decay, random_seed
        Optimization settings.
    key_added : str
        ``adata.obs`` column receiving the final (possibly refined) labels.
    a, b, c : float
        Weights for the contrastive, distance-regression and ZINB losses.
    radius : int
        When non-zero, labels are smoothed with ``refine_label``.
    method : str
        ``"kmeans"`` or ``"louvain"``.
    reso : float
        Louvain resolution (louvain mode only).
    block_size_for_loss : int
        Row-block size for chunked pairwise-distance computations.
    device : torch.device, optional
        Defaults to ``cuda:0`` when available, else CPU.

    Returns
    -------
    sc.AnnData
        The same ``adata`` with ``obs['cluster']`` / ``obs[key_added]`` and
        ``obsm['emb']`` / ``obsm['emb_pca']`` populated.
    """
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    set_seed(random_seed)
    print(f"Using device: {device}")
    print_gpu_memory(device, "Initial")

    # Normalize the expression matrix to CSR so downstream sparse ops are uniform.
    if not sp.isspmatrix_csr(adata.X):
        adata.X = sp.csr_matrix(adata.X)

    # Work on the highly-variable-gene subset when the flag column exists.
    if 'highly_variable' in adata.var.columns:
        adata_Vars = adata[:, adata.var['highly_variable']].copy()
    else:
        adata_Vars = adata.copy()

    # The spatial neighbor graph must have been precomputed; copy it over to
    # the gene-subset view if it only lives on the full object.
    if 'Spatial_Net' not in adata_Vars.uns.keys():
        if 'Spatial_Net' not in adata.uns.keys():
            raise ValueError("Spatial_Net is not found! Run Cal_Spatial_Net first!")
        else:
            adata_Vars.uns['Spatial_Net'] = adata.uns['Spatial_Net']

    # Project helper: returns (adjacency, dense feature matrix) for the GAT.
    adj_scipy, features_numpy = Transfer_Data(adata_Vars)
    if not sp.isspmatrix_csr(adj_scipy):
        adj_scipy = sp.csr_matrix(adj_scipy)

    g = dgl.from_scipy(adj_scipy).to(device)
    num_feats = features_numpy.shape[1]

    # Build a separate adjacency for the contrastive loss: binarize edge
    # weights, strip the diagonal (self-loops), and keep it as a coalesced
    # sparse COO tensor on the CPU (moved to GPU blockwise inside the loss).
    adj_processed_for_loss = adj_scipy.copy()
    if adj_processed_for_loss.nnz > 0:
        adj_processed_for_loss.data = np.ones_like(adj_processed_for_loss.data, dtype=np.float32)
    adj_processed_for_loss = adj_processed_for_loss - sp.diags(adj_processed_for_loss.diagonal(), offsets=0,
                                                               format='csr')
    adj_processed_for_loss.eliminate_zeros()
    adj_coo = adj_processed_for_loss.tocoo()
    adj_indices = torch.from_numpy(np.vstack((adj_coo.row, adj_coo.col))).long()
    adj_values = torch.from_numpy(adj_coo.data).float()
    adj_shape = adj_coo.shape
    adj_sparse_cpu_for_loss = torch.sparse_coo_tensor(adj_indices, adj_values, torch.Size(adj_shape),
                                                      device='cpu').coalesce()

    features = torch.FloatTensor(features_numpy).to(device)
    model = GAT(g, hidden_dims, 1, num_feats, num_hidden, [heads], F.elu, feat_drop, attn_drop, negative_slope)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    coords = torch.tensor(adata_Vars.obsm['spatial']).float().to(device)

    # NOTE(review): ari_max / idx_max are initialized but never updated or
    # read below — they look like leftovers from a best-epoch tracker.
    ari_max = 0.0
    N_vars = adata_Vars.shape[0]
    idx_max = np.zeros(N_vars, dtype=int)

    model.train()
    print_gpu_memory(device, "Pre-training")

    for epoch in range(n_epochs):
        t_epoch_start = time.time()
        optimizer.zero_grad(set_to_none=True)

        # Forward pass: per-head embeddings plus ZINB parameters (pi, dispersion, mean).
        heads_list_for_loss, pi, disp, mean_zinb = model(features)
        concatenated_embeddings = torch.cat(heads_list_for_loss, axis=1)

        reg_loss = torch.tensor(0.0, device=device)
        # Normalizer for the summed MSE: total entries of the full N x N distance matrix.
        n_total_items_in_dist_matrix = concatenated_embeddings.shape[0] ** 2

        # First pass (no grad): global max of spatial distances, computed in
        # row blocks so the full distance matrix is never materialized.
        with torch.no_grad():
            sp_dists_max = torch.tensor(0.0, device=device)
            for i in range(0, coords.shape[0], block_size_for_loss):
                end_i = min(i + block_size_for_loss, coords.shape[0])
                sp_dists_block = torch.cdist(coords[i:end_i], coords, p=2)
                current_max = torch.max(sp_dists_block)
                if current_max > sp_dists_max:
                    sp_dists_max = current_max

        # Second pass (with grad): blockwise MSE between normalized embedding
        # distances and normalized spatial distances.
        # NOTE(review): the embedding-distance normalizer is the *per-block*
        # max, while the spatial normalizer is global — TODO confirm intended.
        for i in range(0, concatenated_embeddings.shape[0], block_size_for_loss):
            start_idx = i
            end_idx = min(i + block_size_for_loss, concatenated_embeddings.shape[0])

            z_dists_block = torch.cdist(concatenated_embeddings[start_idx:end_idx], concatenated_embeddings, p=2)
            sp_dists_block = torch.cdist(coords[start_idx:end_idx], coords, p=2)

            max_z_dist_block = torch.max(z_dists_block)
            if max_z_dist_block.item() > 1e-9:
                z_dists_block = torch.div(z_dists_block, max_z_dist_block)
            else:
                z_dists_block = torch.zeros_like(z_dists_block)

            if sp_dists_max.item() > 1e-9:
                sp_dists_block = torch.div(sp_dists_block, sp_dists_max)
            else:
                sp_dists_block = torch.zeros_like(sp_dists_block)

            reg_loss += F.mse_loss(z_dists_block, sp_dists_block, reduction='sum')

        if n_total_items_in_dist_matrix > 0:
            reg_loss = reg_loss / n_total_items_in_dist_matrix
        else:
            reg_loss = torch.tensor(0.0, device=device)

        # ZINB reconstruction loss of the input features.
        zinb_loss = ZINB(pi, theta=disp, ridge_lambda=1).loss(features, mean_zinb, mean=True)

        # Multi-head contrastive loss over the (CPU-resident) binarized adjacency.
        contrastive_loss_val = multihead_contrastive_loss(
            heads_list_for_loss,
            adj_sparse_cpu_for_loss,
            tau=10.0,
            block_size_for_gpu=block_size_for_loss
        )
        total_loss = a * contrastive_loss_val + b * reg_loss + c * zinb_loss

        total_loss.backward()
        optimizer.step()
        t_epoch_end = time.time()

        # --- MODIFICATION START: Unconditional logging and cleanup ---

        # 1. Print logs for EVERY epoch
        print(f"Epoch {epoch}/{n_epochs - 1}: TotalLoss={total_loss.item():.4f} "
              f"(C={contrastive_loss_val.item():.4f}, R={reg_loss.item():.4f}, Z={zinb_loss.item():.4f}) | "
              f"Time: {t_epoch_end - t_epoch_start:.2f}s")
        print_gpu_memory(device, f"Epoch {epoch} (before cleanup)")

        # 2. Manually delete intermediate tensors to break references
        del total_loss
        del contrastive_loss_val
        del reg_loss
        del zinb_loss
        del concatenated_embeddings
        del heads_list_for_loss
        del pi, disp, mean_zinb

        # 3. Force cleanup
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        # 4. (Optional) Print memory usage AFTER cleanup to confirm effect
        print_gpu_memory(device, f"Epoch {epoch} (after cleanup)")

        # --- MODIFICATION END ---

    # Final inference pass: concatenated head embeddings as a NumPy array.
    model.eval()
    with torch.no_grad():
        final_heads_list, _, _, _ = model(features)
        final_concatenated_embeddings = torch.cat(final_heads_list, axis=1).cpu().detach().numpy()

    if method == "kmeans":
        if k > 0:
            # Cap k at the number of observations; KMeans needs at least 2 clusters.
            actual_k_final = min(k, final_concatenated_embeddings.shape[0])
            if actual_k_final > 1:
                kmeans = KMeans(n_clusters=actual_k_final, random_state=random_seed, n_init='auto').fit(
                    np.nan_to_num(final_concatenated_embeddings))
                if 'Ground Truth' in adata_Vars.obs.columns:
                     ari_res = metrics.adjusted_rand_score(adata_Vars.obs['Ground Truth'], kmeans.labels_)
                     print(f"Final KMeans ARI: {ari_res:.4f}")
                # Align labels from the gene-subset view back onto the full obs index.
                adata.obs["cluster"] = pd.Series(kmeans.labels_, index=adata_Vars.obs.index, dtype=str).reindex(
                        adata.obs.index)

    elif method == "louvain":
        # Graph-based clustering on the embeddings via scanpy's neighbors + louvain.
        adata_tmp = sc.AnnData(final_concatenated_embeddings)
        sc.pp.neighbors(adata_tmp, n_neighbors=min(20, adata_tmp.shape[0] - 1), use_rep='X')
        sc.tl.louvain(adata_tmp, resolution=reso, random_state=random_seed)
        adata.obs["cluster"] = pd.Series(adata_tmp.obs['louvain'].values, index=adata_Vars.obs.index,
                                         dtype=str).reindex(adata.obs.index)

    # NOTE(review): this indexes embedding rows by positions of adata_Vars
    # labels within adata.obs — identity when both share the same obs index,
    # but the inverse mapping (adata_Vars.obs.index.get_indexer(adata.obs.index))
    # may be what is intended if the indices ever differ. TODO confirm.
    adata.obsm["emb"] = final_concatenated_embeddings[adata.obs.index.get_indexer(adata_Vars.obs.index)]

    # Optional spatial smoothing of labels; presumably majority-vote within
    # `radius` — verify against refine_label's implementation.
    if radius != 0 and 'cluster' in adata.obs and not adata.obs['cluster'].isna().all():
        refined_labels = refine_label(adata, radius=radius, key='cluster')
        adata.obs[key_added] = pd.Series(refined_labels, index=adata.obs.index, dtype=str)
    elif 'cluster' in adata.obs:
        adata.obs[key_added] = adata.obs['cluster']

    # Store a 50-component PCA of the embedding (or the raw embedding when it
    # is already <= 50-dimensional) for downstream visualization.
    if "emb" in adata.obsm and adata.obsm["emb"].shape[1] > 50:
        pca = PCA(n_components=50, random_state=random_seed)
        adata.obsm['emb_pca'] = pca.fit_transform(adata.obsm['emb'].copy())
    else:
        adata.obsm['emb_pca'] = adata.obsm.get('emb', np.array([])).copy()

    print_gpu_memory(device, "Final")
    return adata