import numpy as np
import numba
import scipy.sparse as sp
from munkres import Munkres
from collections import Counter
import torch
import sys
import ot
import pandas as pd
import numpy as np
import sklearn.neighbors
import torch.nn.functional as F
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA


# Error-handling decorator: catches, logs, and re-raises exceptions from the wrapped function
def error_handler(func):
    """Decorator that logs any exception raised by ``func`` and re-raises it.

    The exception is printed (with the wrapped function's name) and then
    propagated unchanged so callers can still handle it.
    """
    from functools import wraps

    # wraps preserves __name__/__doc__ on the wrapper; without it the error
    # message below would always report "wrapper" and introspection breaks.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Runtime message intentionally kept as in the original (Chinese):
            # "error occurred in function <name>: <message>"
            print(f"错误发生在函数 {func.__name__}: {str(e)}")
            raise  # re-raise so the caller decides how to recover

    return wrapper


@numba.njit("f4(f4[:], f4[:])")
def euclid_dist(t1, t2):
    sum = 0
    for i in range(t1.shape[0]):
        sum += (t1[i] - t2[i]) ** 2
    return np.sqrt(sum)


@numba.njit("f4[:,:](f4[:,:])", parallel=True, nogil=True)
def pairwise_distance(X):
    """计算所有点对之间的欧氏距离"""
    n = X.shape[0]
    adj = np.empty((n, n), dtype=np.float32)
    for i in numba.prange(n):
        for j in numba.prange(n):
            adj[i][j] = euclid_dist(X[i], X[j])
    return adj


@error_handler
def calculate_adj_matrix(adata):
    """Build the pairwise Euclidean distance (adjacency) matrix from the
    spot coordinates stored in ``adata.obs['array_row'/'array_col']``."""
    # The spot coordinates must be present before we touch them.
    if 'array_row' not in adata.obs or 'array_col' not in adata.obs:
        raise ValueError("adata.obs必须包含'array_row'和'array_col'")

    print("计算邻接矩阵...")
    rows = adata.obs["array_row"]
    cols = adata.obs["array_col"]
    coords = np.array([rows, cols]).T.astype(np.float32)

    # Validate the array before handing it to the numba-compiled kernel,
    # which would otherwise fail with a low-level typing error.
    if not isinstance(coords, np.ndarray):
        raise TypeError("输入X必须是numpy数组")
    if coords.ndim != 2:
        raise ValueError("输入X必须是二维数组")
    if coords.shape[0] == 0 or coords.shape[1] == 0:
        raise ValueError("输入X不能包含空维度")

    return pairwise_distance(coords)


def _nan2zero(x):
    return torch.where(torch.isnan(x), torch.zeros_like(x), x)


def _nan2inf(x):
    return torch.where(torch.isnan(x), torch.zeros_like(x) + np.inf, x)

def compute_joint(view1, view2):
    """Compute the symmetrised, normalised joint probability matrix P
    from two (batch, k) views."""
    bn, k = view1.size()
    assert (view2.size(0) == bn and view2.size(1) == k)

    # Sum of per-sample outer products, then enforce symmetry and unit mass.
    joint = (view1.unsqueeze(2) * view2.unsqueeze(1)).sum(dim=0)
    joint = (joint + joint.t()) / 2.
    return joint / joint.sum()


def consistency_loss(emb1, emb2):
    """MSE between the cosine-similarity (Gram) matrices of two embeddings,
    each centred per-dimension and L2-normalised per-row first."""
    centred1 = emb1 - emb1.mean(dim=0, keepdim=True)
    centred2 = emb2 - emb2.mean(dim=0, keepdim=True)
    unit1 = torch.nn.functional.normalize(centred1, p=2, dim=1)
    unit2 = torch.nn.functional.normalize(centred2, p=2, dim=1)
    gram1 = unit1 @ unit1.t()
    gram2 = unit2 @ unit2.t()
    return ((gram1 - gram2) ** 2).mean()


def crossview_contrastive_Loss(view1, view2, lamb=9.0, EPS=sys.float_info.epsilon):
    """Contrastive loss that maximises the mutual-information-style consistency
    between two (batch, k) soft-assignment views."""
    bn, k = view1.size()
    assert (view2.size(0) == bn and view2.size(1) == k)

    # Joint probability matrix (inlined small helper): sum of per-sample outer
    # products, symmetrised and normalised to unit mass.
    p_i_j = (view1.unsqueeze(2) * view2.unsqueeze(1)).sum(dim=0)
    p_i_j = (p_i_j + p_i_j.t()) / 2.
    p_i_j = p_i_j / p_i_j.sum()
    assert (p_i_j.size() == (k, k))

    # Marginals, broadcast to (k, k) for the elementwise formula below.
    marginal_i = p_i_j.sum(dim=1).view(k, 1).expand(k, k)
    marginal_j = p_i_j.sum(dim=0).view(1, k).expand(k, k)

    # Clamp tiny probabilities away from zero before taking logs.
    floor = torch.tensor([EPS], device=p_i_j.device)
    p_i_j = torch.where(p_i_j < EPS, floor, p_i_j)
    marginal_j = torch.where(marginal_j < EPS, floor, marginal_j)
    marginal_i = torch.where(marginal_i < EPS, floor, marginal_i)

    loss = - p_i_j * (torch.log(p_i_j)
                      - (lamb + 1) * torch.log(marginal_j)
                      - (lamb + 1) * torch.log(marginal_i))

    return loss.sum() * -1


def cosine_similarity(emb):
    """Pairwise cosine similarity of the rows of *emb*, with the diagonal
    (self-similarity) zeroed out."""
    dots = emb @ emb.T
    norms = torch.norm(emb, p=2, dim=1).reshape((emb.shape[0], 1))
    sim = torch.div(dots, norms @ norms.T)
    # Zero-norm rows produce NaNs; map them to 0 (inlined NaN handling).
    if torch.any(torch.isnan(sim)):
        sim = torch.where(torch.isnan(sim), torch.zeros_like(sim), sim)
    # Remove self-similarity from the diagonal.
    return sim - torch.diag_embed(torch.diag(sim))


def regularization_loss(emb, adj):
    """MSE between sigmoid-squashed pairwise cosine similarities of *emb*
    and the target adjacency matrix *adj*."""
    sim = torch.sigmoid(cosine_similarity(emb))
    return ((sim - adj) ** 2).mean()


@error_handler
def refine_label(adata, radius=50, key='cluster'):
    """
    Smooth cluster labels by majority vote over each cell's nearest
    spatial neighbours.

    Parameters:
        adata: object with ``obsm['spatial']`` coordinates and ``obs[key]`` labels.
        radius: number of nearest neighbours to vote over (must be > 0).
        key: column in ``adata.obs`` holding the labels to refine.

    Returns:
        List of refined labels as strings, one per cell.
    """
    # Validate inputs up front.
    if 'spatial' not in adata.obsm:
        raise ValueError("adata.obsm必须包含'spatial'坐标")
    if key not in adata.obs:
        raise ValueError(f"adata.obs中不存在'{key}'标签")
    if radius <= 0:
        raise ValueError("radius必须大于0")

    print(f"优化标签（radius={radius}）...")
    old_labels = adata.obs[key].values

    # Full pairwise distance matrix between spot coordinates.
    print("计算细胞间距离...")
    coords = adata.obsm['spatial']
    dist_mat = ot.dist(coords, coords, metric='euclidean')
    n_cells = dist_mat.shape[0]

    # Majority vote among the `radius` nearest neighbours of each cell
    # (index 0 of the sorted order is the cell itself and is skipped).
    print("优化每个细胞的标签...")
    refined = []
    for cell in tqdm(range(n_cells), desc="处理细胞"):
        order = dist_mat[cell, :].argsort()
        neighbours = [old_labels[order[j]] for j in range(1, radius + 1)]
        refined.append(max(neighbours, key=neighbours.count))

    return [str(lbl) for lbl in refined]


def munkres_newlabel(y_true, y_pred):
    """Map predicted cluster labels onto the ground-truth label space with the
    Kuhn-Munkres (Hungarian) assignment algorithm.

    Builds a cluster-overlap matrix between true and predicted label sets,
    solves the maximum-overlap assignment, and relabels the predictions.

    Parameters:
        y_true: array-like of non-negative ground-truth labels.
        y_pred: array-like of non-negative predicted labels; NOTE it is
            mutated in place when the two label sets differ.

    Returns:
        np.ndarray of remapped predicted labels on success, or the tuple
        ``(0, 0, 0)`` when class counts cannot be reconciled.
        NOTE(review): the return arity is inconsistent — callers must
        handle both cases.
    """
    # Input validation: matching lengths, non-negative labels.
    if len(y_true) != len(y_pred):
        raise ValueError("y_true和y_pred的长度必须一致")
    if np.min(y_true) < 0 or np.min(y_pred) < 0:
        raise ValueError("标签值不能为负数")

    # Shift true labels to start at 0 (a no-op unless the minimum is > 0,
    # since negatives were rejected above).
    y_true = y_true - np.min(y_true)
    l1 = list(set(y_true))
    numclass1 = len(l1)
    l2 = list(set(y_pred))
    numclass2 = len(l2)
    ind = 0
    # If the predicted set is missing some true labels, overwrite the first
    # few predictions with the missing labels so both sets have the same
    # classes. NOTE(review): this clobbers y_pred[0..k] regardless of their
    # previous content — confirm this padding strategy is intended.
    if numclass1 != numclass2:
        for i in l1:
            if i in l2:
                pass
            else:
                y_pred[ind] = i
                ind += 1

    l2 = list(set(y_pred))
    numclass2 = len(l2)

    if numclass1 != numclass2:
        print('错误：标签类别数量不匹配，无法进行映射')
        return 0, 0, 0

    # cost[i][j] = number of samples with true label l1[i] predicted as l2[j].
    cost = np.zeros((numclass1, numclass2), dtype=int)
    for i, c1 in enumerate(l1):
        mps = [i1 for i1, e1 in enumerate(y_true) if e1 == c1]
        for j, c2 in enumerate(l2):
            mps_d = [i1 for i1 in mps if y_pred[i1] == c2]
            cost[i][j] = len(mps_d)

    # match two clustering results by Munkres algorithm
    # (negated so the minimizing solver maximizes cluster overlap)
    m = Munkres()
    cost = cost.__neg__().tolist()
    indexes = m.compute(cost)

    # get the match results
    new_predict = np.zeros(len(y_pred))
    for i, c in enumerate(l1):
        # corresponding label in l2:
        c2 = l2[indexes[i][1]]

        # ai is the index with label==c2 in the pred_label list
        ai = [ind for ind, elm in enumerate(y_pred) if elm == c2]
        new_predict[ai] = c

    print('Counter(new_predict)\n', Counter(new_predict))
    print('Counter(y_true)\n', Counter(y_true))

    return new_predict


@error_handler
def Cal_Spatial_Net(adata, rad_cutoff=None, k_cutoff=None, model='Radius', Spatial_uns="Spatial_Net"):
    """
    Build a spatial neighbour network and store it as an edge list
    (Cell1, Cell2, Distance) in ``adata.uns[Spatial_uns]``.

    Parameters:
        adata: object with spot coordinates in ``obsm['spatial']``.
        rad_cutoff: neighbour radius (required when model='Radius').
        k_cutoff: number of neighbours (required when model='KNN').
        model: 'Radius' or 'KNN'.
        Spatial_uns: key under which the edge list is stored in ``adata.uns``.
    """
    # Validate parameters before doing any heavy work.
    if model not in ['Radius', 'KNN']:
        raise ValueError("model必须为'Radius'或'KNN'")
    if model == 'Radius' and rad_cutoff is None:
        raise ValueError("使用Radius模型时必须指定rad_cutoff")
    if model == 'KNN' and k_cutoff is None:
        raise ValueError("使用KNN模型时必须指定k_cutoff")
    if 'spatial' not in adata.obsm:
        raise ValueError("adata.obsm必须包含'spatial'坐标")

    print('------Calculating spatial graph...')
    coords = pd.DataFrame(adata.obsm['spatial'])
    coords.index = adata.obs.index
    coords.columns = ['imagerow', 'imagecol']

    edge_frames = []
    if model == 'Radius':
        print(f"使用Radius模型，半径截止值: {rad_cutoff}")
        nn = sklearn.neighbors.NearestNeighbors(radius=rad_cutoff).fit(coords)
        distances, indices = nn.radius_neighbors(coords, return_distance=True)
        # One (source, neighbour, distance) frame per cell.
        for src in tqdm(range(indices.shape[0]), desc="处理细胞邻居"):
            edge_frames.append(pd.DataFrame(zip([src] * indices[src].shape[0], indices[src], distances[src])))

    if model == 'KNN':
        print(f"使用KNN模型，邻居数量: {k_cutoff}")
        # k_cutoff + 1 because each cell is its own nearest neighbour.
        nn = sklearn.neighbors.NearestNeighbors(n_neighbors=k_cutoff + 1).fit(coords)
        distances, indices = nn.kneighbors(coords)
        for src in tqdm(range(indices.shape[0]), desc="处理细胞邻居"):
            edge_frames.append(pd.DataFrame(zip([src] * indices.shape[1], indices[src, :], distances[src, :])))

    edges = pd.concat(edge_frames)
    edges.columns = ['Cell1', 'Cell2', 'Distance']

    # Drop self-edges (distance 0) and translate integer indices to cell names.
    Spatial_Net = edges.copy()
    Spatial_Net = Spatial_Net.loc[Spatial_Net['Distance'] > 0, ]
    idx_to_name = dict(zip(range(coords.shape[0]), np.array(coords.index)))
    Spatial_Net['Cell1'] = Spatial_Net['Cell1'].map(idx_to_name)
    Spatial_Net['Cell2'] = Spatial_Net['Cell2'].map(idx_to_name)

    adata.uns[Spatial_uns] = Spatial_Net
    print(f"空间邻接网络构建完成，包含{len(Spatial_Net)}条边")


def Transfer_Data(adata):
    """Convert the stored spatial network into a sparse graph plus a dense
    expression matrix.

    Parameters:
        adata: object with ``uns['Spatial_Net']`` (from Cal_Spatial_Net),
            ``obs_names``, ``n_obs`` and expression matrix ``X``.

    Returns:
        (G, X): G is an (n_obs, n_obs) sparse adjacency matrix with self-loops
        added; X is the expression matrix in dense form.
    """
    # The spatial network must have been computed first.
    if 'Spatial_Net' not in adata.uns:
        raise ValueError("adata.uns中不存在'Spatial_Net'，请先运行Cal_Spatial_Net")

    G_df = adata.uns['Spatial_Net'].copy()
    # Translate cell names back to integer positions.
    cells = np.array(adata.obs_names)
    cells_id_tran = dict(zip(cells, range(cells.shape[0])))
    G_df['Cell1'] = G_df['Cell1'].map(cells_id_tran)
    G_df['Cell2'] = G_df['Cell2'].map(cells_id_tran)

    # Unweighted adjacency with self-loops.
    G = sp.coo_matrix((np.ones(G_df.shape[0]), (G_df['Cell1'], G_df['Cell2'])), shape=(adata.n_obs, adata.n_obs))
    G = G + sp.eye(G.shape[0])

    # Fix: adata.X may already be a dense ndarray, which has no .todense();
    # only densify when it is actually sparse.
    X = adata.X.todense() if sp.issparse(adata.X) else np.asarray(adata.X)
    return G, X


class NB(object):
    """Negative-binomial negative log-likelihood loss.

    Attributes:
        eps: small constant for numerical stability.
        scale_factor: multiplier applied to predictions before the likelihood.
        theta: dispersion parameter; expected to be a torch tensor when
            loss() is called (presumably broadcastable against y_pred —
            TODO confirm against callers).
    """

    def __init__(self, theta=None, scale_factor=1.0):
        super(NB, self).__init__()
        self.eps = 1e-10
        self.scale_factor = scale_factor
        self.theta = theta

    def loss(self, y_true, y_pred, mean=True):
        """Return the NB negative log-likelihood of y_true given mean y_pred.

        Parameters:
            y_true: observed counts.
            y_pred: predicted means (scaled by scale_factor).
            mean: if True return the scalar mean, else the elementwise tensor.
        """
        y_pred = y_pred * self.scale_factor
        # Cap theta at 1e6. Fix: torch.clamp stays on theta's own device,
        # whereas the original torch.minimum(theta, torch.tensor(1e6))
        # allocated the cap on CPU and broke for GPU-resident theta.
        theta = torch.clamp(self.theta, max=1e6)
        # Log-gamma terms of the NB likelihood.
        t1 = torch.lgamma(theta + self.eps) + torch.lgamma(y_true + 1.0) - torch.lgamma(y_true + theta + self.eps)
        # Remaining log-likelihood terms.
        t2 = (theta + y_true) * torch.log(1.0 + (y_pred / (theta + self.eps))) + (
                y_true * (torch.log(theta + self.eps) - torch.log(y_pred + self.eps)))
        final = t1 + t2
        # Map NaNs to +inf so invalid values surface loudly (inlined NaN guard
        # so the class is self-contained).
        final = torch.where(torch.isnan(final), torch.zeros_like(final) + np.inf, final)
        if mean:
            final = torch.mean(final)
        return final


class ZINB(NB):
    """Zero-inflated negative-binomial loss.

    Attributes:
        pi: dropout (zero-inflation) probability tensor.
        ridge_lambda: L2 penalty strength on pi.
    """

    def __init__(self, pi, ridge_lambda=0.0, **kwargs):
        super().__init__(**kwargs)
        self.pi = pi
        self.ridge_lambda = ridge_lambda

    def loss(self, y_true, y_pred, mean=True):
        """Return the ZINB negative log-likelihood.

        Zero observations use the zero-inflated branch; positive counts use
        the NB branch plus the log-probability of not dropping out.
        """
        scale_factor = self.scale_factor
        eps = self.eps
        # Cap theta at 1e6. Fix: torch.clamp stays on theta's own device,
        # whereas torch.minimum with a freshly-created CPU tensor broke for
        # GPU-resident theta.
        theta = torch.clamp(self.theta, max=1e6)
        # NB likelihood of the non-zero case, adjusted for dropout probability.
        nb_case = super().loss(y_true, y_pred, mean=False) - torch.log(1.0 - self.pi + eps)
        y_pred = y_pred * scale_factor
        # Probability that an NB draw is exactly zero.
        zero_nb = torch.pow(theta / (theta + y_pred + eps), theta)
        zero_case = -torch.log(self.pi + ((1.0 - self.pi) * zero_nb) + eps)
        # Choose branch elementwise: (near-)zero observations vs positive counts.
        result = torch.where(torch.lt(y_true, 1e-8), zero_case, nb_case)
        ridge = self.ridge_lambda * torch.square(self.pi)
        result += ridge
        if mean:
            result = torch.mean(result)
        result = _nan2inf(result)
        return result


class decoder(torch.nn.Module):
    """ZINB decoder head: maps embeddings to (pi, dispersion, mean) parameters."""

    def __init__(self, nfeat, nhid1, nhid2):
        super(decoder, self).__init__()
        # Shared hidden transformation.
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(nfeat, nhid1),
            torch.nn.BatchNorm1d(nhid1),
            torch.nn.ReLU()
        )
        # Three parallel output heads.
        self.pi = torch.nn.Linear(nhid1, nhid2)
        self.disp = torch.nn.Linear(nhid1, nhid2)
        self.mean = torch.nn.Linear(nhid1, nhid2)
        # Clamped activations keep dispersion and mean in numerically safe ranges.
        self.DispAct = lambda x: torch.clamp(F.softplus(x), 1e-4, 1e4)
        self.MeanAct = lambda x: torch.clamp(torch.exp(x), 1e-5, 1e6)

    def forward(self, emb):
        """Return [pi, disp, mean] for a batch of embeddings."""
        x = self.decoder(emb)
        pi = torch.sigmoid(self.pi(x))
        disp = self.DispAct(self.disp(x))
        mean = self.MeanAct(self.mean(x))
        return [pi, disp, mean]

    # Fix: declared as @staticmethod. The original was an instance method whose
    # first parameter was named `adata`, so calling it on a decoder instance
    # bound the module itself to `adata` and crashed. As a staticmethod both
    # `decoder.refine_label(adata)` and `instance.refine_label(adata)` work.
    # NOTE(review): this duplicates the module-level refine_label (minus
    # validation and progress bar) — consider removing one copy.
    @staticmethod
    def refine_label(adata, radius=50, key='cluster'):
        """Majority-vote label smoothing over each cell's nearest spatial neighbours."""
        n_neigh = radius
        new_type = []
        old_type = adata.obs[key].values

        # Pairwise distances between spot coordinates.
        position = adata.obsm['spatial']
        distance = ot.dist(position, position, metric='euclidean')
        n_cell = distance.shape[0]

        for i in range(n_cell):
            vec = distance[i, :]
            index = vec.argsort()
            neigh_type = []
            # Index 0 is the cell itself; vote among the n_neigh nearest others.
            for j in range(1, n_neigh + 1):
                neigh_type.append(old_type[index[j]])
            max_type = max(neigh_type, key=neigh_type.count)
            new_type.append(max_type)

        new_type = [str(i) for i in list(new_type)]
        return new_type


class NB(object):
    """Device-aware negative-binomial negative log-likelihood loss.

    NOTE: this redefines the earlier NB class, adding explicit device
    handling so all tensors are moved to one cached device in loss().
    """

    def __init__(self, theta=None, scale_factor=1.0):
        super(NB, self).__init__()
        self.eps = 1e-10
        self.scale_factor = scale_factor
        self.theta = theta
        # Pick the compute device once so every tensor in loss() agrees.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def loss(self, y_true, y_pred, mean=True):
        """Return the NB negative log-likelihood of y_true given mean y_pred.

        Parameters:
            y_true: observed counts.
            y_pred: predicted means (scaled by scale_factor).
            mean: if True return the scalar mean, else the elementwise tensor.
        """
        eps = self.eps
        y_true = y_true.to(self.device)
        scaled_pred = y_pred.to(self.device) * self.scale_factor
        # Cap theta at 1e6 on the cached device.
        theta = torch.minimum(self.theta.to(self.device), torch.tensor(1e6, device=self.device))

        # Log-gamma terms of the NB likelihood.
        lg = torch.lgamma(theta + eps) + torch.lgamma(y_true + 1.0) - torch.lgamma(y_true + theta + eps)

        # Remaining log-likelihood terms, split up to limit overflow.
        ratio_log = torch.log(1.0 + (scaled_pred / (theta + eps)))
        rest = (theta + y_true) * ratio_log + y_true * (torch.log(theta + eps) - torch.log(scaled_pred + eps))

        nll = lg + rest
        # NaNs become +inf so invalid values are impossible to miss
        # (inlined NaN guard so the class is self-contained).
        nll = torch.where(torch.isnan(nll), torch.zeros_like(nll) + np.inf, nll)

        return torch.mean(nll) if mean else nll


class Tweedie(NB):
    def __init__(self, p=1.5, ridge_lambda=0.0, **kwargs):
        """
        Tweedie-distribution loss, inheriting scale factor and cached device
        from the (device-aware) NB class.

        Parameters:
            p: Tweedie power parameter (1 < p < 2) controlling the
                distribution's shape.
            ridge_lambda: L2 regularization strength.
        """
        super().__init__(**kwargs)
        # p is stored as a fixed (non-trainable) scalar tensor on the cached device.
        self.p = torch.tensor(p, device=self.device, dtype=torch.float32, requires_grad=False)
        self.ridge_lambda = ridge_lambda
        self.eps = 1e-10  # numerical-stability constant (re-set after NB.__init__)

    def loss(self, y_true, y_pred, mean=True):
        """
        Compute the Tweedie deviance loss (sign-flipped to be non-negative).

        Parameters:
            y_true: observed values.
            y_pred: predicted values (means).
            mean: if True return the mean loss, otherwise the SUM of the
                elementwise losses.
                NOTE(review): mean=False returns torch.sum(...), not the
                unreduced tensor — inconsistent with NB.loss/ZINB.loss;
                confirm callers expect the sum.

        Returns:
            Scalar Tweedie loss tensor.
        """
        # Move inputs onto the cached device.
        y_true = y_true.to(self.device)
        y_pred = y_pred.to(self.device)

        # Apply the scale factor inherited from NB.
        y_pred_scaled = y_pred * self.scale_factor

        # Floor both tensors at eps before the power/log terms below.
        y_pred_scaled = torch.clamp(y_pred_scaled, min=self.eps)
        y_true = torch.clamp(y_true, min=self.eps)

        # Core Tweedie deviance terms; the leading minus sign flips the
        # expression so the loss is non-negative for 1 < p < 2.
        term1 = y_true * y_pred_scaled ** (1 - self.p)
        term2 = (2 - self.p) / (1 - self.p) * y_pred_scaled ** (2 - self.p)
        tweedie_loss = - (term1 - term2) / ((1 - self.p) * (2 - self.p))  # sign correction

        # Optional L2 penalty (note: applied to self.theta, not to p).
        if self.ridge_lambda > 0:
            ridge = self.ridge_lambda * torch.square(self.theta)
            tweedie_loss += ridge

        # Mask of entries treated as exact zeros.
        # NOTE(review): y_true was clamped to min=eps above, so y_true < eps
        # is never true and this mask is always all-zero — the zero-handling
        # branch below appears to be dead code; confirm intent.
        zero_mask = (y_true < self.eps).float()

        # Zero-observation handling via a Poisson-like zero probability
        # (kept as tensor ops to avoid scalar conversion).
        p_less_than_2 = (self.p < 2.0).float().view(1, 1)  # broadcastable flag for p < 2
        zero_prob = torch.exp(-y_pred_scaled)
        zero_loss = -torch.log(zero_prob + self.eps)

        # Blend zero/non-zero losses when p < 2; otherwise keep the plain loss.
        tweedie_loss = (zero_mask * zero_loss + (1 - zero_mask) * tweedie_loss) * p_less_than_2 + tweedie_loss * (
                    1 - p_less_than_2)

        # Replace NaNs/Infs-from-NaN with +inf so invalid values surface.
        tweedie_loss = _nan2inf(tweedie_loss)

        # Return the mean or the sum of elementwise losses.
        if mean:
            return torch.mean(tweedie_loss)
        else:
            return torch.sum(tweedie_loss)