from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from combat.pycombat import pycombat
from scipy.sparse import issparse
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler

from pyfeasc.utils.mca import run_mca


# Batch-aware standardization
def batch_aware_standardization(adata: AnnData, batch_col: str):
	"""
	Batch-aware standardization: Z-score every gene within each batch.

	:param adata: AnnData object
	:param batch_col: name of the batch column in ``adata.obs``
	:return: a new AnnData whose ``X`` is standardized per batch
	"""
	adata_standardized = adata.copy()
	# Densify once so slices of X can be assigned in place below.
	if issparse(adata_standardized.X):
		adata_standardized.X = adata_standardized.X.toarray()
	for batch in adata_standardized.obs[batch_col].unique():
		# Index X directly with a boolean ndarray. The previous code wrote
		# ``adata_standardized[batch_idx, :].X = ...``, which assigns to an
		# AnnData *view*; anndata's copy-on-write converts that view into a
		# detached copy, so the standardized values were silently discarded.
		mask = (adata_standardized.obs[batch_col] == batch).to_numpy()
		batch_data = adata_standardized.X[mask, :]
		standardized = StandardScaler().fit_transform(batch_data)
		# Genes with zero variance inside a batch yield NaNs -> replace with 0.
		adata_standardized.X[mask, :] = np.nan_to_num(standardized)
	return adata_standardized


# Split data by batch
def split_by_batch(adata: AnnData, batch_col: str = "batch"):
	"""
	Split an AnnData object into one AnnData per batch (metadata preserved).

	:param adata: AnnData object
	:param batch_col: name of the batch column in ``adata.obs``
	:return: dict mapping batch name -> AnnData restricted to that batch
	"""
	if batch_col not in adata.obs.columns:
		raise ValueError(
				f"split_by_batch failed: adata.obs 中不存在批次列 '{batch_col}'")
	groups = adata.obs.groupby(batch_col, observed=True).groups
	return {name: adata[idx, :].copy() for name, idx in groups.items()}


# PCA dimensionality reduction
def compute_pca(adata: AnnData, k: int = 30):
	"""
	PCA dimensionality reduction.

	:param adata: AnnData object
	:param k: number of components to keep
	:return: (cells x k embedding DataFrame, genes x k loading DataFrame)
	"""
	# PCA needs a dense array.
	data = adata.X.toarray() if issparse(adata.X) else adata.X
	model = PCA(n_components=k)
	scores = model.fit_transform(data)
	pc_names = [f"PC{i + 1}" for i in range(k)]
	embedding_df = pd.DataFrame(scores, index=adata.obs_names, columns=pc_names)
	loading_df = pd.DataFrame(
			model.components_.T, index=adata.var_names, columns=pc_names)
	return embedding_df, loading_df


def compute_fastpca(adata: AnnData, k: int = 30, random_state: int = 0):
	"""
	Fast PCA that picks the decomposition algorithm based on the input.

	Large sparse inputs (>1000 cells) are decomposed with TruncatedSVD, which
	works on sparse matrices directly; everything else uses exact PCA on a
	dense array.

	NOTE(review): TruncatedSVD does not mean-center the data, so for the
	sparse branch the components are not numerically identical to PCA's.

	:param adata: AnnData object
	:param k: number of components
	:param random_state: seed for TruncatedSVD's randomized solver — fixed by
		default so repeated runs produce the same embedding (previously
		unseeded and therefore irreproducible)
	:return: (cells x k embedding DataFrame, genes x k loading DataFrame)
	"""
	if issparse(adata.X) and adata.X.shape[0] > 1000:
		# Large sparse matrix: randomized TruncatedSVD, seeded.
		reducer = TruncatedSVD(n_components=k, random_state=random_state)
		embedding = reducer.fit_transform(adata.X)
	else:
		# Small or dense input: exact PCA (requires dense data).
		data = adata.X.toarray() if issparse(adata.X) else adata.X
		reducer = PCA(n_components=k)
		embedding = reducer.fit_transform(data)
	loading = reducer.components_.T
	
	pc_columns = [f"PC{i + 1}" for i in range(k)]
	embedding_df = pd.DataFrame(embedding, index=adata.obs_names, columns=pc_columns)
	loading_df = pd.DataFrame(loading, index=adata.var_names, columns=pc_columns)
	
	return embedding_df, loading_df


# Genes shared across batches
def get_common_genes(batch_loading_dict: dict):
	"""
	Compute the genes present in every batch.

	:param batch_loading_dict: mapping batch name -> loading DataFrame
	:return: sorted list of genes shared by all batches
	"""
	common = None
	for loading_df in batch_loading_dict.values():
		genes = set(loading_df.index)
		common = genes if common is None else common & genes
	return sorted(common)


# Merge the common-gene loadings of all batches
def merge_batch_loadings(batch_loading_dict: dict, common_genes: list, k: int):
	"""
	Concatenate the common-gene loadings of every batch side by side.

	:param batch_loading_dict: mapping batch name -> loading DataFrame
	:param common_genes: genes shared by all batches
	:param k: number of components per batch
	:return: genes x (n_batches * k) DataFrame with columns "<batch>_PC<i>"
	"""
	pieces = []
	for name, loading_df in batch_loading_dict.items():
		# Restrict to the shared genes, then tag columns with the batch name.
		piece = loading_df.loc[common_genes, :].set_axis(
				[f"{name}_PC{i + 1}" for i in range(k)], axis=1)
		pieces.append(piece)
	return pd.concat(pieces, axis=1)


def merge_batch_loadings_parallel(batch_loading_dict: dict, common_genes: list, k: int):
	"""
	Parallel variant of merge_batch_loadings.

	Each batch's common-gene block is extracted in a thread pool, then all
	blocks are stitched together column-wise.
	"""
	
	def extract(item):
		# One batch -> (values restricted to common genes, tagged column names).
		name, loading_df = item
		block = loading_df.loc[common_genes].values
		cols = [f"{name}_PC{j + 1}" for j in range(k)]
		return block, cols, name
	
	with ThreadPoolExecutor() as pool:
		extracted = list(pool.map(extract, batch_loading_dict.items()))
	
	blocks = [block for block, _, _ in extracted]
	columns = [c for _, cols, _ in extracted for c in cols]
	return pd.DataFrame(np.hstack(blocks), index=common_genes, columns=columns)


# Build the mf_loading matrix
def generate_mf_loading(merge_loading: pd.DataFrame, C: int):
	"""
	Cluster the per-batch components and extract one meta-feature per cluster.

	Each column of ``merge_loading`` is one batch-specific component; k-means
	groups similar components across batches, and each cluster is collapsed to
	a single loading vector (its first principal direction).

	:param merge_loading: genes x (n_batches * k) merged loading DataFrame
	:param C: number of meta-features (clusters) to produce
	:return: genes x C DataFrame with columns "Cluster_<c>"
	"""
	# Seeded and with explicit n_init so results are reproducible across runs
	# and sklearn versions (the previous unseeded KMeans was nondeterministic,
	# and inconsistent with generate_mf_loading_parallel's n_init=10).
	kmeans = KMeans(n_clusters=C, n_init=10, random_state=0)
	cluster_labels = kmeans.fit_predict(merge_loading.T)
	
	mf_loading = pd.DataFrame(
			np.zeros((len(merge_loading.index), C)),
			index=merge_loading.index,
			columns=[f"Cluster_{c}" for c in range(C)]
	)
	
	for c in range(C):
		# Select member columns by cluster label directly. The previous
		# rename-then-substring lookup could produce duplicate column names
		# (two batches contributing the same PC index to one cluster), which
		# made ``.loc`` select duplicates multiply.
		member_mask = cluster_labels == c
		if not member_mask.any():
			continue
		cluster_block = merge_loading.loc[:, member_mask]
		if cluster_block.shape[1] > 1:
			# Collapse the cluster to its first principal direction.
			# NOTE(review): the sign of a PCA component is arbitrary.
			pca_block = PCA(n_components=1)
			pca_block.fit(cluster_block.T)
			mf = pca_block.components_[0]
		else:
			mf = cluster_block.values.flatten()
		mf_loading[f"Cluster_{c}"] = mf
	
	return mf_loading


def generate_mf_loading_parallel(merge_loading: pd.DataFrame, C: int):
	"""
	Parallel (per-cluster) variant of generate_mf_loading.

	NOTE(review): clusters are collapsed with TruncatedSVD.fit_transform,
	whose output scaling differs from the PCA components used by the serial
	version — the two functions are not numerically interchangeable.

	:param merge_loading: genes x (n_batches * k) merged loading DataFrame
	:param C: number of meta-features (clusters) to produce
	:return: genes x C DataFrame with columns "Cluster_<c>"
	"""
	loading_array = merge_loading.values.T
	# Seeded so cluster assignments (and thus the output) are reproducible.
	kmeans = KMeans(n_clusters=C, n_init=10, random_state=0)
	cluster_labels = kmeans.fit_predict(loading_array)
	n_genes = merge_loading.shape[0]
	mf_loading = np.zeros((n_genes, C))
	
	def process_cluster(c):
		# Collapse one cluster of components into a single loading vector.
		cluster_indices = np.where(cluster_labels == c)[0]
		if len(cluster_indices) == 0:
			return c, None
		cluster_data = loading_array[cluster_indices].T
		if len(cluster_indices) > 1:
			# TruncatedSVD is imported at module level; the previous per-call
			# local import was redundant.
			svd = TruncatedSVD(n_components=1, random_state=42)
			mf = svd.fit_transform(cluster_data).flatten()
		else:
			mf = cluster_data.flatten()
		return c, mf
	
	# Process each cluster in parallel.
	with ThreadPoolExecutor() as executor:
		results = list(executor.map(process_cluster, range(C)))
	for c, mf in results:
		if mf is not None:
			mf_loading[:, c] = mf
	
	return pd.DataFrame(
			mf_loading,
			index=merge_loading.index,
			columns=[f"Cluster_{c}" for c in range(C)]
	)


# Align embedding spaces
def align_embeddings(embedding_df: pd.DataFrame,
                     loading_df: pd.DataFrame,
                     common_genes: list,
                     mf_loading: pd.DataFrame):
	"""
	Project one batch's embedding into the shared C-dimensional space.

	Computes the linear map taking the batch's loading (restricted to the
	common genes) onto the meta-feature loading, and applies it to the
	embedding.

	:param embedding_df: cells x k embedding of one batch
	:param loading_df: genes x k loading of the same batch
	:param common_genes: genes shared by all batches
	:param mf_loading: genes x C meta-feature loading matrix
	:return: cells x C aligned embedding DataFrame
	"""
	restricted = loading_df.loc[common_genes, :]
	transform = np.linalg.pinv(restricted.values) @ mf_loading.values
	aligned = embedding_df.values @ transform
	return pd.DataFrame(
			aligned,
			index=embedding_df.index,
			columns=mf_loading.columns
	)


def align_embeddings_fast(embedding_df: pd.DataFrame,
                          loading_df: pd.DataFrame,
                          common_genes: list,
                          mf_loading: pd.DataFrame):
	"""
	Faster alignment: least squares instead of an explicit pseudo-inverse.

	:param embedding_df: cells x k embedding of one batch
	:param loading_df: genes x k loading of the same batch
	:param common_genes: genes shared by all batches
	:param mf_loading: genes x C meta-feature loading matrix
	:return: cells x C aligned embedding DataFrame
	"""
	restricted = loading_df.loc[common_genes].values
	targets = mf_loading.values
	
	try:
		# lstsq is more stable and cheaper than forming pinv explicitly.
		transform = np.linalg.lstsq(restricted, targets, rcond=None)[0]
	except np.linalg.LinAlgError:
		# Fall back to the pseudo-inverse if least squares fails.
		transform = np.linalg.pinv(restricted) @ targets
	
	aligned = embedding_df.values @ transform
	return pd.DataFrame(
			aligned,
			index=embedding_df.index,
			columns=mf_loading.columns
	)


def align_embeddings_gpu(embedding_df: pd.DataFrame,
                         loading_df: pd.DataFrame,
                         common_genes: list,
                         mf_loading: pd.DataFrame):
	"""
	GPU-accelerated alignment; falls back to the CPU path when CuPy is absent.

	:param embedding_df: cells x k embedding of one batch
	:param loading_df: genes x k loading of the same batch
	:param common_genes: genes shared by all batches
	:param mf_loading: genes x C meta-feature loading matrix
	:return: cells x C aligned embedding DataFrame
	"""
	try:
		import cupy as cp
	except ImportError:
		# CuPy not installed -> CPU fallback.
		return align_embeddings_fast(embedding_df, loading_df, common_genes, mf_loading)
	
	# Move operands to the GPU, do pinv + matmul there, copy the result back.
	emb_gpu = cp.array(embedding_df.values)
	load_gpu = cp.array(loading_df.loc[common_genes].values)
	mf_gpu = cp.array(mf_loading.values)
	transform_gpu = cp.linalg.pinv(load_gpu) @ mf_gpu
	projected = cp.asnumpy(emb_gpu @ transform_gpu)
	
	return pd.DataFrame(
			projected,
			index=embedding_df.index,
			columns=mf_loading.columns
	)


# Empirical-Bayes (ComBat) correction
def apply_combat_correction(feature_matrix: pd.DataFrame, batch_info: pd.Series):
	"""
	Empirical-Bayes (ComBat) batch correction of a cell x feature matrix.

	:param feature_matrix: rows are cells, columns are features
	:param batch_info: batch label per cell, indexable by feature_matrix's rows
	:return: corrected DataFrame with the original index and columns
	"""
	# Align batch labels to the matrix's row order.
	aligned_batches = batch_info.loc[feature_matrix.index]
	# pycombat expects features in rows and samples in columns -> transpose
	# in, transpose back out.
	corrected = pycombat(feature_matrix.T, aligned_batches.values).T
	
	return pd.DataFrame(corrected,
	                    index=feature_matrix.index,
	                    columns=feature_matrix.columns)


# Batch-effect correction via mean centering
def batch_mean_alignment(embedding: np.ndarray, batch_labels: np.ndarray):
	"""
	Remove batch effects by recentering each batch onto the global mean.

	corrected = original - batch_mean + global_mean

	:param embedding: (n_cells, n_features) embedding matrix
	:param batch_labels: (n_cells,) batch label per cell
	:return: corrected embedding of the same shape
	"""
	global_mean = embedding.mean(axis=0)
	corrected = np.zeros_like(embedding)
	for label in np.unique(batch_labels):
		mask = batch_labels == label
		batch_rows = embedding[mask]
		corrected[mask] = batch_rows - batch_rows.mean(axis=0) + global_mean
	return corrected


def apply_combat_correction_parallel(feature_matrix: pd.DataFrame,
                                     batch_info: pd.Series,
                                     n_jobs: int = 4,
                                     chunk_size: int = 500):
	"""
	ComBat correction with the feature axis processed in parallel chunks.

	:param feature_matrix: rows are cells, columns are features
	:param batch_info: batch label per cell
	:param n_jobs: number of worker threads
	:param chunk_size: number of feature columns per chunk
	:return: corrected cell x feature DataFrame
	"""
	batch_info = batch_info.loc[feature_matrix.index]
	n_features = feature_matrix.shape[1]
	
	# Split the feature axis into column chunks.
	feature_chunks = []
	for i in range(0, n_features, chunk_size):
		end_idx = min(i + chunk_size, n_features)
		feature_chunks.append(feature_matrix.iloc[:, i:end_idx])
	
	def process_chunk(chunk):
		# Best-effort: a failed chunk falls back to its uncorrected values.
		try:
			corrected = pycombat(chunk.T, batch_info.values)
			return corrected.T
		except Exception as e:
			print(f"Chunk处理失败: {str(e)}")
			return chunk.values
	
	# ThreadPoolExecutor, not ProcessPoolExecutor: a nested closure cannot be
	# pickled, so the previous process pool raised a pickling error before
	# doing any work. Threads also match the file's other parallel helpers.
	with ThreadPoolExecutor(max_workers=n_jobs) as executor:
		results = list(executor.map(process_chunk, feature_chunks))
	corrected_matrix = np.hstack(results)
	
	return pd.DataFrame(
			corrected_matrix,
			index=feature_matrix.index,
			columns=feature_matrix.columns
	)


# Main pipeline function
def meta_feature_integration(adata: AnnData,
                             n_comps=25,
                             batch_col: str = "batch",
                             method: str = 'pca',
                             apply_combat: bool = True):
	"""
	Main pipeline: compute a batch-integrated embedding (mf_embedding) and the
	shared meta-feature loading (mf_loading), with batch-effect removal.

	NOTE: with ``method='pca'`` the input ``adata`` is normalized and
	log-transformed *in place* by scanpy before standardization.

	:param adata: AnnData object
	:param n_comps: number of output dimensions
	:param batch_col: name of the batch column in ``adata.obs``
	:param method: dimensionality-reduction method, 'pca' or 'mca'
	:param apply_combat: whether to run ComBat on the aligned embedding
	:return: (mf_embedding, mf_loading)
	:raises ValueError: unknown method, or batches share no genes
	"""
	# Step 0: batch-aware standardization (pca only)
	if method == 'pca':
		sc.pp.normalize_total(adata, target_sum=1e4)
		sc.pp.log1p(adata)
		adata_std = batch_aware_standardization(adata, batch_col)
	elif method == 'mca':
		adata_std = adata.copy()
	else:
		# The old message called inspect.currentframe() without importing
		# inspect, so this path raised NameError instead of ValueError.
		raise ValueError(
				f"meta_feature_integration: invalid method '{method}', expected 'pca' or 'mca'.")
	
	# Step 1: split data by batch
	batch_dict = split_by_batch(adata_std, batch_col)
	
	# Step 2: per-batch decomposition (embedding and loading per batch)
	batch_embedding, batch_loading = {}, {}
	for batch_name, batch_adata in batch_dict.items():
		if method == 'pca':
			embedding_df, loading_df = compute_pca(batch_adata, k=n_comps)
		elif method == 'mca':
			embedding_df, loading_df, _ = run_mca(batch_adata, nmcs=n_comps, meta=True)
		else:
			# Unreachable: method was validated above. Kept as a safeguard.
			raise ValueError(
					f"meta_feature_integration: invalid method '{method}', expected 'pca' or 'mca'.")
		batch_embedding[batch_name] = embedding_df
		batch_loading[batch_name] = loading_df
	
	# Step 3: genes common to all batches
	common_genes = get_common_genes(batch_loading)
	if not common_genes:
		raise ValueError("meta_feature_integration: 各批次无共同基因，无法继续分析！")
	
	# Step 4: merge per-batch loadings on the common genes
	merge_loading = merge_batch_loadings(batch_loading, common_genes, n_comps)
	
	# Step 5: build the meta-feature loading matrix
	mf_loading = generate_mf_loading(merge_loading, n_comps)
	
	# Step 6: align every batch's embedding into the shared space
	aligned_features_dict = {}
	for batch_name, embedding_df in batch_embedding.items():
		aligned_features = align_embeddings(
				embedding_df,
				batch_loading[batch_name],
				common_genes,
				mf_loading
		)
		aligned_features_dict[batch_name] = aligned_features
	# Concatenate the per-batch embeddings (rows are grouped by batch, not in
	# the original adata.obs order — callers must reindex if order matters).
	mf_embedding = pd.concat(aligned_features_dict.values(), axis=0)
	
	# Step 7: optional ComBat correction
	if apply_combat:
		batch_info = adata.obs[batch_col]
		mf_embedding = apply_combat_correction(mf_embedding, batch_info)
	
	return mf_embedding, mf_loading


# Pipeline smoke test
if __name__ == '__main__':
	import time
	
	path_gse = "../../data/input/anndata/GSE96583.h5ad"
	# Load the data.
	adata = sc.read_h5ad(path_gse)
	adata.var_names_make_unique()
	
	# Basic QC filtering and highly-variable-gene selection.
	sc.pp.filter_cells(adata, min_genes=50)
	sc.pp.filter_genes(adata, min_cells=3)
	sc.pp.highly_variable_genes(adata, n_top_genes=3000, flavor="seurat_v3")
	adata = adata[:, adata.var.highly_variable]
	
	start_time = time.time()
	# Run the main pipeline.
	mf_embedding, mf_loading = meta_feature_integration(adata, batch_col="stim", method="pca")
	# mf_embedding rows come back grouped by batch; reindex to adata.obs
	# order before storing, otherwise obsm would be misaligned (or rejected
	# by anndata's index check).
	adata.obsm["X_integrated"] = mf_embedding.loc[adata.obs_names]
	end_time = time.time()
	print(f"Time cost: {end_time - start_time:.2f}s")

# # 可视化
# sc.pp.neighbors(adata, use_rep="X_integrated")
# sc.tl.umap(adata)
# sc.pl.umap(adata, color=["stim", "cell"])
