# -*- coding: utf-8 -*-
# Author       : Xinwu
# Email        : lexinwu@outlook.com
# Describe     : scanpy for scRNA analysis
# Created Time : 2023-06-14 15:40:17
# Last Modified: 2023-06-28 15:28:12

import os
import scvi
import logging
import argparse
import anndata as ad
import scanpy as sc
import numpy as np
import pandas as pd
import external_function as ef
import matplotlib.pyplot as plt
from parse_config import parse_json_config, print_config
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(message)s',level = logging.INFO)
script_path = os.path.split(os.path.realpath(__file__))[0]

def get_opt():
	"""Parse and return the command-line options for the pipeline.

	All defaults, choices and help texts are defined here; the config file
	(``--config_file``) supplies the remaining flexible parameters.
	"""
	ap = argparse.ArgumentParser()
	# required inputs
	ap.add_argument("--outdir", type = str, required = True, help = "分析结果输出路径")
	ap.add_argument("--config_file", type = str,
					help = "配置文件, 默认使用脚本路径下的 config_scanpy_pipeline.json 文件",
					default = f'{script_path}/config_scanpy_pipeline.json')
	ap.add_argument("--tax", type = str, required = True, help = "物种名称, eg: human, mouse,...")
	ap.add_argument("--infile", type = str, required = True, help = "输入文件, 仅支持cell*gene的mtx矩阵 或 h5ad 格式的 adata 文件")
	ap.add_argument("--data_type", type = str, help = "输入文件的格式, mtx 或者 h5ad, Default: mtx",
					choices = ["mtx","h5ad","10x_mtx","10x_h5"], default = "mtx")
	ap.add_argument("--sample_list", type = str, required = True, help = "样本名list, 逗号分隔")
	ap.add_argument("--integrate_method", type = str, help = "多样本整合时去批次方式, Default: null",
					choices = ["harmony","bbknn","scanorama","combat","scvi","null"], default = "null")
	# QC thresholds
	ap.add_argument("--min_genes", type = int, help = "每个细胞表达基因数目的下限, Default: 200", default = 200)
	ap.add_argument("--max_genes", type = int, help = "每个细胞表达基因数目的上限, Default: 10000", default = 10000)
	ap.add_argument("--min_cells", type = int, help = "每个基因在细胞中检出数目的下限, Default: 3", default = 3)
	ap.add_argument("--mito_cutoff", type = float, help = "每个细胞线粒体基因表达含量的阈值, Default: 10", default = 10)
	ap.add_argument("--redcell_cutoff", type = float, help = "每个细胞红细胞标记基因表达含量的上限, Default: 10", default = 10)
	ap.add_argument("--ribo_cutoff", type = float, help = "每个细胞核糖体标记基因表达含量的上限, Default: 75", default = 75)
	ap.add_argument("--doublet_filter", type = str, help = "是否过滤双细胞, Default: False", default = "False", choices = ['True','False'])
	ap.add_argument("--expected_doublet_rate", type = float, help = "预估双细胞率, Default: 0.06", default = 0.06)
	# downstream analysis parameters
	ap.add_argument("--pcs_number", type = int, help = "PCA 降维数: Default: 30", default = 30)
	ap.add_argument("--resolution", type = str, help = "聚类时的 resolution, 可指定多个,使用逗号分隔即可, Default: 0.9", default = "0.9")
	return ap.parse_args()

# create anndata object from mtx(gene x cell) or cellranger filtered_feature_bc_matrix or h5ad file
def create_obj(infile,data_type,samples):
	"""Build an AnnData object from one of the supported input formats.

	Parameters
	----------
	infile : str
		Path to the expression matrix / directory / h5 / h5ad file.
	data_type : str
		One of "mtx" (plain gene x cell table, orientation flipped via .T),
		"10x_mtx", "10x_h5" or "h5ad".
	samples : list[str]
		Sample names; barcodes ending in "-<i>" (1-based) are labelled
		with samples[i-1] in adata.obs['Sample'].

	Returns
	-------
	AnnData with unique var/obs names and a 'Sample' obs column.
	"""
	if data_type == "mtx":
		# file_type comes from the module-level config_dict set in __main__
		file_type = config_dict['flexible_parameters']['file_type']
		if file_type == "table":
			adata = sc.read_csv(infile,delimiter = '\t').T
		elif file_type == "csv":
			adata = sc.read_csv(infile,delimiter = ',').T
		else:
			raise Exception(logger.error("gene x cell expr matrix file_type must be table or csv, please check. "))
	elif data_type == "10x_mtx":
		adata = sc.read_10x_mtx(infile,var_names = "gene_symbols",cache = False)
	elif data_type == "10x_h5":
		# BUG FIX: the return value was previously discarded, which raised
		# NameError on `adata` below whenever data_type == "10x_h5".
		adata = sc.read_10x_h5(infile)
	elif data_type == "h5ad":
		adata = sc.read_h5ad(infile)
	else:
		raise Exception(logger.error("data_type must be mtx or h5ad or 10x_mtx or 10x_h5, please check. "))

	adata.var_names_make_unique()
	adata.obs_names_make_unique()
	if 'Sample' in adata.obs.columns:
		logger.info("Sample str is already in adata.obs columns")
	else:
		adata.obs['Sample'] = ""
		for i, sample in enumerate(samples):
			# cellranger-style aggregated barcodes carry a 1-based "-<n>" suffix
			adata.obs.loc[adata.obs_names.str.endswith(f"-{i + 1}"),"Sample"] = sample
	return adata

def filter_data(adata,samples):
	"""Compute QC metrics, plot before/after violins, and filter cells/genes.

	Uses module-level `args` and `config_dict`. Writes QC_before_filter.pdf,
	Barcode_Infor.xls and QC_after_filtered.pdf into args.outdir, freezes the
	filtered state into `.raw` and a "counts" layer, and returns the filtered
	AnnData.
	"""
	mt_gene = config_dict['gene_list'][args.tax]['mtRNA']
	rRNA_gene = config_dict['gene_list'][args.tax]['rRNA']
	redcell_gene = config_dict['gene_list'][args.tax]['redcell']
	# Flag each annotation gene set, then compute all pct_counts_* columns in
	# a single pass (previously calculate_qc_metrics ran once per gene set).
	qc_vars = []
	if len(mt_gene) > 0:
		adata.var['mt'] = adata.var_names.isin(mt_gene)
		qc_vars.append('mt')
	if len(rRNA_gene) > 0:
		adata.var['ribo'] = adata.var_names.isin(rRNA_gene)
		qc_vars.append('ribo')
	if len(redcell_gene) > 0:
		adata.var['redcell'] = adata.var_names.isin(redcell_gene)
		qc_vars.append('redcell')
	if qc_vars:
		sc.pp.calculate_qc_metrics(adata, qc_vars = qc_vars, percent_top = None, log1p = False, inplace = True)
	feature_list = ['total_counts','n_genes_by_counts','pct_counts_mt','pct_counts_ribo','pct_counts_redcell']
	feature_list = [j for j in feature_list if j in adata.obs.columns]
	fig,ax = plt.subplots(figsize=(2*len(feature_list),6))
	sc.pl.violin(adata, feature_list, jitter = 0.4, multi_panel = True, show = False, ax = ax)
	plt.savefig(f'{args.outdir}/QC_before_filter.pdf')
	plt.close(fig)  # BUG FIX: figures were never closed, leaking memory
	if args.doublet_filter == 'True':
		# NOTE(review): config key 'bacth_key' looks like a typo for
		# 'batch_key', but it must match the JSON config file — confirm
		# before renaming on either side.
		batch_key = config_dict['flexible_parameters']['bacth_key']
		doublet_method = config_dict['flexible_parameters']['doublet_method']
		if doublet_method == 'scrublet':
			sc.external.pp.scrublet(adata, batch_key = batch_key, expected_doublet_rate = args.expected_doublet_rate)
		else:
			adata = ef.scvi_doublet_predict(adata = adata, batch_key = batch_key, doublet_score = 1)
	# dump the pre-filter per-barcode metadata for record keeping
	meta_data = pd.DataFrame(adata.obs)
	meta_data = meta_data.rename_axis('Barcode').reset_index()
	meta_data.to_csv(f'{args.outdir}/Barcode_Infor.xls', index = False, sep = "\t")
	sc.pp.filter_cells(adata, min_genes = args.min_genes)
	sc.pp.filter_cells(adata, max_genes = args.max_genes)
	sc.pp.filter_genes(adata, min_cells = args.min_cells)
	if len(mt_gene) > 0:
		adata = adata[adata.obs.pct_counts_mt < args.mito_cutoff]
	if len(rRNA_gene) > 0:
		adata = adata[adata.obs.pct_counts_ribo < args.ribo_cutoff]
	if len(redcell_gene) > 0:
		adata = adata[adata.obs.pct_counts_redcell < args.redcell_cutoff]
	if args.doublet_filter == 'True':
		adata = adata[~adata.obs.predicted_doublet]
	fig,ax = plt.subplots(figsize=(2*len(feature_list),6))
	sc.pl.violin(adata, feature_list, jitter = 0.4, multi_panel = True, show = False, ax = ax)
	plt.savefig(f'{args.outdir}/QC_after_filtered.pdf')
	plt.close(fig)  # BUG FIX: close the after-filter figure as well
	adata.raw = adata  # freeze the state in `.raw`, can use adata.raw.to_adata() to recover
	adata.layers["counts"] = adata.X.copy()  # preserve counts, this will not change
	return adata

def _hvg_scale_pca(adata, hvg_num, batch_key, pcs_number):
	# Shared preprocessing: HVG selection -> scaling -> PCA on HVGs.
	sc.pp.highly_variable_genes(adata, n_top_genes = hvg_num, batch_key = batch_key)
	sc.pp.scale(adata, max_value = 10)
	sc.tl.pca(adata, use_highly_variable = True, n_comps = pcs_number)

def _embed(adata, pcs_number, use_rep):
	# Shared embedding: kNN graph -> UMAP -> t-SNE on the given representation.
	sc.pp.neighbors(adata, n_pcs = pcs_number, n_neighbors = 10, use_rep = use_rep)
	sc.tl.umap(adata)
	sc.tl.tsne(adata, n_pcs = pcs_number, use_rep = use_rep)

def remove_batch_effect(adata, integrate_method, pcs_number):
	"""Normalize, log-transform and integrate samples, then embed (UMAP/t-SNE).

	Parameters
	----------
	adata : AnnData
		Filtered data with raw counts in .X.
	integrate_method : str
		One of "combat", "bbknn", "scanorama", "harmony", "scvi"; anything
		else means no batch correction.
	pcs_number : int
		Number of principal components.

	Also saves an elbow plot of PCA variance and, for scvi, the trained model.
	"""
	# NOTE(review): 'bacth_key' looks like a typo but must match the JSON
	# config — keep both sides in sync if renaming.
	batch_key = config_dict['flexible_parameters']['bacth_key']
	hvg_num = config_dict['flexible_parameters']['hvg_num']
	sc.pp.normalize_total(adata, target_sum = 1e4)
	#sc.pp.regress_out(adata, ['total_counts', 'pct_counts_mt']) # 是否去除非期望来源的方差对数据的影响
	sc.pp.log1p(adata)
	if integrate_method == "combat":
		# combat corrects the expression matrix itself, so run it first
		sc.pp.combat(adata, key = batch_key)
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		_embed(adata, pcs_number, "X_pca")

	elif integrate_method == "bbknn":
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		sc.external.pp.bbknn(adata, batch_key = batch_key, use_rep = "X_pca", n_pcs = pcs_number)
		# BUG FIX: previously sc.pp.neighbors was re-run here, which
		# overwrote the batch-corrected neighbor graph bbknn just built.
		sc.tl.umap(adata)
		sc.tl.tsne(adata, n_pcs = pcs_number, use_rep = "X_pca")

	elif integrate_method == "scanorama":
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		sc.external.pp.scanorama_integrate(adata, key = batch_key, basis = "X_pca")
		_embed(adata, pcs_number, "X_scanorama")

	elif integrate_method == "harmony":
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		sc.external.pp.harmony_integrate(adata, key = batch_key, basis = "X_pca")
		_embed(adata, pcs_number, "X_pca_harmony")

	elif integrate_method == "scvi":
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		adata,model = ef.scvi_batch_effect(adata = adata, batch_key = batch_key, layer = 'counts')
		model.save(f'{args.outdir}/scvi_model')
		# scvi latent space has its own dimensionality, so no n_pcs here
		sc.pp.neighbors(adata, n_neighbors = 10, use_rep = 'X_scvi')
		sc.tl.umap(adata)
		sc.tl.tsne(adata, use_rep = "X_scvi")

	else:
		# no batch correction
		_hvg_scale_pca(adata, hvg_num, batch_key, pcs_number)
		_embed(adata, pcs_number, "X_pca")

	# sc.pl.pca_variance_ratio draws on its own figure; save and close it
	sc.pl.pca_variance_ratio(adata, n_pcs = pcs_number, log = True, show = False)
	plt.savefig(f'{args.outdir}/ElbowPlot_PC.pdf')
	plt.close()
	return adata

def find_cluster(adata, resolution):
	"""Cluster cells at the given resolution.

	The algorithm (leiden or louvain) comes from the config file; the result
	is stored under obs key "<method>_res_<resolution>". Returns adata.
	"""
	cluster_method = config_dict['flexible_parameters']['cluster_method']
	if cluster_method == "leiden":
		sc.tl.leiden(adata, resolution = resolution, key_added = f'leiden_res_{resolution}')
	elif cluster_method == "louvain":
		sc.tl.louvain(adata, resolution = resolution, key_added = f'louvain_res_{resolution}')
	else:
		# BUG FIX: error message previously misspelled "louvain" as "lovain"
		raise Exception("[Cluster_MethodError: ] cluster_method must be leiden or louvain, please check.")
	return adata

def plot_dim(adata, resolution, colorby, samples):
	"""Save t-SNE and UMAP plots colored by cluster (and by Sample if multi-sample).

	Output goes to {args.outdir}/clusters_resolution{resolution}/. Multi-sample
	runs get a 3-panel figure (Sample, cluster, cluster-with-labels), single
	samples a 2-panel one.
	"""
	outdir = f'{args.outdir}/clusters_resolution{resolution}'
	for basis, plot in (("tsne", sc.pl.tsne), ("umap", sc.pl.umap)):
		if len(samples) > 1:
			fig,ax = plt.subplots(1,3, figsize=(18,5), constrained_layout = True)
			plot(adata, color = 'Sample', show = False, ax = ax[0])
			plot(adata, color = colorby, show = False, ax = ax[1])
			plot(adata, color = colorby, legend_loc = 'on data', show = False, ax = ax[2])
			fig.savefig(f'{outdir}/Sample_cluster_{basis}.pdf')
		else:
			fig,ax = plt.subplots(1,2, figsize=(12,5), constrained_layout = True)
			plot(adata, color = colorby, show = False, ax = ax[0])
			plot(adata, color = colorby, legend_loc = 'on data', show = False, ax = ax[1])
			fig.savefig(f'{outdir}/cluster_{basis}.pdf')
		# BUG FIX: figures were never closed; with several resolutions this
		# accumulated open figures and leaked memory.
		plt.close(fig)

def find_all_marker(adata, groupby, resolution, method = "scanpy"):
	"""Run differential expression per cluster and write marker tables.

	Parameters
	----------
	adata : AnnData
	groupby : str
		obs column holding cluster labels (e.g. "leiden_res_0.9").
	resolution : float
		Used only to build the output directory name.
	method : str, optional
		"scanpy" for sc.tl.rank_genes_groups, anything else for scvi-based DE.
		BUG FIX: a default was added because __main__ called this function
		without the argument, which raised TypeError.

	Returns adata (uns updated with DE results for the scanpy path).
	"""
	if method == "scanpy":
		diff_method = config_dict['flexible_parameters']['diff_method']
		sc.tl.rank_genes_groups(adata, groupby = groupby, method = diff_method, rankby_abs = True, use_raw = False)
		result = pd.DataFrame(sc.get.rank_genes_groups_df(adata, group = None))
		result.to_csv(f'{args.outdir}/clusters_resolution{resolution}/project.FindAllMarkers.xls', sep = "\t", header = True, index = False)
		adata.uns['log1p']["base"] = None
		group_list = list(result['group'].drop_duplicates())
		for i in group_list:
			group_tmp = result[result['group'] == i]
			group_tmp.to_csv(f'{args.outdir}/clusters_resolution{resolution}/cluster_{i}.xls', sep = "\t", header = True, index = False)
	else:
		result = ef.scvi_DE(adata, groupby, model = f'{args.outdir}/scvi_model')
		result = result.rename_axis('gene').reset_index()
		# BUG FIX: previously written to the current working directory instead
		# of the per-resolution output folder like every other table.
		result.to_csv(f'{args.outdir}/clusters_resolution{resolution}/scvi_marker_gene.xls', sep = "\t", header = True, index = False)
		group_list = list(result['group1'].drop_duplicates())
		for i in group_list:
			# BUG FIX: filtered on 'group' although the scvi result uses
			# 'group1' (see group_list above), which raised KeyError.
			group_tmp = result[result['group1'] == i]
			group_tmp.to_csv(f'{args.outdir}/clusters_resolution{resolution}/cluster_scvi_{i}.xls', sep = "\t", header = True, index = False)

	return adata

if __name__ == "__main__":
	args = get_opt()
	if not os.path.isdir(args.outdir):
		os.makedirs(args.outdir)
	print(args)
	# strip a trailing "/" so f-string paths below don't get a double slash
	args.outdir = args.outdir[0:-1] if args.outdir.endswith("/") else args.outdir
	config_dict = parse_json_config(args.config_file)
	samples = args.sample_list.split(",")
	adata = create_obj(args.infile, args.data_type, samples)
	adata = filter_data(adata, samples)
	adata = remove_batch_effect(adata, args.integrate_method, args.pcs_number)
	# BUG FIX: cluster_method was previously read AFTER its first use inside
	# the loop, raising NameError on the first iteration.
	cluster_method = config_dict['flexible_parameters']['cluster_method']
	resolution_list = args.resolution.split(",")
	for res in resolution_list:
		res = float(res)
		res_dir = f'{args.outdir}/clusters_resolution{res}'
		if not os.path.isdir(res_dir):
			os.makedirs(res_dir)
		adata = find_cluster(adata, res)
		plot_dim(adata, res, f'{cluster_method}_res_{res}', samples)
		# BUG FIX: the required `method` argument was previously omitted
		# (TypeError). "scanpy" matches the default DE path — TODO confirm
		# whether scvi DE was intended when integrate_method == "scvi".
		adata = find_all_marker(adata, f'{cluster_method}_res_{res}', res, "scanpy")
		adata.write(f'{res_dir}/project.h5ad', compression = 'gzip')
