#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File    : wes_check_gene.py
@Author  : Bing Liang
@Email   : believer19940901@gmail.com
@Date    : 2025/10/23 16:23
@Description : 检测单个基因的所有变异
"""
from argparse import Namespace, ArgumentParser
from pathlib import  Path
from multiprocessing import Pool
from datetime import  datetime
import subprocess
import logging
import json
from typing import Sequence, Dict


# --------
# Software: conda-env binary directories, jar files and reference data
# --------
PYTHON3 = Path("/home/bioinfo/software/miniconda3/envs/python-venv/bin")
BWA = Path("/home/bioinfo/software/miniconda3/envs/bwa/bin")
SAMTOOLS = Path("/home/bioinfo/software/miniconda3/envs/samtools/bin")
FASTP = Path("/home/bioinfo/software/miniconda3/envs/fastp/bin")
FASTQC = Path("/home/bioinfo/software/miniconda3/envs/fastqc/bin")
PICARD = Path("/home/bioinfo/software/miniconda3/envs/picard/bin")
GATK4 = Path("/home/bioinfo/software/miniconda3/envs/gatk4/bin")
JAVA21 = Path("/home/bioinfo/software/miniconda3/envs/java21/bin/java")
SNPEFF_JAR = Path("/home/bioinfo/software/miniconda3/envs/snpeff/share/snpeff-5.3.0a-0/snpEff.jar")
SNPSIFT_JAR = Path("/home/bioinfo/software/miniconda3/envs/snpsift/share/snpsift-5.3.0a-0/SnpSift.jar")
REF_NAME = "GRCh37.p13"  # genome/database name passed to snpEff via -v
TARGETS_BED = Path("/data/mNGS/WES/database/GRCh37.targets.dedup.bed")
ANTITARGETS_BED = Path("/data/mNGS/WES/database/GRCh37.antitargets.dedup.bed")
REFERENCE_CNN = Path("/data/mNGS/WES/database/GRCh37.reference.dedup.cnn")
# NOTE(review): "PTRHON" is a typo for "PYTHON"; name kept as-is because
# renaming a module-level constant would touch every usage site.
CNVKIT_PTRHON3 = Path("/home/bioinfo/software/miniconda3/envs/cnvkit0911/bin/python3")
R_BASE = Path("/home/bioinfo/software/miniconda3/envs/r-base/bin")
REF_FNA = Path("/data/mNGS/WES/database/GRCh37.fna")


# --------
# Helper scripts
# --------
CNVKIT_PY = Path("/home/bioinfo/software/miniconda3/envs/cnvkit0911/bin/cnvkit.py")
WES_CHECK_CNV_PY = Path("/home/bioinfo/repositories/beescripts/src/wes_check_cnv.py")
PARSE_SNP_PY = Path("/home/bioinfo/repositories/beescripts/src/wes_parse_snp.py")


# --------
# ANNOVAR configuration
# --------
TABLE_ANNOVAR_PL = Path("/home/bioinfo/software/annovar/table_annovar.pl")
ANNOVAR_HUMANDB = Path("/home/bioinfo/software/annovar/humandb/")
ANNOVAR_PROTOCOL = "refGene,exac03,avsnp150,ALL.sites.2015_08,EAS.sites.2015_08"
ANNOVAR_OPERATION = "g,f,f,f,f"


# ----------------------------
# Logging configuration
# ----------------------------
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] - [%(levelname)s] - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
LOGGER = logging.getLogger("BJZD-WES-CHECK-GENES")


# ----------------------------
# 辅助函数
# ----------------------------
def _run_or_die(sh_cmd: str, sh_file: Path, log_file: Path, ok_file: Path, skip_ok: bool = True) -> None:
    """
    Write a single shell command to *sh_file* and execute it with bash.

    On success an ``.ok`` marker file is created. On failure the error is
    logged but no exception is raised -- callers are expected to check the
    ``.ok`` files afterwards (this is what :func:`_run` does).

    Parameters
    ----------
    sh_cmd : str
        Shell command content to write and execute.
    sh_file : Path
        Path of the shell script file to create.
    log_file : Path
        File that captures both stdout and stderr of the run.
    ok_file : Path
        Marker file touched only when the command exits with code 0.
    skip_ok : bool, optional
        If True (default) and *ok_file* already exists, the step is
        skipped entirely. (Was previously undocumented.)
    """
    # Check skip_ok first so a disabled skip never needs to stat the file.
    if skip_ok and ok_file.exists():
        LOGGER.info(f"已跳过：{sh_file}（存在 .ok 文件）")
        return

    try:
        LOGGER.info(f"开始执行：{sh_file}")

        # Write the shell script.
        with open(sh_file, "w", encoding="utf-8") as fs:
            fs.write(sh_cmd)

        # Run the script; stdout and stderr both go to the log file.
        with open(log_file, "w", encoding="utf-8") as fl:
            result = subprocess.run(
                f"bash {sh_file}",
                shell=True,
                text=True,
                stdout=fl,
                stderr=fl,
            )

        if result.returncode != 0:
            LOGGER.error(
                f"执行失败：{sh_file}，退出码={result.returncode}"
            )
            return

        # Success: create the .ok marker so a rerun can skip this step.
        ok_file.touch()
        LOGGER.info(f"执行完成：{sh_file}")

    except Exception as e:
        # Deliberately swallowed: failures surface later as missing .ok files.
        LOGGER.error(f"执行异常：{sh_file}，错误：{e}")


def _run(
    sh_files: Sequence[Path],
    sh_cmds: Sequence[str],
    cpu_n: int = 1,
    check_ok: bool = True
) -> None:
    """
    Execute multiple shell scripts in parallel.

    Parameters
    ----------
    sh_files : Sequence[Path]
        Shell script files to generate; `.ok` and `.log` paths are
        derived from them by suffix substitution.
    sh_cmds : Sequence[str]
        Shell command strings, positionally matched with *sh_files*.
    cpu_n : int, optional
        Number of worker processes, default 1.
    check_ok : bool, optional
        Whether to verify all .ok files after execution, default True.

    Raises
    ------
    RuntimeError
        If *check_ok* is True and some .ok files are missing.
    """
    ok_files = [f.with_suffix(".ok") for f in sh_files]
    log_files = [f.with_suffix(".log") for f in sh_files]

    with Pool(cpu_n) as pool:
        # Keep the AsyncResult objects: previously they were discarded,
        # so a worker-level failure (e.g. an argument that cannot be
        # pickled) vanished silently.
        results = [
            pool.apply_async(
                _run_or_die,
                args=(sh_cmd, sh_file, log_file, ok_file)
            )
            for sh_cmd, sh_file, ok_file, log_file
            in zip(sh_cmds, sh_files, ok_files, log_files)
        ]
        pool.close()
        pool.join()
        # Re-raise infrastructure errors here; ordinary task failures are
        # handled inside _run_or_die and reported via missing .ok files.
        for res in results:
            res.get()

    if check_ok:
        failed = [sh for sh, ok in zip(sh_files, ok_files) if not ok.exists()]
        if failed:
            raise RuntimeError(
                "以下步骤未生成 OK 文件：\n  " + "\n  ".join(map(str, failed))
            )


def _check(out_files: Sequence[Path], empty_ok: bool = False) -> None:
    """
    检查指定输出文件是否存在，是否为空。

    Parameters
    ----------
    out_files : Sequence[Path]
        输出文件路径列表。
    empty_ok : bool, optional
        是否允许空文件，默认 False。

    Raises
    ------
    RuntimeError
        当文件不存在或为空（且 empty_ok=False）时抛出。
    """
    missing_files = [f for f in out_files if not f.exists()]
    empty_files = [f for f in out_files if f.exists() and f.stat().st_size == 0]

    if missing_files:
        raise RuntimeError(
            "以下文件不存在：\n  " + "\n  ".join(map(str, missing_files))
        )

    if empty_files and not empty_ok:
        raise RuntimeError(
            "以下文件为空：\n  " + "\n  ".join(map(str, empty_files))
        )


# --------
# 分析流程
# --------
class Begin:
    """
    Pipeline start: parse the JSON config file and validate its contents.
    """

    def __init__(self, args):
        # Step counter; each subclass increments it once in __init__.
        self.step_n = 0

        # Pipeline start time (used by End to report the total runtime).
        self.begin_time = datetime.now()

        # Load the JSON configuration file.
        self.config_json = Path(args.config_json).resolve()
        with open(self.config_json, "r", encoding="utf-8") as fr:
            self.config_dict = json.load(fr)

        # Output directory (required).
        self.out_dir = self.config_dict.get("out_dir")
        if not self.out_dir:
            LOGGER.error("无法获取输出目录")
            raise RuntimeError("无法获取输出目录")
        self.out_dir = Path(self.out_dir).resolve()

        # Log directory (required).
        self.log_dir = self.config_dict.get("log_dir")
        if not self.log_dir:
            LOGGER.error("无法获取日志目录")
            raise RuntimeError("无法获取日志目录")
        self.log_dir = Path(self.log_dir).resolve()

        # Sample -> raw-reads mapping (required).
        self.samples_reads: dict = self.config_dict.get("samples")
        if not self.samples_reads:
            LOGGER.error("无法获取样本列表")
            raise RuntimeError("无法获取样本列表")
        self.samples = list(self.samples_reads)

        # Validate the raw data of every sample: at least a read pair,
        # and every listed file must exist on disk.
        for sample in self.samples:
            reads = self.samples_reads.get(sample)
            if not reads:
                LOGGER.error(f"无法获取样本{sample}的原始数据")
                raise RuntimeError(f"无法获取样本{sample}的原始数据")
            if len(reads) < 2:
                LOGGER.error(f"样本{sample}原始数据缺失")
                raise RuntimeError(f"样本{sample}原始数据缺失")
            for read in reads:
                read = Path(read).resolve()
                if not read.exists() or not read.is_file():
                    LOGGER.error(f"原始数据{read}无法获取")
                    raise RuntimeError(f"原始数据{read}无法获取")

        # Gene -> location mapping (required).
        self.genes_dict: Dict = self.config_dict.get("genes")
        if not self.genes_dict:
            LOGGER.error("无法获取基因列表")
            raise RuntimeError("无法获取基因列表")
        self.genes = list(self.genes_dict)

        # Each gene must provide exactly three fields
        # (chromosome, start position, end position).
        for gene in self.genes:
            if len(self.genes_dict[gene]) != 3:
                LOGGER.error(f"基因{gene}缺少位置信息")
                raise RuntimeError(f"基因{gene}缺少位置信息")

        # Degree of parallelism, default 1.
        self.cpu_n = self.config_dict.get("cpu_n", 1)

    def run(self):
        """Announce the start and create the output/log directories."""
        LOGGER.info("WES CHECK GENES TASK STARTED...")
        self.out_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)


class Fastp(Begin):
    """
    Remove adapters and low-quality reads with fastp.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.fastp_name = f"step{self.step_n}_fastp"
        self.fastp_out_dir = self.out_dir / self.fastp_name
        self.fastp_log_dir = self.log_dir / self.fastp_name
        # Per-sample outputs: clean read pair plus JSON/HTML reports.
        self.fastp_out_f_reads = []
        self.fastp_out_r_reads = []
        self.fastp_out_jsons = []
        self.fastp_out_htmls = []
        for sample in self.samples:
            self.fastp_out_f_reads.append(self.fastp_out_dir / f"{sample}_1.fq.gz")
            self.fastp_out_r_reads.append(self.fastp_out_dir / f"{sample}_2.fq.gz")
            self.fastp_out_jsons.append(self.fastp_out_dir / f"{sample}.json")
            self.fastp_out_htmls.append(self.fastp_out_dir / f"{sample}.html")

    def run(self):
        super().run()
        self.fastp_out_dir.mkdir(parents=True, exist_ok=True)
        self.fastp_log_dir.mkdir(parents=True, exist_ok=True)
        sh_files = []
        sh_cmds = []
        for sample, f_read, r_read, out_json, out_html in zip(
            self.samples,
            self.fastp_out_f_reads,
            self.fastp_out_r_reads,
            self.fastp_out_jsons,
            self.fastp_out_htmls,
        ):
            sh_files.append(self.fastp_log_dir / f"{sample}.sh")
            sh_cmds.append(
                f"#!/bin/bash\n\n"
                f"set -euo pipefail\n\n"
                f"export PATH={FASTP}:$PATH\n\n"
                f"fastp "
                f"-i {self.samples_reads[sample][0]} "
                f"-I {self.samples_reads[sample][1]} "
                f"-o {f_read} "
                f"-O {r_read} "
                f"-q 20 -u 30 -n 5 -w 4 "
                f"-h {out_html} "
                f"-j {out_json}\n\n"
            )
        _run(sh_files, sh_cmds, self.cpu_n)


class MapGenome(Fastp):
    """
    Align clean reads to the reference genome with bwa mem.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.map_genome_out_name = f"step{self.step_n}_map_genome"
        self.map_genome_out_dir = self.out_dir / self.map_genome_out_name
        self.map_genome_log_dir = self.log_dir / self.map_genome_out_name
        # One sorted, indexed BAM per sample.
        self.map_genome_out_bams = [
            self.map_genome_out_dir / f"{sample}.bam"
            for sample in self.samples
        ]

    def run(self):
        super().run()
        self.map_genome_out_dir.mkdir(parents=True, exist_ok=True)
        self.map_genome_log_dir.mkdir(parents=True, exist_ok=True)
        sh_files = [
            self.map_genome_log_dir / f"{sample}.sh"
            for sample in self.samples
        ]
        sh_cmds = []
        for f_read, r_read, out_bam in zip(
            self.fastp_out_f_reads,
            self.fastp_out_r_reads,
            self.map_genome_out_bams,
        ):
            # -F 0x104 drops unmapped (0x4) and secondary (0x100) records.
            sh_cmds.append(
                f"#!/bin/bash\n\n"
                f"set -euo pipefail\n\n"
                f"export PATH={BWA}:$PATH\n\n"
                f"export PATH={SAMTOOLS}:$PATH\n\n"
                f"bwa mem -t 8 {REF_FNA} {f_read} {r_read} | "
                f"samtools view -@ 8 -b -h -F 0x104 | "
                f"samtools sort -@ 8 -o {out_bam}\n\n"
                f"samtools index {out_bam}\n\n"
            )
        _run(sh_files, sh_cmds, self.cpu_n)


class RmDup(MapGenome):
    """
    Remove duplicate reads
    (Picard SortSam -> AddOrReplaceReadGroups -> MarkDuplicates).
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.rm_dup_out_name = f"step{self.step_n}_rm_dup"
        self.rm_dup_out_dir = self.out_dir / self.rm_dup_out_name
        self.rm_dup_log_dir = self.log_dir / self.rm_dup_out_name
        # Coordinate-sorted BAMs produced by SortSam.
        self.rm_dup_sorted_bams  = [
            self.rm_dup_out_dir / f"{sample}.sorted.bam"
            for sample in self.samples
        ]
        # BAMs with read-group information added.
        self.rm_dup_rg_bams = [
            self.rm_dup_out_dir / f"{sample}.sorted.rg.bam"
            for sample in self.samples
        ]
        # Final duplicate-marked BAMs consumed by variant calling.
        self.rm_dup_dedup_bams = [
            self.rm_dup_out_dir / f"{sample}.sorted.rg.dedup.bam"
            for sample in self.samples
        ]
        # MarkDuplicates metrics files.
        self.rm_dup_dedup_metrics = [
            self.rm_dup_out_dir / f"{sample}.metrics.txt"
            for sample in self.samples
        ]


    def run(self):
        super().run()
        self.rm_dup_out_dir.mkdir(parents=True, exist_ok=True)
        self.rm_dup_log_dir.mkdir(parents=True, exist_ok=True)
        # One script per sample running the three-stage Picard chain.
        # The unpack order (..., dedup_matrix, dedup_bam) intentionally
        # matches the zip order below (metrics list before dedup-BAM list).
        # NOTE(review): TMP_DIR is hard-coded to /data/mNGS/host_exp/tmp --
        # confirm this path exists on every host that runs the pipeline.
        _run(
            [
                self.rm_dup_log_dir / f"{sample}.sh"
                for sample in self.samples
            ],
            [
                (
                    f"#!/bin/bash\n\n"
                    f"set -euo pipefail\n\n"
                    f"export PATH={PICARD}:$PATH\n\n"
                    f"picard SortSam "
                    f"-I {in_bam} "
                    f"-O {sorted_bam} "
                    f"-SORT_ORDER coordinate "
                    f"-CREATE_INDEX true "
                    f"-TMP_DIR /data/mNGS/host_exp/tmp\n\n"
                    f"picard AddOrReplaceReadGroups "
                    f"-I {sorted_bam} "
                    f"-O {rg_bam} "
                    f"-SORT_ORDER coordinate "
                    f"-RGID {sample} "
                    f"-RGLB {sample} "
                    f"-RGPL ILLUMINA "
                    f"-RGPU {sample} "
                    f"-RGSM {sample} "
                    f"-CREATE_INDEX true\n\n"
                    f"picard MarkDuplicates "
                    f"-I {rg_bam} "
                    f"-O {dedup_bam} "
                    f"-M {dedup_matrix} "
                    f"-CREATE_INDEX true\n\n"
                )
                for (
                sample,  in_bam, sorted_bam, rg_bam, dedup_matrix, dedup_bam
            ) in zip(
                self.samples,
                self.map_genome_out_bams,
                self.rm_dup_sorted_bams,
                self.rm_dup_rg_bams,
                self.rm_dup_dedup_metrics,
                self.rm_dup_dedup_bams
            )
            ],
            self.cpu_n
        )


class CallSnpIndel(RmDup):
    """
    Call SNPs and indels per sample with GATK HaplotypeCaller.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.call_snp_indel_out_name = f"step{self.step_n}_call_snp_indel"
        self.call_snp_indel_out_dir = self.out_dir / self.call_snp_indel_out_name
        self.call_snp_indel_log_dir = self.log_dir / self.call_snp_indel_out_name
        # One raw VCF per sample.
        self.call_snp_indel_out_vcfs = [
            self.call_snp_indel_out_dir / f"{sample}.raw.vcf.gz"
            for sample in self.samples
        ]

    def run(self):
        super().run()
        self.call_snp_indel_log_dir.mkdir(parents=True, exist_ok=True)
        self.call_snp_indel_out_dir.mkdir(parents=True, exist_ok=True)
        sh_files = [
            self.call_snp_indel_log_dir / f"{sample}.sh"
            for sample in self.samples
        ]
        sh_cmds = []
        for in_bam, out_vcf in zip(self.rm_dup_dedup_bams, self.call_snp_indel_out_vcfs):
            sh_cmds.append(
                f"#!/bin/bash\n\n"
                f"set -euo pipefail\n\n"
                f"export PATH={GATK4}:$PATH\n\n"
                f"gatk HaplotypeCaller "
                f"-R {REF_FNA} "
                f"-I {in_bam} "
                f"-O {out_vcf}\n\n"
            )
        _run(sh_files, sh_cmds, self.cpu_n)


class AnnotateSnpIndel(CallSnpIndel):
    """
    Annotate the raw SNP/indel VCFs with snpEff.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.annotate_snp_indel_out_name = f"step{self.step_n}_annotate_snp_indel"
        self.annotate_snp_indel_out_dir = self.out_dir / self.annotate_snp_indel_out_name
        self.annotate_snp_indel_log_dir = self.log_dir / self.annotate_snp_indel_out_name
        # Annotated VCF plus snpEff stats (CSV and HTML) per sample.
        self.annotate_snp_indel_out_vcfs = [
            self.annotate_snp_indel_out_dir / f"{sample}.annotated.vcf"
            for sample in self.samples
        ]
        self.annotate_snp_indel_out_csvs = [
            self.annotate_snp_indel_out_dir / f"{sample}.annotated.csv"
            for sample in self.samples
        ]
        self.annotate_snp_indel_out_htmls = [
            self.annotate_snp_indel_out_dir / f"{sample}.annotated.html"
            for sample in self.samples
        ]


    def run(self):
        super().run()
        self.annotate_snp_indel_log_dir.mkdir(parents=True, exist_ok=True)
        self.annotate_snp_indel_out_dir.mkdir(parents=True, exist_ok=True)
        _run(
            [
                self.annotate_snp_indel_log_dir / f"{sample}.sh"
                for sample in self.samples
            ],
            [
                (
                    # "set -euo pipefail" added for consistency with every
                    # other step script, so a failed snpEff run cannot leave
                    # a truncated VCF behind a spurious .ok marker.
                    # NOTE(review): -stats/-csvStats appear after the input
                    # VCF; confirm the installed snpEff accepts trailing options.
                    f"#!/bin/bash\n\n"
                    f"set -euo pipefail\n\n"
                    f"{JAVA21} -jar {SNPEFF_JAR} "
                    f"-v {REF_NAME} "
                    f"{in_vcf} "
                    f"-stats {out_html} "
                    f"-csvStats {out_csv} "
                    f"> {out_vcf}\n\n"
                )
                for in_vcf, out_csv, out_html, out_vcf in zip(
                    self.call_snp_indel_out_vcfs,
                    self.annotate_snp_indel_out_csvs,
                    self.annotate_snp_indel_out_htmls,
                    self.annotate_snp_indel_out_vcfs
                )
            ],
            self.cpu_n
        )


class FilterSnpIndel(AnnotateSnpIndel):
    """
    Filter SNPs and indels.
    Currently uses default thresholds; to be tuned later.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.filter_snp_indel_out_name = f"step{self.step_n}_filter_snp_indel"
        self.filter_snp_indel_out_dir = self.out_dir / self.filter_snp_indel_out_name
        self.filter_snp_indel_log_dir = self.log_dir / self.filter_snp_indel_out_name

    def run(self):
        super().run()
        self.filter_snp_indel_log_dir.mkdir(parents=True, exist_ok=True)
        self.filter_snp_indel_out_dir.mkdir(parents=True, exist_ok=True)
        # One script per sample; each script runs one SnpSift filter per
        # target gene (depth >= 10 and genotype quality >= 20 on the first
        # genotype, restricted to annotations for that gene).
        # NOTE(review): "filter --filter {sample}" -- SnpSift's flag for
        # naming the FILTER id is usually "-i/--filterId"; confirm that
        # "--filter" is accepted by the SnpSift version in use.
        _run(
            [
                self.filter_snp_indel_log_dir / f"{sample}.sh"
                for sample in self.samples
            ],
            [
                (
                    f"#!/bin/bash\n\n"
                    f"set -euo pipefail\n\n"
                    +
                    "".join([
                        (
                            f"{JAVA21} -jar {SNPSIFT_JAR} filter --filter {sample} "
                            f"\"(GEN[0].DP >= 10) & (ANN[*].GENE = '{gene}') & (GEN[0].GQ >= 20)\" "
                            f"{raw_vcf} > {self.filter_snp_indel_out_dir / f'{sample}.{gene}.vcf'}\n\n"
                        )
                        for gene in self.genes
                    ])
                )
                for raw_vcf, sample
                in zip(
                self.annotate_snp_indel_out_vcfs,
                self.samples
            )
            ],
            self.cpu_n
        )


class Annovar(FilterSnpIndel):
    """
    Annotate the filtered results with ANNOVAR (table_annovar.pl).
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.annovar_out_name = f"step{self.step_n}_annovar"
        self.annovar_out_dir = self.out_dir / self.annovar_out_name
        self.annovar_log_dir = self.log_dir / self.annovar_out_name


    def run(self):
        super().run()
        self.annovar_out_dir.mkdir(parents=True, exist_ok=True)
        self.annovar_log_dir.mkdir(parents=True, exist_ok=True)
        # One script per sample; each runs table_annovar.pl once per gene
        # on the per-gene VCF produced by the FilterSnpIndel step.
        # NOTE(review): "mkdir -p .../sample/gene" creates a directory at
        # the very path that is then passed to "-out" as a file *prefix*
        # (ANNOVAR writes "<prefix>.<suffix>" files next to it) -- confirm
        # whether the prefix should be ".../sample/gene/gene" instead.
        _run(
            [
                self.annovar_log_dir / f"{sample}.sh"
                for sample in self.samples
            ],
            [
                (
                    f"#!/bin/bash\n\n"
                    f"set -euo pipefail\n\n"
                    +
                    "".join([
                        (
                            f"mkdir -p {self.annovar_out_dir / sample / gene}\n\n"
                            f"perl {TABLE_ANNOVAR_PL} "
                            f"{self.filter_snp_indel_out_dir / f'{sample}.{gene}.vcf'} "
                            f"{ANNOVAR_HUMANDB} "
                            f"-buildver hg19 "
                            f"-out {self.annovar_out_dir / sample / gene} "
                            f"-remove "
                            f"-protocol {ANNOVAR_PROTOCOL} "
                            f"-operation {ANNOVAR_OPERATION} "
                            f"-nastring . -vcfinput -polish  -otherinfo\n\n"
                        )
                        for gene in self.genes
                    ])

                )
                for sample in self.samples
            ],
            self.cpu_n
        )


class CallCNV(Annovar):
    """
    Call CNVs with cnvkit, annotate the calls with snpEff, then filter
    the annotated CNV VCF down to the target genes with SnpSift.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.call_cnv_out_name = f"step{self.step_n}_call_cnv"
        self.call_cnv_out_dir = self.out_dir / self.call_cnv_out_name
        self.call_cnv_log_dir = self.log_dir / self.call_cnv_out_name
        # Per-sample cnvkit working directory and derived outputs.
        self.call_cnv_sample_dirs = [
            self.call_cnv_out_dir / sample
            for sample in self.samples
        ]
        self.call_cnv_out_call_cns = [
            self.call_cnv_out_dir / sample / f"{sample}.call.cns"
            for sample in self.samples
        ]
        self.call_cnv_out_call_vcfs = [
            self.call_cnv_out_dir / sample / f"{sample}.call.vcf"
            for sample in self.samples
        ]
        self.call_cnv_out_annotated_vcfs = [
            self.call_cnv_out_dir / sample / f"{sample}.annotated.vcf"
            for sample in self.samples
        ]

    def run(self):
        super().run()
        self.call_cnv_log_dir.mkdir(parents=True, exist_ok=True)
        self.call_cnv_out_dir.mkdir(parents=True, exist_ok=True)
        sh_files = [
            self.call_cnv_log_dir / f"{sample}.sh"
            for sample in self.samples
        ]
        sh_cmds = []
        # BUG FIX: the previous version unpacked SEVEN names (including an
        # unused `filtered_vcf`) from a SIX-iterable zip, which raised
        # ValueError as soon as the command list was built and misaligned
        # `sample` with the samples list.
        for in_bam, sample_dir, call_cns, call_vcf, annotated_vcf, sample in zip(
            self.map_genome_out_bams,
            self.call_cnv_sample_dirs,
            self.call_cnv_out_call_cns,
            self.call_cnv_out_call_vcfs,
            self.call_cnv_out_annotated_vcfs,
            self.samples,
        ):
            # FIX: the per-gene output extension was ".filtered.cvf" (typo);
            # it now matches the ".vcf" convention used everywhere else.
            gene_filters = "".join(
                f"{JAVA21} -jar {SNPSIFT_JAR} filter "
                f"\"(ANN[*].GENE = '{gene}')\" "
                f"{annotated_vcf} > "
                f"{self.call_cnv_out_dir / f'{sample}.{gene}.filtered.vcf'}\n\n"
                for gene in self.genes
            )
            sh_cmds.append(
                f"#!/bin/bash\n\n"
                f"set -euo pipefail\n\n"
                f"export PATH={R_BASE}:$PATH\n\n"
                f"{CNVKIT_PTRHON3} {CNVKIT_PY}  batch "
                f"{in_bam} -r {REFERENCE_CNN} -d {sample_dir}\n\n"
                f"{CNVKIT_PTRHON3} {CNVKIT_PY}  export vcf "
                f"{call_cns} -o {call_vcf}\n\n"
                f"{JAVA21} -jar {SNPEFF_JAR} "
                f"-v {REF_NAME} "
                f"{call_vcf} "
                f"-stats {call_vcf}.html "
                f"-csvStats {call_vcf}.csv "
                f"> {annotated_vcf}\n\n"
                + gene_filters
            )
        # NOTE(review): this step launches one worker per sample instead of
        # honouring cpu_n like the other steps -- confirm this is intended.
        _run(sh_files, sh_cmds, len(self.samples))

# class ParseSNP(CallCNV):
#     """
#     解析SNP结果文件
#     """
#
#     def __init__(self, args):
#         super().__init__(args)
#         self.step_n += 1
#         self.parse_snp_out_name = f"step{self.step_n}_parse_snp"
#         self.parse_snp_out_dir = self.out_dir / self.parse_snp_out_name
#         self.parse_snp_log_dir = self.log_dir / self.parse_snp_out_name
#         self.samples_str = ",".join(self.samples)
#
#     def run(self):
#         super().run()
#         self.parse_snp_out_dir.mkdir(parents=True, exist_ok=True)
#         self.parse_snp_log_dir.mkdir(parents=True, exist_ok=True)
#         _run(
#             [
#                 self.parse_snp_log_dir / f"{sample}.sh"
#                 for sample in self.samples
#             ],
#             [
#                 (
#                     f"#!/bin/bash\n\n"
#                     f"set -euo pipefail\n\n"
#                     f"export PATH={PYTHON3}:$PATH\n\n"
#                     +
#                     "".join([
#                         (
#                             f"python3 {PARSE_SNP_PY} "
#                             f"--in_file {self.filter_snp_indel_out_vcfs_str} "
#                             f"--out_file {self.parse_snp_out_file}\n\n"
#                         )
#                     ])
#
#                 )
#                 for sample in self.samples
#             ],
#             self.cpu_n
#         )


# class CheckCNV(ParseSNP):
#     """
#     检查 CNV 片段与目标基因区间的重叠情况
#     """
#
#     def __init__(self, args):
#         super().__init__(args)
#         self.step_n += 1
#         self.check_cnv_out_name = f"step{self.step_n}_check_cnv"
#         self.check_cnv_out_dir = self.out_dir / self.check_cnv_out_name
#         self.check_cnv_log_dir = self.log_dir / self.check_cnv_out_name
#         self.check_cnv_out_file = self.check_cnv_out_dir / f"{self.gene}_CNV.xlsx"
#         self.call_cnv_out_call_cns_str = ",".join(
#             [
#                 str(i)
#                 for i in self.call_cnv_out_call_cns
#             ]
#         )
#
#     def run(self):
#         super().run()
#         self.check_cnv_out_dir.mkdir(parents=True, exist_ok=True)
#         self.check_cnv_log_dir.mkdir(parents=True, exist_ok=True)
#         _run_cmds(
#             [
#                 self.check_cnv_log_dir / f"{self.check_cnv_out_name}.sh"
#             ],
#             [
#                 (
#                     f"#!/bin/bash\n\n"
#                     f"set -euo pipefail\n\n"
#                     f"export PATH={PYTHON3}:$PATH\n\n"
#                     f"python3 {WES_CHECK_CNV_PY} "
#                     f"--samples {self.samples_str} "
#                     f"--in_files {self.call_cnv_out_call_cns_str} "
#                     f"--out_file {self.check_cnv_out_file} "
#                     f"--chr_name '{self.chr_name}' "
#                     f"--gene_start {self.gene_start} "
#                     f"--gene_end {self.gene_end}\n\n"
#                 )
#             ],
#             1
#         )
#         _check_outputs([self.check_cnv_out_file])



class End(CallCNV):
    """Final step: log the total pipeline runtime."""

    def __init__(self, args: Namespace):
        super().__init__(args)

    def run(self):
        super().run()
        # Elapsed wall-clock time since Begin.__init__ recorded begin_time.
        LOGGER.info(f"任务完成，总耗时：{datetime.now() - self.begin_time}")


if __name__ == "__main__":
    # Entry point: the whole pipeline is driven by a single JSON config.
    parser = ArgumentParser(
        description="Detect all variants of the configured genes from WES data."
    )
    # `default=None` was redundant next to `required=True` and was dropped.
    parser.add_argument(
        "--config_json",
        type=Path,
        required=True,
        help="Path to the pipeline configuration JSON file.",
    )
    End(parser.parse_args()).run()
