 #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File    : tngs_conserved_regions.py
@Author  : Bing Liang
@Email   : believer19940901@gmail.com
@Date    : 2025/11/11 17:28
@Description : tNGS引物设计
"""
from argparse import Namespace, ArgumentParser
from pathlib import Path
from multiprocessing import Pool, cpu_count
from datetime import datetime
import subprocess
import logging
import json
from typing import Sequence


# --------
# Logging
# --------
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] - [%(levelname)s] - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
# NOTE(review): logger is named "WES" although this file is a tNGS pipeline —
# presumably copied from a sibling workflow; confirm before renaming.
LOGGER = logging.getLogger("WES")


# --------
# External tools (hard-coded conda-env / install paths on the analysis host)
# --------
PYTHON_ENV = Path("/home/bioinfo/software/miniconda3/envs/python-venv/bin")
MAFFT_ENV = Path("/home/bioinfo/software/miniconda3/envs/mafft/bin")
DATASETS = Path("/data/jinlj/software/jenv/bin/datasets")
ARIA2C_ENV = Path("/home/bioinfo/software/miniconda3/envs/aria2c/bin")


# --------
# Companion scripts, resolved relative to this file's own directory
# --------
SCRIPTS = Path(__file__).parent.resolve()
TNGS_PARSE_JSON_PY = SCRIPTS / "tngs_parse_json.py"
TNGS_SPLIT_GENE_PY = SCRIPTS / "tngs_split_gene.py"
TNGS_SELECT_TEMPLATES = SCRIPTS / "tngs_select_templates.py"


# --------
# 辅助函数
# --------
def _parse_arguments():
    """
    解析命令行参数
    :return:
    """
    parser = ArgumentParser()
    parser.add_argument("-c", "--config_json", type=str, required=True, default=None)
    return parser.parse_args()


def _run_or_die(sh_cmd: str, sh_file: Path, log_file: Path, ok_file: Path) -> None:
    """
    Write *sh_cmd* to *sh_file* and execute it with bash.

    stdout/stderr are redirected to *log_file*.  On success an *ok_file*
    marker is touched; if the marker already exists the task is skipped.
    Failures are logged but NOT raised — the caller (_run_cmds) verifies
    the .ok markers afterwards.

    :param sh_cmd: full shell script text to run
    :param sh_file: path the script is written to
    :param log_file: combined stdout/stderr log
    :param ok_file: completion marker created on success
    """
    if ok_file.exists():
        LOGGER.info("跳过 %s（已完成）", sh_file)
        return

    try:
        LOGGER.info("执行任务：%s", sh_file)
        sh_file.write_text(sh_cmd, encoding="utf-8")

        with open(log_file, "w", encoding="utf-8") as fl:
            # Argument list instead of an interpolated shell string:
            # paths containing spaces or shell metacharacters cannot
            # break or inject into the invocation.
            result = subprocess.run(
                ["bash", str(sh_file)],
                text=True,
                stdout=fl,
                stderr=fl,
            )

        if result.returncode != 0:
            LOGGER.error("执行失败：%s，退出码 %s", sh_file, result.returncode)
            return

        ok_file.touch()
        LOGGER.info("完成：%s", sh_file)

    except Exception as e:
        # Broad catch is deliberate: one failed task must not kill the
        # pool worker; the missing .ok marker is detected by _run_cmds().
        LOGGER.error("执行出错：%s，错误：%s", sh_file, e)


def _run_cmds(sh_files: list[Path], sh_cmds: list[str], cpu_n: int = 1, check: bool = True):
    """
    Run several shell snippets in parallel worker processes.

    Each entry of *sh_cmds* is written to and executed from the matching
    *sh_files* path; `.ok` / `.log` siblings are derived per script.

    :param sh_files: script paths (one per command)
    :param sh_cmds: shell script bodies, parallel to *sh_files*
    :param cpu_n: number of worker processes (clamped to >= 1)
    :param check: when True, raise if any task left no .ok marker
    :raises RuntimeError: if *check* and a completion marker is missing
    """
    ok_files = [f.with_suffix(".ok") for f in sh_files]
    log_files = [f.with_suffix(".log") for f in sh_files]

    # Pool(0) raises ValueError, so clamp the worker count.
    with Pool(max(1, cpu_n)) as pool:
        # Keep the AsyncResult handles: without .get(), dispatch errors
        # (e.g. pickling failures) would be swallowed silently.
        results = [
            pool.apply_async(_run_or_die, args=(sh_cmd, sh_file, log_file, ok_file))
            for sh_cmd, sh_file, ok_file, log_file in zip(sh_cmds, sh_files, ok_files, log_files)
        ]
        pool.close()
        pool.join()

    # _run_or_die catches task-level errors itself; this only surfaces
    # infrastructure failures that never reached the worker.
    for res in results:
        res.get()

    if check:
        for ok_file in ok_files:
            if not ok_file.exists():
                raise RuntimeError(f"缺少完成标志文件：{ok_file}")


def _check_out_files(out_files: Sequence[Path]) -> None:
    """
    检查输出文件是否存在或为空，如果为空直接抛异常。

    :param out_files: 输出文件列表
    :raises RuntimeError: 如果文件不存在或为空
    """
    missing_files = [f for f in out_files if not f.exists()]
    empty_files = [f for f in out_files if f.exists() and f.stat().st_size == 0]

    if missing_files or empty_files:
        messages = []
        if missing_files:
            messages.append(f"不存在文件: {', '.join(map(str, missing_files))}")
        if empty_files:
            messages.append(f"为空文件: {', '.join(map(str, empty_files))}")
        raise RuntimeError("; ".join(messages))


# --------
# 分析流程
# --------
class Begin:
    """
    Pipeline start: load the JSON configuration and expose the shared
    settings that every downstream step class inherits.
    """

    def __init__(self, args: Namespace):
        # Wall-clock start; End.run() reports the elapsed total.
        self.begin_time = datetime.now()
        # Step counter — each subclass bumps it to number its output folder.
        self.step_n = 0

        # --------
        # Configuration
        # --------
        self.config_json = Path(args.config_json).absolute()
        with open(self.config_json, "r", encoding="utf-8") as handle:
            self.config_dict: dict = json.load(handle)

        # --------
        # Output directory
        # --------
        # Validate the raw value BEFORE wrapping it in Path: Path(None)
        # raises a confusing TypeError, and a constructed Path object is
        # always truthy, so checking `not self.out_dir` afterwards (as the
        # original code did) could never fire.
        out_dir = self.config_dict.get("out_dir")
        if not out_dir:
            LOGGER.error("输出目录为空")
            raise RuntimeError("输出目录为空")
        self.out_dir = Path(out_dir).absolute()
        self.log_dir = self.out_dir / "log"

        # --------
        # Taxon ID (NCBI taxonomy identifier of the target species)
        # --------
        self.tax_id = self.config_dict.get("tax_id")
        if not self.tax_id:
            LOGGER.error("物种编号为空")
            raise RuntimeError("物种编号为空")

        # --------
        # Parallel genome-download worker count
        # --------
        self.cpu_n = self.config_dict.get("cpu_n", cpu_count() // 2)

        # --------
        # Minimum number of genomes a gene must be covered by
        # --------
        self.min_genome = self.config_dict.get("min_seq", 10)

        # --------
        # Overall minimum template length
        # --------
        self.min_len = self.config_dict.get("min_len", 300)

        # --------
        # Overall maximum template length
        # --------
        self.max_len = self.config_dict.get("max_len", 1000)

        # --------
        # Maximum number of variants allowed overall
        # --------
        self.max_ward = self.config_dict.get("max_ward", 5)

        # --------
        # Minimum conserved-region length
        # --------
        self.min_conv = self.config_dict.get("min_con", 25)

        # --------
        # Conservation threshold used when deriving majority-consensus bases
        # --------
        self.conservation_threshold = self.config_dict.get("conservation_threshold", 0.95)

    def run(self) -> None:
        """Create the output/log directories and announce the run."""
        LOGGER.info(f"开始设计物种[{self.tax_id}]的引物")
        self.out_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)


class Datasets(Begin):
    """
    Step 1: use NCBI Datasets to fetch a dehydrated archive containing
    every *complete* genome assembly (FASTA + GFF3) for the taxon, then
    unzip it in place.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.datasets_out_name = f"step{self.step_n}_datasets"
        self.datasets_out_dir: Path = self.out_dir / self.datasets_out_name
        self.datasets_log_dir: Path = self.log_dir / self.datasets_out_name
        # Dehydrated archive written by `datasets download`.
        self.datasets_out_zip = self.datasets_out_dir / f"{self.tax_id}.meta.zip"
        # Catalog produced by unzipping; serves as the success marker and
        # is consumed by the ParseJson step.
        self.datasets_out_json = (
                self.datasets_out_dir /
                "ncbi_dataset" /
                "data" /
                "dataset_catalog.json"  # plain string — no interpolation needed
        )

    def run(self) -> None:
        """Download the dehydrated archive and unzip it, then verify the catalog."""
        super().run()
        self.datasets_out_dir.mkdir(parents=True, exist_ok=True)
        self.datasets_log_dir.mkdir(parents=True, exist_ok=True)
        _run_cmds(
            [
              self.datasets_log_dir / f"{self.datasets_out_name}.sh"
            ],
            [
                (
                    f"#!/bin/bash\n\n"
                    f"set -vexuo pipefail\n\n"
                    f"{DATASETS} download genome taxon {self.tax_id} "
                    f"--assembly-level complete "
                    f"--include genome,gff3 "
                    f"--assembly-source all "
                    f"--dehydrated "
                    f"--filename {self.datasets_out_zip}\n\n"
                    f"cd {self.datasets_out_dir} && unzip -o {self.datasets_out_zip}\n\n"
                )
            ],
            1
        )
        _check_out_files(
            [
                self.datasets_out_json
            ]
        )


class ParseJson(Datasets):
    """
    Step 2: parse the dataset catalog JSON and emit, per genome
    accession, a file listing the download URLs for its FASTA and GFF3.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.parse_json_out_name = f"step{self.step_n}_parse_json"
        self.parse_json_out_dir: Path = self.out_dir / self.parse_json_out_name
        self.parse_json_log_dir: Path = self.log_dir / self.parse_json_out_name

    def run(self) -> None:
        """Invoke the tngs_parse_json.py helper on the dataset catalog."""
        super().run()
        for directory in (self.parse_json_out_dir, self.parse_json_log_dir):
            directory.mkdir(parents=True, exist_ok=True)

        script = (
            f"#!/bin/bash\n\n"
            f"set -vexuo pipefail\n\n"
            f"export PATH={PYTHON_ENV}:$PATH\n\n"
            f"python3 {TNGS_PARSE_JSON_PY} "
            f"--catalog {self.datasets_out_json} "
            f"--out_dir {self.parse_json_out_dir}\n\n"
        )
        sh_file = self.parse_json_log_dir / f"{self.parse_json_out_name}.sh"
        _run_cmds([sh_file], [script], 1)


class Aria2C(ParseJson):
    """
    Step 3: download each accession's genome and GFF with aria2c, driven
    by the per-accession URL lists produced by ParseJson.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.aria2c_out_name = f"step{self.step_n}_aria2c"
        self.aria2c_out_dir = self.out_dir / self.aria2c_out_name
        self.aria2c_log_dir = self.log_dir / self.aria2c_out_name

    def run(self) -> None:
        """Launch one aria2c job per accession, self.cpu_n jobs in parallel."""
        super().run()
        self.aria2c_log_dir.mkdir(parents=True, exist_ok=True)
        self.aria2c_out_dir.mkdir(parents=True, exist_ok=True)
        # Only accession *directories* count: a bare glob("*") would also
        # pick up stray files (editor backups, .DS_Store, …) and create
        # bogus download jobs.  Sorted for a deterministic job order.
        acc_dirs = sorted(
            p for p in self.parse_json_out_dir.glob("*") if p.is_dir()
        )
        acc_nums = [acc_dir.name for acc_dir in acc_dirs]
        ari_dirs = [self.aria2c_out_dir / acc_num for acc_num in acc_nums]
        _run_cmds(
            [
                self.aria2c_log_dir / f"{acc_num}.sh"
                for acc_num in acc_nums
            ],
            [
                (
                    f"#!/bin/bash\n\n"
                    f"set -vexuo pipefail\n\n"
                    f"export PATH={ARIA2C_ENV}:$PATH\n\n"
                    f"aria2c -c -x16 -s16 -j 16 -i "
                    f"{acc_dir / f'{acc_num}.urls'} -d {ari_dir}\n\n"
                )
                for acc_num, acc_dir, ari_dir in zip(acc_nums, acc_dirs, ari_dirs)
            ],
            self.cpu_n
        )


class SplitGene(Aria2C):
    """
    Step 4: split every downloaded genome into per-gene FASTA files.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.split_out_name = f"step{self.step_n}_split_gene"
        self.split_out_dir = self.out_dir / self.split_out_name
        self.split_log_dir = self.log_dir / self.split_out_name

    def run(self) -> None:
        """Invoke the tngs_split_gene.py helper over the downloaded genomes."""
        super().run()
        for directory in (self.split_log_dir, self.split_out_dir):
            directory.mkdir(parents=True, exist_ok=True)

        script = (
            f"#!/bin/bash\n\n"
            f"set -vexuo pipefail\n\n"
            f"export PATH={PYTHON_ENV}:$PATH\n\n"
            f"python3 {TNGS_SPLIT_GENE_PY} "
            f"-i {self.aria2c_out_dir} "
            f"-o {self.split_out_dir}\n\n"
        )
        sh_file = self.split_log_dir / f"{self.split_out_name}.sh"
        _run_cmds([sh_file], [script], 1)


class Mafft(SplitGene):
    """
    Step 5: align each per-gene FASTA with MAFFT.  Genes with at most one
    sequence are copied through unchanged instead of being aligned.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.mafft_out_name = f"step{self.step_n}_mafft"
        self.mafft_out_dir = self.out_dir / self.mafft_out_name
        self.mafft_log_dir = self.log_dir / self.mafft_out_name

    def run(self) -> None:
        """Run one MAFFT job per gene and verify every alignment exists."""
        super().run()
        self.mafft_log_dir.mkdir(parents=True, exist_ok=True)
        self.mafft_out_dir.mkdir(parents=True, exist_ok=True)

        def build_script(fasta: Path, aligned: Path) -> str:
            # One bash snippet per gene; alignment is skipped for <=1 sequence.
            return (
                "#!/bin/bash\n"
                "\n"
                "set -vexuo pipefail\n"
                "\n"
                f"export PATH={MAFFT_ENV}:$PATH\n"
                "\n"
                "# 统计序列数量（FASTA header 数）\n"
                f"SEQ_COUNT=$(grep -c '^>' \"{fasta}\")\n"
                "\n"
                "# 少于等于 1 条则跳过比对并直接复制原序列\n"
                "if [ \"$SEQ_COUNT\" -le 1 ]; then\n"
                f"    echo \"[INFO] 跳过 MAFFT: {fasta} 序列数=$SEQ_COUNT\"\n"
                f"    cp \"{fasta}\" \"{aligned}\"\n"
                "    exit 0\n"
                "fi\n"
                "\n"
                "# 正常比对\n"
                f"mafft --auto \"{fasta}\" > \"{aligned}\""
            )

        sh_files: list[Path] = []
        scripts: list[str] = []
        alns: list[Path] = []
        for gene in self.split_out_dir.glob("*.fas"):
            aln = self.mafft_out_dir / f"{gene.stem}.aln"
            sh_files.append(self.mafft_log_dir / f"{gene.stem}.sh")
            scripts.append(build_script(gene, aln))
            alns.append(aln)

        # NOTE(review): parallelism is fixed at 32 here rather than using
        # self.cpu_n (which only throttles the download step) — confirm
        # this is intentional for the target host.
        _run_cmds(sh_files, scripts, 32)
        _check_out_files(alns)


# TODO：使用不同的设计模式去设计引物
#     设计引物模板（方法一）
#     前后的引物区完全保守（30bp左右）
#     中间的扩增区相对保守，允许少量的兼并碱基和共识碱基
#     限制模板的最小长度和最大长度
#     理论上模板的任何区域都可以设计引物
#     适合变异位点不多本身就很保守的物种
#     最后生成的序列不是真实序列而是人工序列


class SelectTemplates(Mafft):
    """
    Step 6: pick primer-design templates from the alignments (method 2).

    The flanking primer regions must be fully conserved; the amplified
    middle region carries no conservation constraint; primer regions have
    a minimum length; the amplicon is bounded by minimum and maximum
    lengths; each template is the real sequence of a template strain.
    """

    def __init__(self, args):
        super().__init__(args)
        self.step_n += 1
        self.select_templates_out_name = f"step{self.step_n}_select_templates"
        self.select_templates_out_dir = self.out_dir / self.select_templates_out_name
        self.select_templates_log_dir = self.log_dir / self.select_templates_out_name

    def run(self) -> None:
        """Invoke the tngs_select_templates.py helper on the alignments."""
        super().run()
        for directory in (self.select_templates_log_dir, self.select_templates_out_dir):
            directory.mkdir(parents=True, exist_ok=True)

        options = " ".join(
            [
                f"--in_dir {self.mafft_out_dir}",
                f"--out_dir {self.select_templates_out_dir}",
                f"--min_amp {self.min_len}",
                f"--max_amp {self.max_len}",
                f"--min_genome {self.min_genome}",
                f"--con_len {self.min_conv}",
            ]
        )
        script = (
            f"#!/bin/bash\n"
            f"set -vexuo pipefail\n"
            f"export PATH={PYTHON_ENV}:$PATH\n"
            f"python3 {TNGS_SELECT_TEMPLATES} {options}\n\n"
        )
        sh_file = self.select_templates_log_dir / f"{self.select_templates_out_name}.sh"
        _run_cmds([sh_file], [script], 1)


class End(SelectTemplates):
    """
    Final step: run the whole chain and report the elapsed wall time.

    No ``__init__`` override is needed — the original one only forwarded
    to ``super().__init__``, which inheritance already provides.
    """

    def run(self) -> None:
        """Execute every pipeline step, then log the total runtime."""
        super().run()
        elapsed = datetime.now() - self.begin_time
        LOGGER.info(f"任务完成，总耗时：{elapsed}")


def main() -> None:
    """Entry point: parse CLI arguments and run the full pipeline."""
    End(_parse_arguments()).run()


if __name__ == "__main__":
    main()