#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File        : mngs_mjn_sample_ai.py
@Author      : Bing Liang
@Email       : believer19940901@gmail.com
@Date        : 2025/11/18 15:04
@Description :
    解析样本分析 JSON（MaiJN）并生成欧盟报告所需的病原体列表、
    耐药基因列表（RGI）、毒力因子列表（VF）等表格。
"""

from argparse import ArgumentParser, Namespace
from pathlib import Path
import json
import pandas as pd
from collections import defaultdict
from typing import Dict


# ----------------------------------------------------------------------
# 参数解析
# ----------------------------------------------------------------------
def _parse_args() -> Namespace:
    """
    解析命令行参数，用于指定样本编号、芯片编号及输出文件。
    """
    parser = ArgumentParser(description="生成欧盟报告的病原体与基因表格")

    parser.add_argument("--sample_id", type=str, required=True, help="样本编号")
    parser.add_argument("--out_file", type=str, required=True, help="病原体输出文件路径（TSV）")
    parser.add_argument("--out_vf", type=str, required=True, help="毒力因子输出文件路径（TSV）")
    parser.add_argument("--out_rgi", type=str, required=True, help="耐药基因输出文件路径（TSV）")

    return parser.parse_args()


# ----------------------------------------------------------------------
# 工具函数
# ----------------------------------------------------------------------
def _get_type2(species_type: str, stype: str) -> str:
    """
    生成类型2字段（F、P、MTC 等）。
    """
    if stype != "/":
        return stype

    if species_type == "真菌":
        return "F"
    elif species_type == "寄生虫":
        return "P"
    elif species_type == "分枝杆菌":
        return "MTC"
    else:
        return stype

def _parse_lites(job_id: str, sample_id: str) -> Dict:
    """
    Parse the reference-length table and the sample's lite alignment file and
    compute, per taxid: covered bases, coverage percentage and mean depth.

    Bug fix vs. the original: the original rebound ``ref_len`` (a ``Path``)
    with ``ref_len[sp]``, which raises ``TypeError`` (``Path`` is not
    subscriptable) — the lookup was meant to hit ``ref_len_dict``. A zero /
    missing reference length is now skipped instead of dividing by zero.

    :param job_id: run/chip id used to locate the result directory.
    :param sample_id: sample id (also the file name stem).
    :return: dict mapping int taxid -> {"覆盖长度（bp）", "覆盖度（%）", "平均深度（x）"}.
    """
    ref_len_file = Path("/data/mNGS/pipeline/script/table/ref_length_4_coverage.xls")
    lites_file = Path(f"/data/mNGS/runmngs/result/{job_id}/{sample_id}/{sample_id}.litesam4ref")

    # reference name -> genome length (bp); missing names default to 0
    ref_len_dict = defaultdict(int)
    with open(ref_len_file, "r") as fr:
        for line in fr:
            if line := line.strip():
                items = line.split("\t")
                ref_len_dict[items[0]] = int(items[1])

    # reference name -> {position -> depth}
    lites_dict = {}
    with open(lites_file, "r") as fr:
        for line in fr:
            if line := line.strip():
                sp, *rest = line.split(",")
                genome_len = ref_len_dict[sp]  # fix: was ref_len[sp] on a Path
                depth = defaultdict(int)
                for r in rest:
                    items = r.split("-")
                    start = int(items[0])
                    length = int(items[1])
                    # Ignore alignments starting past the reference end.
                    if start <= genome_len:
                        for i in range(length):
                            depth[start + i] += 1
                lites_dict[sp] = depth

    out_dict = defaultdict(dict)
    for k, v in lites_dict.items():
        # reference names look like "<taxid>_..."; key the output by taxid
        taxid = int(k.split("_")[0])
        genome_len = ref_len_dict[k]
        if not genome_len:
            # unknown reference length — cannot compute ratios safely
            continue
        out_dict[taxid]["覆盖长度（bp）"] = len(v)
        out_dict[taxid]["覆盖度（%）"] = round((len(v) / genome_len) * 100, 8)
        out_dict[taxid]["平均深度（x）"] = round(sum(v.values()) / genome_len, 8)
    return out_dict


def _parse_aro() -> dict:
    """
    解析 CARD 数据库 aro_index_tax.tsv，获得 ARO Name → ARO Accession 映射。
    """
    aro_file = Path("/data/mNGS/pipeline/script/card") / "aro_index_tax.tsv"
    if not aro_file.exists():
        return {}

    df = pd.read_csv(aro_file, sep="\t")
    return df.set_index("ARO Name")["ARO Accession"].to_dict()


def _parse_vf() -> dict:
    """
    解析 VFDB 描述文件 vf.descriptions.xls，返回 gene → description 字典。

    文件格式举例：
        gene1   "description text"
        gene2   "description text"
    """
    vf_path = Path("/data/mNGS/pipeline/script/vfdb/vf.descriptions.xls")
    out_dict = {}

    if not vf_path.exists():
        return out_dict

    with open(vf_path, "r", encoding="gbk") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue

            parts = line.split("\t")
            if len(parts) < 2:
                continue

            gene, desc = parts[0], parts[1]
            out_dict[gene] = desc.strip().strip('"').strip("'")

    return out_dict


# ----------------------------------------------------------------------
# 主程序
# ----------------------------------------------------------------------
def main(args: Namespace) -> None:
    """
    Read the sample analysis JSON and write three TSV tables for the EU
    report: pathogens, resistance genes (RGI) and virulence factors (VF).

    :param args: parsed CLI options (job_id, sample_id, out_file, out_vf, out_rgi).
    :raises FileNotFoundError: when the sample JSON does not exist.
    """
    # ------------------------------------------------------------------
    # Output path preparation
    # ------------------------------------------------------------------
    out_file = Path(args.out_file).absolute()
    out_vf = Path(args.out_vf).absolute()
    out_rgi = Path(args.out_rgi).absolute()
    for out_path in (out_file, out_vf, out_rgi):
        out_path.parent.mkdir(parents=True, exist_ok=True)

    # ------------------------------------------------------------------
    # Load the sample JSON
    # ------------------------------------------------------------------
    json_file = Path("/data/mNGS/runmngs/MaiJN") / f"{args.sample_id}.json"
    if not json_file.exists():
        raise FileNotFoundError(f"找不到样本 JSON 文件：{json_file}")

    with open(json_file, "r", encoding="utf-8") as f:
        json_dict = json.load(f)

    # ------------------------------------------------------------------
    # Pathogen table
    # ------------------------------------------------------------------
    # species_type -> report-facing category (类型1)
    type1_map = {
        "细菌": "细菌",
        "真菌": "真菌",
        "virus/RNA": "病毒",
        "virus/DNA": "病毒",
        "分枝杆菌": "分枝杆菌",
        "支原体": "细菌",
        "衣原体": "细菌",
        "寄生虫": "寄生虫",
    }

    # detection level -> interpretation reference (判读参考)
    pan_ref = {
        "高": "检出",
        "中": "检出",
        "低": "检出",
        "疑似背景微生物": "定植或微生态"
    }

    out_dict = defaultdict(list)

    species_list = json_dict.get("posmicro_List", []) + json_dict.get("back_List", [])
    lites_dict = _parse_lites(args.job_id, args.sample_id)
    # Empty coverage record used when a species has no alignment data.
    empty_cov = {"覆盖长度（bp）": 0, "覆盖度（%）": 0.00, "平均深度（x）": 0.00}
    for sp in species_list:
        out_dict["taxid"].append(sp["taxid"])
        out_dict["类型1"].append(type1_map.get(sp["species_type"], sp["species_type"]))
        out_dict["类型2"].append(_get_type2(sp["species_type"], sp["stype"]))
        out_dict["属(中文名)"].append(sp["genuscn"])
        out_dict["属(拉丁名)"].append(sp["genus"])
        out_dict["属(序列数)"].append(sp["gReads"])
        out_dict["属相对丰度（%）"].append(sp["gAbundance"])

        out_dict["种(中文名)"].append(sp["species_cn"])
        out_dict["种(拉丁名)"].append(sp["species"])
        out_dict["种(序列数)"].append(sp["species_reads"])
        out_dict["种相对丰度（%）"].append(sp["sAbundance"])

        # _parse_lites keys coverage records by *int* taxid; the JSON may
        # carry the taxid as a string, so normalize before the lookup
        # (bug fix: the original compared the raw JSON value against int
        # keys, silently reporting 0 coverage for string taxids).
        try:
            tid = int(sp["taxid"])
        except (TypeError, ValueError):
            tid = sp["taxid"]
        cov = lites_dict.get(tid, empty_cov)
        out_dict["覆盖长度（bp）"].append(cov["覆盖长度（bp）"])
        out_dict["覆盖度（%）"].append(cov["覆盖度（%）"])
        out_dict["平均深度（x）"].append(cov["平均深度（x）"])

        out_dict["检出类型"].append(sp["species_focous"])
        out_dict["判读参考"].append(pan_ref.get(sp["species_focous"], "定植或微生态"))
        out_dict["临床解释"].append(sp["description"])
        out_dict["参考文献"].append(sp["reference"])
        # f-string coerces taxid to str (bug fix: the original
        # `sp['taxid'] + "_"` raised TypeError for an int taxid).
        out_dict["覆盖图名称"].append(f"{sp['taxid']}_{sp['species'].replace(' ', '_')}.covMap.png")

    pd.DataFrame(out_dict).to_csv(out_file, sep="\t", index=False)

    # ------------------------------------------------------------------
    # RGI (resistance genes)
    # ------------------------------------------------------------------
    rgi_dict = defaultdict(list)
    aro_dict = _parse_aro()

    for sp in json_dict.get("rgi_List", []):
        for ar in sp.get("area", []):
            gene = ar["gene"]

            # Fall back to the gene name itself when no ARO accession is known.
            rgi_dict["ARO_Id"].append(aro_dict.get(gene, gene))
            rgi_dict["基因"].append(gene)
            rgi_dict["比对一致率（%）"].append("NA")
            rgi_dict["覆盖度（%）"].append(ar["coverage"])
            rgi_dict["相关抗生素"].append(ar["drug_cn"])
            rgi_dict["相关菌属"].append(ar["sample_cn"])

    if rgi_dict:
        pd.DataFrame(rgi_dict).to_csv(out_rgi, sep="\t", index=False)
    else:
        # No hits: emit a header plus one NA placeholder row so downstream
        # report templating always has a table to read.
        with open(out_rgi, "w", encoding="utf-8") as f:
            f.write("ARO_Id\t基因\t比对一致率（%）\t覆盖度（%）\t相关抗生素\t相关菌属\n")
            f.write("NA\tNA\tNA\tNA\tNA\tNA\n")

    # ------------------------------------------------------------------
    # VF (virulence factors)
    # ------------------------------------------------------------------
    vf_dict = defaultdict(list)
    gene_vf = _parse_vf()

    for vf in json_dict.get("vf_List", []):
        gene = vf["gene"]

        vf_dict["VF_Id"].append("NA")
        vf_dict["基因"].append(gene)
        vf_dict["比对一致率（%）"].append("NA")
        vf_dict["毒力因子"].append(gene_vf.get(gene, "NA"))
        vf_dict["相关菌属"].append(vf["pathogen"])

    if vf_dict:
        pd.DataFrame(vf_dict).to_csv(out_vf, sep="\t", index=False)
    else:
        # Same placeholder convention as the RGI table.
        with open(out_vf, "w", encoding="utf-8") as f:
            f.write("VF_Id\t基因\t比对一致率（%）\t毒力因子\t相关菌属\n")
            f.write("NA\tNA\tNA\tNA\tNA\n")

# ----------------------------------------------------------------------
# Script entry point: parse CLI arguments and generate the report tables.
if __name__ == "__main__":
    main(_parse_args())
