#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File    : microbiome_qc_stats.py
@Author  : Bing Liang
@Email   : believer19940901@gmail.com
@Date    : 2025/10/13
@Description : Merge microbiome QC statistics (single pipeline flow, no function decomposition)
"""

from argparse import ArgumentParser, Namespace
from pathlib import Path
from collections import defaultdict
import pandas as pd
import json

def main(args: Namespace):
    """
    Merge per-sample QC statistics from fastp JSON reports, host-removal
    stats, deduplication stats and Kraken2 reports into one Excel table.

    Args:
        args: Parsed CLI arguments. Every ``*_files`` option is a
            comma-separated list that must be in the same order and of the
            same length as ``args.samples``.

    Raises:
        ValueError: If any per-tool file list length differs from the
            number of samples.
    """
    # Split the comma-separated sample names and per-tool file lists.
    samples = args.samples.strip().split(",")
    fastp_json_files = args.fastp_json_files.strip().split(",")
    rmhost_stat_files = args.rmhost_stat_files.strip().split(",")
    rmdup_stat_files = args.rmdup_stat_files.strip().split(",")
    kraken2_report_files = args.kraken2_report_files.strip().split(",")

    # zip() silently truncates to the shortest list, which would drop
    # samples without any warning -- validate lengths up front instead.
    file_lists = {
        "--fastp_json_files": fastp_json_files,
        "--rmhost_stat_files": rmhost_stat_files,
        "--rmdup_stat_files": rmdup_stat_files,
        "--kraken2_report_files": kraken2_report_files,
    }
    for flag, files in file_lists.items():
        if len(files) != len(samples):
            raise ValueError(
                f"{flag} has {len(files)} entries but --samples has {len(samples)}"
            )

    def _pct(numerator: int, denominator: int) -> float:
        """Percentage rounded to 4 decimals; 0.0 when denominator is 0."""
        # Guards ZeroDivisionError for a sample whose clean reads are 0
        # (e.g. everything filtered out by fastp).
        return round(numerator / denominator * 100, 4) if denominator else 0.0

    out_dic = defaultdict(list)

    # Iterate over samples; all lists are length-checked above, so zip is safe.
    for sample, fastp_json_file, rmhost_stat_file, rmdup_stat_file, kraken2_report_file in zip(
        samples, fastp_json_files, rmhost_stat_files, rmdup_stat_files, kraken2_report_files
    ):
        # ----------------------
        # fastp JSON parsing
        # ----------------------
        with open(fastp_json_file, "r", encoding="utf-8") as f:
            json_data = json.load(f)

        # Raw (pre-filtering) sequencing statistics.
        # NOTE(review): bases are divided by 1024**3 (binary convention);
        # gigabases are conventionally 1e9 -- confirm the intended unit.
        before = json_data["summary"]["before_filtering"]
        raw_bases = round(int(before["total_bases"]) / (1024**3), 4)
        raw_reads = int(before["total_reads"])
        raw_q30 = float(before["q30_rate"]) * 100
        raw_gc = float(before["gc_content"]) * 100

        # Post-filtering ("clean") statistics.
        after = json_data["summary"]["after_filtering"]
        clean_bases = round(int(after["total_bases"]) / (1024**3), 4)
        clean_reads = int(after["total_reads"])
        clean_q30 = float(after["q30_rate"]) * 100
        clean_gc = float(after["gc_content"]) * 100

        # Record fastp-derived columns.
        out_dic["Sample"].append(sample)
        out_dic["Raw Bases(G)"].append(raw_bases)
        out_dic["Raw Reads"].append(raw_reads)
        out_dic["Raw Q30(%)"].append(raw_q30)
        out_dic["Raw GC(%)"].append(raw_gc)
        out_dic["Clean Bases(G)"].append(clean_bases)
        out_dic["Clean Reads"].append(clean_reads)
        out_dic["Clean Q30(%)"].append(clean_q30)
        out_dic["Clean GC(%)"].append(clean_gc)

        # ----------------------
        # Host-removal statistics
        # ----------------------
        # Whitespace-delimited table (seqkit-style) with thousands commas;
        # the first row's "num_seqs" is the read count surviving host removal.
        rmhost_df = pd.read_csv(rmhost_stat_file, sep=r"\s+", thousands=",")
        unhost_reads = int(rmhost_df["num_seqs"].iloc[0])
        host_reads = clean_reads - unhost_reads
        out_dic["Host Reads"].append(host_reads)
        out_dic["Unhost Reads"].append(unhost_reads)
        out_dic["Host Rate(%)"].append(_pct(host_reads, clean_reads))
        out_dic["Unhost Rate(%)"].append(_pct(unhost_reads, clean_reads))

        # ----------------------
        # Deduplication statistics
        # ----------------------
        rmdup_df = pd.read_csv(rmdup_stat_file, sep=r"\s+", thousands=",")
        undup_reads = int(rmdup_df["num_seqs"].iloc[0])
        dup_reads = unhost_reads - undup_reads
        out_dic["Duplicate Reads"].append(dup_reads)
        out_dic["Unduplicate Reads"].append(undup_reads)
        # Rates are relative to clean_reads (not unhost_reads), matching
        # the host-removal section above.
        out_dic["Duplicate Rate(%)"].append(_pct(dup_reads, clean_reads))
        out_dic["Unduplicate Rate(%)"].append(_pct(undup_reads, clean_reads))

        # ----------------------
        # Kraken2 classification statistics
        # ----------------------
        # Assumes the report's first line is the "unclassified" row and its
        # second column is the read count -- TODO confirm for reports where
        # no reads are unclassified.
        with open(kraken2_report_file, "r", encoding="utf-8") as f:
            line = f.readline()
            items = line.strip().split()
            unclf_reads = int(items[1])
        clf_reads = undup_reads - unclf_reads
        out_dic["Unclassified Reads"].append(unclf_reads)
        out_dic["Classified Reads"].append(clf_reads)
        out_dic["Unclassified Rate(%)"].append(_pct(unclf_reads, clean_reads))
        out_dic["Classified Rate(%)"].append(_pct(clf_reads, clean_reads))

    # ----------------------
    # Write the merged QC table
    # ----------------------
    out_path = Path(args.qc_stats_out)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # Right-merge keeps every collected sample even if it is missing from
    # the group-info table; sorting assumes a "Group" column exists there.
    group_df = pd.read_csv(args.group_info, sep="\t")
    out_df = group_df.merge(pd.DataFrame(out_dic), on="Sample", how="right").sort_values(by=["Group"])
    out_df.to_excel(out_path, index=False, engine="openpyxl")
    print(f"✅ QC 统计完成，输出文件: {out_path}")

if __name__ == "__main__":
    # Build the CLI: every option is a required string; the *_files options
    # take comma-separated paths aligned one-to-one with --samples.
    parser = ArgumentParser(description="微生物组质控信息统计合并（单流程）")
    arg_specs = [
        ("--samples", "样本名称列表，逗号分隔"),
        ("--group_info", "分组信息"),
        ("--fastp_json_files", "fastp JSON 文件列表，逗号分隔"),
        ("--rmhost_stat_files", "去宿主统计文件列表，逗号分隔"),
        ("--rmdup_stat_files", "去重统计文件列表，逗号分隔"),
        ("--kraken2_report_files", "Kraken2 报告文件列表，逗号分隔"),
        ("--qc_stats_out", "输出 QC 统计表路径"),
    ]
    for flag, help_text in arg_specs:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    main(parser.parse_args())
