from enum import Enum
from functools import cached_property
import logging
from pathlib import Path
import zipfile
from pydantic import BaseModel, Field
import polars as pl

# Configure root logging once at import time; all module output goes through
# this shared, named logger.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("Clinical Pathogen")


class Signal(str, Enum):
    """Signal-strength labels (Chinese) written into the pathogen report."""
    # pylint: disable=C0103
    S = "强"  # strong
    M = "中"  # medium
    W = "弱"  # weak
    B = "背景"  # background


class Result(BaseModel):
    """Aggregate tNGS pipeline outputs into one zipped TSV report.

    Inputs are per-sample TSV files; a record's sample name is taken from
    its file name (text before the first "."). Samples listed in
    ``ctrl_sample`` are negative controls: they contribute background
    statistics and are excluded from every output table.
    """

    ctrl_sample: list[str]  # negative-control sample names
    pathogen_tsv: list[Path]  # pathogen abundance tables (controls included)
    reads_tsv: list[Path]  # read-level tables
    resfinder_tsv: list[Path]  # ResFinder AMR tables
    virulencefinder_tsv: list[Path]  # VirulenceFinder tables
    result_zip: Path  # output archive path
    target_tsv: Path | None = Field(default=None)  # optional tNGS target taxid list

    @cached_property
    def sample_num(self) -> int:
        """Number of test (non-control) samples in the batch."""
        return len(self.pathogen_tsv) - len(self.ctrl_sample)

    @cached_property
    def target_records(self) -> pl.DataFrame:
        """tNGS target taxa as a two-column frame: ``taxid``, ``target`` ("tNGS").

        Returns an empty frame with the same schema when no target file was given.
        """
        if self.target_tsv:
            logger.info("Read Target from %s", self.target_tsv)
            return (
                pl.read_csv(self.target_tsv, separator="\t", columns=["taxid"])
                .cast(pl.String)
                .with_columns(pl.lit("tNGS").alias("target"))
            )
        return pl.DataFrame(schema={"taxid": str, "target": str})

    @cached_property
    def res_records(self) -> pl.DataFrame:
        """Concatenated ResFinder AMR records from all non-control samples."""
        logger.info("Read AMR")
        columns = ["Resistance gene", "Identity", "Phenotype", "Accession no."]
        new_columns = ["gene", "identity", "drug", "accession"]
        records = pl.concat(
            map(
                lambda x: pl.read_csv(
                    x,
                    separator="\t",
                    columns=columns,
                    new_columns=new_columns,
                )
                # BUG FIX: the original used str.replace(..., n=-1); polars'
                # str.replace has no "replace all" via n=-1 — use replace_all
                # to normalize every drug-list separator.
                .with_columns(pl.col("drug").str.replace_all(", ", ",", literal=True))
                # Strip the version suffix from accession numbers ("X.1" -> "X").
                .with_columns(pl.col("accession").str.split(".").list.first())
                # Sample name = file name up to the first ".".
                .with_columns(pl.lit(x.name.split(".", 1)[0]).alias("sample")),
                self.resfinder_tsv,
            )
        )
        return records.filter(~pl.col("sample").is_in(self.ctrl_sample))

    @cached_property
    def vf_records(self) -> pl.DataFrame:
        """Concatenated VirulenceFinder records from all non-control samples."""
        logger.info("Read VF")
        columns = ["Virulence factor", "Identity", "Protein function", "Accession number"]
        new_columns = ["virulence_factor", "identity", "protein_function", "accession"]
        records = pl.concat(
            map(
                lambda x: pl.read_csv(
                    x,
                    separator="\t",
                    columns=columns,
                    new_columns=new_columns,
                )
                # Strip the version suffix from accession numbers.
                .with_columns(pl.col("accession").str.split(".").list.first())
                # Sample name = file name up to the first ".".
                .with_columns(pl.lit(x.name.split(".", 1)[0]).alias("sample")),
                self.virulencefinder_tsv,
            )
        )
        return records.filter(~pl.col("sample").is_in(self.ctrl_sample))

    @cached_property
    def reads_records(self) -> pl.DataFrame:
        """Concatenated read-level records from all non-control samples."""
        logger.info("Read Reads")
        records = pl.concat(
            map(
                lambda x: pl.read_csv(x, separator="\t").with_columns(pl.lit(x.name.split(".", 1)[0]).alias("sample")),
                self.reads_tsv,
            )
        )
        return records.filter(~pl.col("sample").is_in(self.ctrl_sample))

    @cached_property
    def pathogen_records(self) -> pl.DataFrame:
        """Per-sample pathogen records with background-adjusted RPM.

        Adds, per taxid: the batch-wide maximum rpm/reads, the negative-control
        rpm/reads, and each sample's rpm ranking within the batch.
        """
        # Read and concatenate all pathogen tables; "-" marks a null.
        records = pl.concat(
            map(
                lambda x: pl.read_csv(
                    x,
                    separator="\t",
                    null_values=["-"],
                    schema_overrides={"taxid": str, "species_taxid": str, "genus_taxid": str},
                ).with_columns(pl.lit(x.name.split(".", 1)[0]).alias("sample")),
                self.pathogen_tsv,
            )
        )
        # Strong-signal adjustment: subtract 0.1% of the batch-wide maximum
        # RPM of the taxon (floored at 0) — presumably damps cross-sample
        # bleed-through from a very strong sample; confirm with pipeline docs.
        max_records = records.group_by("taxid").agg(pl.col("rpm").max().alias("max_rpm"), pl.col("uniq_reads").max().alias("max_reads"))
        records = records.join(max_records, on=["taxid"], how="left").with_columns(((pl.col("rpm") - pl.col("max_rpm") * 0.001).clip(lower_bound=0)).alias("rpm"))
        # Negative-control statistics per taxon.
        ctrl_records = (
            records.filter(pl.col("sample").is_in(self.ctrl_sample))
            .group_by("taxid")
            .agg(
                pl.col("rpm").max().alias("ctrl_rpm"),
                pl.col("uniq_reads").max().alias("ctrl_reads"),
            )
        )
        # NOTE(review): fill_null(0) zero-fills every null in the frame (e.g.
        # "-" values from the input), not only ctrl_rpm/ctrl_reads — confirm
        # that is intended before narrowing it.
        records = records.filter(~pl.col("sample").is_in(self.ctrl_sample)).join(ctrl_records, on=["taxid"], how="left").fill_null(0)
        # Rank each sample's RPM within the batch for every taxon (1 = highest).
        rank_records = pl.concat(
            map(
                lambda x: x[1].sort("rpm", descending=True).with_row_index(name="sample_ranking", offset=1).select(["taxid", "sample", "sample_ranking"]),
                records.group_by(["taxid"]),
            )
        )
        return records.join(rank_records, on=["taxid", "sample"], how="left")

    @cached_property
    def classified_records(self) -> pl.DataFrame:
        """Pathogen records with a final signal label (强/中/弱/背景).

        Adjusted RPM >= 1000 is strong, >= 200 medium, else weak; two
        background checks (negative control, batch prevalence) can then
        demote a strong/medium signal to background.
        """
        # Initial signal from adjusted-RPM thresholds (flattened when-chain
        # instead of the original nested when/otherwise — same semantics).
        records = self.pathogen_records.with_columns(
            pl.when(pl.col("rpm").ge(1000))
            .then(pl.lit(Signal.S.value))
            .when(pl.col("rpm").ge(200))
            .then(pl.lit(Signal.M.value))
            .otherwise(pl.lit(Signal.W.value))
            .alias("signal")
        )
        # Control background: a strong/medium signal must exceed the negative
        # control's RPM by more than 10x, otherwise it is background.
        records = records.with_columns(
            pl.when(pl.col("signal").is_in([Signal.S.value, Signal.M.value]) & pl.col("ctrl_rpm").gt(0) & (pl.col("rpm") / pl.col("ctrl_rpm")).le(10))
            .then(pl.lit(Signal.B.value))
            .otherwise(pl.col("signal"))
            .alias("signal")
        )
        # Batch background: for a strong/medium signal, if more than 50% of
        # the batch (the sample itself included) shows an equal-or-stronger
        # signal for the same taxon, demote to background. "signals" ends up
        # as a "count/total" string for the report.
        records = (
            records.join(records.group_by("taxid").agg(pl.col("signal").alias("signals")), on=["taxid"], how="left")
            .with_columns(
                pl.when(pl.col("signal").eq(Signal.S.value))
                .then(pl.col("signals").list.count_matches(Signal.S.value))
                .when(pl.col("signal").eq(Signal.M.value))
                .then(pl.col("signals").list.count_matches(Signal.M.value) + pl.col("signals").list.count_matches(Signal.S.value))
                .otherwise(pl.lit(0))
                .alias("signals")
            )
            .with_columns(pl.when((pl.col("signals") / self.sample_num).gt(0.5)).then(pl.lit(Signal.B.value)).otherwise(pl.col("signal")).alias("signal"))
            .with_columns(pl.format("{}/{}", pl.col("signals"), self.sample_num).alias("signals"))
        )
        # Annotate tNGS target taxa at exact / species / genus level.
        # BUG FIX: the original renamed the target frame's "taxid" column onto
        # the existing "target" column (duplicate column names), joined on a
        # column absent from the right frame, and coalesced "species_target"
        # twice while never creating "genus_target". Rename the *target*
        # column instead, join each taxonomy level against the target taxids,
        # then coalesce exact > species > genus matches.
        if not self.target_records.is_empty():
            for key in ["taxid", "species_taxid", "genus_taxid"]:
                records = records.join(
                    self.target_records.rename({"target": key.replace("taxid", "target")}),
                    how="left",
                    left_on=key,
                    right_on="taxid",
                )
            # NOTE(review): the original comment said "filter target species"
            # but the code only annotates; confirm whether non-target rows
            # should additionally be dropped here.
            records = records.with_columns(
                pl.col("target").fill_null(pl.col("species_target")).fill_null(pl.col("genus_target"))
            ).drop(["species_target", "genus_target"])
        return records

    def run(self):
        """Write all result tables into ``result_zip`` as TSV entries."""
        with zipfile.ZipFile(self.result_zip, "w", zipfile.ZIP_DEFLATED) as zip_writer:
            zip_writer.writestr("RES.tsv", self.res_records.write_csv(separator="\t", null_value="-"))
            zip_writer.writestr("VF.tsv", self.vf_records.write_csv(separator="\t", null_value="-"))
            zip_writer.writestr("Reads.tsv", self.reads_records.write_csv(separator="\t", null_value="-"))
            zip_writer.writestr("Pathogen.tsv", self.classified_records.write_csv(separator="\t", null_value="-"))


def set_options(sub_parser):
    """Register the ``tngs`` sub-command and wire it to ``Result.run``.

    ``sub_parser`` is an argparse sub-parsers action; the registered command
    validates its namespace into a ``Result`` model and runs it.
    """
    parser = sub_parser.add_parser("tngs", help="Output tNGS result ")
    # The four multi-file inputs share type/nargs/required; only the flag,
    # its short form, and the help text differ.
    for long_flag, short_flag, help_text in (
        ("--pathogen_tsv", "-patho", "input, Pathogen TSV files"),
        ("--reads_tsv", "-reads", "input, Reads TSV files"),
        ("--resfinder_tsv", "-res", "input, ResFinder TSV file"),
        ("--virulencefinder_tsv", "-vf", "input, VirulenceFinder TSV file"),
    ):
        parser.add_argument(long_flag, short_flag, type=Path, required=True, nargs="+", help=help_text)
    parser.add_argument("--target_tsv", "-tgt", type=Path, help="input, Target TSV file contains TaxID")
    parser.add_argument("--ctrl_sample", "-ctrl", nargs="+", required=True, help="parameter, Control sample")
    parser.add_argument("--result_zip", "-out", type=Path, required=True, help="output, Result zip file")
    parser.set_defaults(func=lambda args: Result.model_validate(vars(args)).run())
