from functools import cached_property
import json
import logging
from pathlib import Path
from typing import ClassVar, Generator
from typing_extensions import Self
from pydantic import BaseModel, field_validator
import polars as pl

# Configure root logging once at import time; every logger in this process
# inherits the timestamped "name - level - message" format below.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("Pathogen")


class FastaRecord(BaseModel):
    taxid: str
    query_name: str
    sequence: str

    @classmethod
    def from_fastq(cls, fastq: Path) -> Generator[Self, None, None]:
        with open(fastq) as reader:
            title, seq = "", ""
            for i, line in enumerate(reader):
                match i % 4:
                    case 0:
                        if title and seq:
                            yield cls(taxid=title.rsplit("|", 1)[-1], query_name=title.split(" ", 1)[0], sequence=seq)
                        title = line.strip().lstrip("@")
                    case 1:
                        seq = line.strip()


class Kraken(BaseModel):
    """Integrate Kraken2 and Bracken classification results.

    Produces two TSV files derived from ``out_prefix``:
    ``<prefix>.pathogen.tsv`` (per-taxon summary with species/genus context and
    rankings) and ``<prefix>.reads.tsv`` (per-read assignments with sequences).
    """

    # Scaling factor for reads-per-million (rpm).
    Mbp: ClassVar[int] = 1000 * 1000
    fastp_json: Path          # Fastp JSON report; source of the post-filter read total
    kraken2_report_tsv: Path  # Kraken2 report TSV (rank / taxid / name columns used)
    kraken2_fastq: Path       # Kraken2-annotated FASTQ with taxids in the headers
    bracken_out_tsv: list[Path]  # one or more Bracken abundance tables
    out_prefix: str           # output path prefix; parent directory is created on validation

    @field_validator("out_prefix", mode="before")
    @classmethod
    def validate_out_prefix(cls, v: str) -> str:
        """Ensure the output directory exists before any result file is written."""
        Path(v).parent.mkdir(parents=True, exist_ok=True)
        return v

    @cached_property
    def pathogen_tsv(self) -> Path:
        """Path of the per-pathogen summary table."""
        return Path(f"{self.out_prefix}.pathogen.tsv")

    @cached_property
    def reads_tsv(self) -> Path:
        """Path of the per-read table."""
        return Path(f"{self.out_prefix}.reads.tsv")

    @cached_property
    def total_reads(self) -> int:
        """Total reads after filtering, taken from the Fastp JSON summary."""
        with open(self.fastp_json) as reader:
            data = json.load(reader)
            return data["summary"]["after_filtering"]["total_reads"]

    @cached_property
    def bracken_records(self) -> pl.DataFrame:
        """Concatenate all Bracken tables and add a reads-per-million column.

        Columns: taxid, uniq_reads (kraken_assigned_reads), total_reads
        (new_est_reads), rel_abund, rpm.
        """
        dataframes = []
        for bracken_out_tsv in self.bracken_out_tsv:
            logger.info("Read %s", bracken_out_tsv)
            dataframes.append(
                pl.read_csv(
                    bracken_out_tsv,
                    separator="\t",
                    columns=["taxonomy_id", "kraken_assigned_reads", "new_est_reads", "fraction_total_reads"],
                    new_columns=["taxid", "uniq_reads", "total_reads", "rel_abund"],
                    schema_overrides={"taxid": str},
                )
            )
        # rpm is based on uniquely assigned reads, scaled per million filtered reads.
        return pl.concat(dataframes).with_columns((pl.col("uniq_reads") / self.total_reads * self.Mbp).alias("rpm"))

    @cached_property
    def kraken_records(self) -> pl.DataFrame:
        """Parse the Kraken2 report into one row per leaf taxon.

        Each leaf row carries the most recent genus ("G") and species ("S")
        context seen while walking the report top-down. Human (taxid 9606)
        is excluded from the result.
        """
        records = pl.read_csv(self.kraken2_report_tsv, separator="\t", has_header=False, columns=[3, 4, 5], new_columns=["rank", "taxid", "name"], schema_overrides={"taxid": str})
        records = records.with_columns(pl.col("name").str.strip_chars(" ").alias("name"))
        # Depth below species level: "S" -> 0, "S1" -> 1, "S2" -> 2, ...
        species_records = (
            records.filter(pl.col("rank").str.starts_with("S"))
            .with_columns(pl.col("rank").str.replace("[A-Z]", "").replace("", "0").cast(pl.Int64).alias("level"))
            .select(["taxid", "level"])
        )
        # A species-level row is a leaf when the next row does not descend
        # below it (arg_max of each 2-row window is 0; the final row — whose
        # window has a single row — is therefore always a leaf).
        leaf_records = pl.concat(
            map(
                lambda x: x[:1],
                filter(
                    lambda y: y["level"].arg_max() == 0,
                    map(lambda i: species_records.slice(i, 2), range(species_records.shape[0])),
                ),
            )
        ).with_columns(pl.lit("Y").alias("is_leaf"))
        records = records.join(leaf_records, on="taxid", how="left").with_columns(pl.col("is_leaf").fill_null("N").alias("is_leaf"))
        # Walk the report in order, remembering the current genus/species context.
        pathogen_row, pathogen_rows = {}, []
        for row in records.iter_rows(named=True):
            if row["rank"] == "G":
                pathogen_row.update({"genus_taxid": row["taxid"], "genus_name": row["name"]})
            elif row["rank"] == "S":
                pathogen_row.update({"species_taxid": row["taxid"], "species_name": row["name"]})
            if row["is_leaf"] == "Y":
                pathogen_row.update({"taxid": row["taxid"], "name": row["name"], "rank": "species" if row["rank"] == "S" else "no rank"})
                pathogen_rows.append(pathogen_row.copy())
        # 9606 is Homo sapiens; drop host reads from the pathogen table.
        return pl.DataFrame(pathogen_rows).filter(pl.col("taxid").ne("9606"))

    def write_pathogen(self) -> None:
        """Join Kraken leaf taxa with Bracken counts and write the pathogen TSV.

        Adds species- and genus-level aggregate columns (prefixed
        ``species_`` / ``genus_``) plus ranking columns ordered by uniquely
        assigned reads. Nulls are written as "-".
        """
        logger.info("Count reads by taxid, species, genus taxid")
        records = self.kraken_records.join(self.bracken_records, on="taxid", how="left")
        # Species-level statistics.
        columns_dict = dict(map(lambda x: (x, f"species_{x}"), self.bracken_records.columns))
        records = records.join(self.bracken_records.rename(columns_dict), on="species_taxid", how="left")
        records = records.join(
            records.select(["species_taxid", "species_uniq_reads"])
            .unique(["species_taxid"])
            .sort(["species_uniq_reads"], descending=True)
            .with_row_index(name="species_ranking", offset=1)
            .select(["species_taxid", "species_ranking"]),
            on="species_taxid",
            how="left",
        )
        # Genus-level statistics.
        columns_dict = dict(map(lambda x: (x, f"genus_{x}"), self.bracken_records.columns))
        records = records.join(self.bracken_records.rename(columns_dict), on="genus_taxid", how="left")
        # Keep only taxa that Bracken actually counted, most abundant first.
        records = records.filter(pl.col("uniq_reads").is_not_null()).sort(["uniq_reads"], descending=True)
        records = records.join(
            records.select(["genus_taxid", "genus_uniq_reads"])
            .unique(["genus_taxid"])
            .sort(["genus_uniq_reads"], descending=True)
            .with_row_index(name="genus_ranking", offset=1)
            .select(["genus_taxid", "genus_ranking"]),
            on="genus_taxid",
            how="left",
        )
        logger.info("Write pathogen data to %s", self.pathogen_tsv)
        records.write_csv(self.pathogen_tsv, separator="\t", null_value="-")

    def write_reads(self) -> None:
        """Write the per-read table (taxid, species_taxid, query_name, sequence).

        Only reads whose taxid appears in the pathogen table are kept.
        """
        logger.info("Read %s", self.pathogen_tsv)
        pathogen_records = pl.read_csv(self.pathogen_tsv, separator="\t", columns=["taxid", "species_taxid"]).cast(pl.String)
        logger.info("Read sequence from %s", self.kraken2_fastq)
        records = pl.DataFrame(map(lambda x: x.model_dump(), FastaRecord.from_fastq(self.kraken2_fastq)))
        records = (
            records.join(pathogen_records, on="taxid", how="left")
            .filter(pl.col("species_taxid").is_not_null())
            .select(
                ["taxid", "species_taxid", "query_name", "sequence"],
            )
        )
        logger.info("Write reads to %s", self.reads_tsv)
        records.sort(["species_taxid", "taxid"]).write_csv(self.reads_tsv, separator="\t")

    def run(self) -> None:
        """Produce both output tables, skipping any that already exist."""
        if not self.pathogen_tsv.exists():
            self.write_pathogen()
        if not self.reads_tsv.exists():
            self.write_reads()


def set_options(sub_parser):
    """Register the ``kb`` sub-command (integrate Kraken2 + Bracken results).

    ``sub_parser`` is the object returned by ``ArgumentParser.add_subparsers``.
    On success the parsed namespace carries ``func``, which validates the
    arguments into a :class:`Kraken` and runs it.
    """
    parser = sub_parser.add_parser("kb", help="Integrated results of Kraken2 and Bracken")
    parser.add_argument("--kraken2_report_tsv", "-kraken2", type=Path, required=True, help="input, Kraken2 report TSV file")
    # Help text fixed: this file is consumed as FASTQ (see FastaRecord.from_fastq), not FASTA.
    parser.add_argument("--kraken2_fastq", "-fastq", type=Path, required=True, help="input, Kraken2 FASTQ file")
    parser.add_argument("--bracken_out_tsv", "-bracken", type=Path, required=True, nargs="+", help="input, Bracken subspecies TSV file")
    parser.add_argument("--fastp_json", "-fastp", type=Path, required=True, help="input, Fastp JSON file")
    parser.add_argument("--out_prefix", "-out", required=True, help="output, Output prefix")
    parser.set_defaults(func=lambda args: Kraken.model_validate(vars(args)).run())
