import argparse
from concurrent import futures
from functools import cached_property
import gzip
import logging
from math import log
import os
from pathlib import Path
from typing import IO, ClassVar
from pydantic import BaseModel
import polars as pl

# Root logger configuration for the whole pipeline; every class below logs
# through this shared "MetaGenomics" logger.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("MetaGenomics")


class MetaSpecies(BaseModel):
    """Build a species-level taxonomy table from an NCBI assembly summary.

    Reads taxids from ``assembly_summary_tsv``, resolves their lineages with
    TaxonKit, and writes one annotated row per taxid to ``species_tsv``.
    """

    # Organism groups to keep; mirrors MultiMetaGenomics.organism_types.
    # Fix: this attribute was referenced by taxid_records but never defined on
    # this class, which raised AttributeError at runtime.
    organism_types: ClassVar[list[str]] = ["archaea", "bacteria", "fungi", "viral", "protozoa"]

    assembly_summary_tsv: Path
    taxdump_dir: Path
    species_tsv: Path

    @cached_property
    def taxid_records(self) -> pl.DataFrame:
        """Unique taxids with their organism type, filtered to ``organism_types``."""
        logger.info("Read %s", self.assembly_summary_tsv)
        records = pl.read_csv(
            self.assembly_summary_tsv,
            separator="\t",
            comment_prefix="##",
            columns=["taxid", "group"],
            schema_overrides={"taxid": str, "group": str},
        )
        return records.unique(["taxid"]).rename({"group": "organism_type"}).filter(pl.col("organism_type").is_in(self.organism_types))

    def write(self) -> None:
        """Resolve lineages for all taxids in batches and write ``species_tsv``.

        Fix: was annotated ``-> pl.DataFrame`` but the method returns nothing;
        it only writes the TSV file.
        """
        import pytaxonkit  # deferred: heavy import, only needed here

        taxid_series = self.taxid_records["taxid"].unique()
        logger.info("Run TaxonKit: total %d", taxid_series.len())
        dataframes = []
        step = 5000  # TaxonKit batch size
        for start in range(0, taxid_series.len(), step):
            logger.info("Run TaxonKit: %d-%d", start + 1, start + step)
            df = (
                pl.DataFrame(pl.from_pandas(pytaxonkit.lineage(taxid_series[start : start + step].to_list(), data_dir=self.taxdump_dir)))
                .cast(pl.String)
                .rename({"TaxID": "taxid", "Name": "name", "Rank": "rank"})
                .with_columns(pl.col("LineageTaxIDs").str.split(";").alias("lineage_taxids"))
                .with_columns(pl.col("Lineage").str.split(";").alias("lineages"))
                # Presumably the last lineage element is the species and the
                # second-to-last the genus — TODO confirm against TaxonKit output.
                # Empty lineages fall back to the record's own taxid/name.
                .with_columns(pl.col("lineage_taxids").list.get(-1).replace("", None).fill_null(pl.col("taxid")).alias("species_taxid"))
                .with_columns(pl.col("lineage_taxids").list.get(-2).replace("", "-").alias("genus_taxid"))
                .with_columns(pl.col("lineages").list.get(-1).replace("", None).fill_null(pl.col("name")).alias("species_name"))
                .with_columns(pl.col("lineages").list.get(-2).replace("", "-").alias("genus_name"))
                .select(["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "rank"])
            )
            dataframes.append(df)
        genus_records: pl.DataFrame = pl.concat(dataframes)
        records = self.taxid_records.join(genus_records, on="taxid", how="left").select(
            ["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "organism_type", "rank"]
        )
        logger.info("write %s", self.species_tsv)
        records.write_csv(self.species_tsv, separator="\t", null_value="-")


class MetaSummary(BaseModel):
    """Join accession→taxid records with species annotations into a summary TSV."""

    # Shared across all instances: the caller loads the (large) accession/taxid
    # table once into this class attribute before instantiating.
    accession_records: ClassVar[pl.DataFrame] = pl.DataFrame()
    species_tsv: Path
    summary_tsv: Path

    class Config:
        arbitrary_types_allowed = True

    @cached_property
    def species_records(self) -> pl.DataFrame:
        """Species annotation table with every column as string ("-" read as null)."""
        logger.info("Read %s", self.species_tsv)
        frame = pl.read_csv(self.species_tsv, separator="\t", null_values=["-"])
        return frame.cast(pl.String)

    def write(self):
        """Left-join accessions onto species rows, drop unmatched accessions, write TSV."""
        joined = self.accession_records.join(self.species_records, on="taxid", how="left")
        records = joined.filter(pl.col("species_taxid").is_not_null())
        logger.info("Write %s", self.summary_tsv)
        records.write_csv(self.summary_tsv, separator="\t", null_value="-")


class MetaGenome(BaseModel):
    """Extract the sequences listed in ``summary_tsv`` from ``nt_fasta`` and
    build a bbmap reference index for them."""

    nt_fasta: Path
    summary_tsv: Path
    genome_fasta: Path
    build: int  # bbmap build number for this reference

    @cached_property
    def accession_dict(self) -> dict[str, bool]:
        """Membership map of accessions to keep, loaded from ``summary_tsv``."""
        logger.info("Read %s", self.summary_tsv)
        accessions = pl.read_csv(self.summary_tsv, separator="\t", columns=["accession"])["accession"]
        return {accession: True for accession in accessions}

    def write(self) -> None:
        """Stream ``nt_fasta`` and copy records whose accession is in ``accession_dict``.

        Fixes: removed a leftover debug ``print``; was annotated ``-> Path``
        but returns nothing; headers without a description (no space) kept the
        trailing newline in the accession, so they could never match.
        """
        logger.info("Write %s", self.genome_fasta)
        with open(self.nt_fasta, "r") as reader, open(self.genome_fasta, "w") as writer:
            seq, acc = "", ""
            for line in reader:
                if line[0] == ">":
                    if acc and self.accession_dict.get(acc):
                        writer.write(f">{acc}\n{seq}")
                    # Strip the newline before splitting so space-less headers
                    # yield a clean accession.
                    seq, acc = "", line[1:].rstrip("\n").split(" ", 1)[0]
                else:
                    seq += line
            # Flush the final record — the loop only writes on the next header.
            if acc and self.accession_dict.get(acc):
                writer.write(f">{acc}\n{seq}")
            logger.info("Written %s", self.genome_fasta)

    def bbmap_build(self):
        """Run ``bbmap.sh`` to index ``genome_fasta``; raise on a nonzero exit status."""
        command = f"bbmap.sh ref={self.genome_fasta} path={self.genome_fasta.parent.parent} build={self.build}"
        # Fix: check != 0 rather than > 0 so any nonzero status is an error.
        if os.system(command) != 0:
            raise RuntimeError(f"run bbmap build fail: {command}")


class MetaGenomics(BaseModel):
    """Pipeline for one organism type: species table → accession summary → genome FASTA."""

    workdir: Path
    nt_fasta: Path
    taxdump_dir: Path
    accession2taxid_tsv: Path
    build: int  # bbmap build number forwarded to MetaGenome

    @cached_property
    def species_tsv(self) -> Path:
        return self.workdir / "species.tsv"

    @cached_property
    def assembly_summary_tsv(self) -> Path:
        return self.workdir / "assembly_summary.txt"

    @cached_property
    def summary_tsv(self) -> Path:
        return self.workdir / "accession_summary.tsv"

    @cached_property
    def genome_fasta(self) -> Path:
        return self.workdir / "genome.fasta"

    def run(self):
        """Run the pipeline stages, skipping stages whose output file already exists."""
        if not self.species_tsv.exists():
            MetaSpecies(assembly_summary_tsv=self.assembly_summary_tsv, taxdump_dir=self.taxdump_dir, species_tsv=self.species_tsv).write()
        if not self.summary_tsv.exists():
            # The accession→taxid table is large; load it once and share it
            # across runs via the MetaSummary class attribute.
            if MetaSummary.accession_records.is_empty():
                logger.info("Read %s", self.accession2taxid_tsv)
                MetaSummary.accession_records = pl.read_csv(
                    self.accession2taxid_tsv, separator=" ", has_header=False, new_columns=["accession", "taxid"], schema_overrides={"taxid": str}
                )
            MetaSummary(species_tsv=self.species_tsv, summary_tsv=self.summary_tsv).write()

        # NOTE(review): unlike the steps above, the genome FASTA is rewritten
        # unconditionally on every run (the exists-guard was commented out) —
        # confirm this is intended.
        # NOTE(review): meta_genome.bbmap_build() is never invoked even though
        # `build` is configured — confirm whether indexing happens elsewhere.
        meta_genome = MetaGenome(nt_fasta=self.nt_fasta, summary_tsv=self.summary_tsv, genome_fasta=self.genome_fasta, build=self.build)
        meta_genome.write()


class MultiMetaGenomics(BaseModel):
    """Run the per-organism-type MetaGenomics pipeline for every organism type."""

    organism_types: ClassVar[list[str]] = ["archaea", "bacteria", "fungi", "viral", "protozoa"]
    workdir: Path
    nt_fasta: Path
    taxdump_dir: Path
    accession2taxid_tsv: Path

    def run(self):
        """Run one MetaGenomics pipeline per organism type, sequentially.

        Fix: the previous version spawned a ProcessPoolExecutor but never
        submitted any tasks to it (submission was commented out), so a pool of
        worker processes was created for nothing; the dead machinery is removed
        and the sequential behavior kept.
        """
        # Build numbers start at 3 — presumably 1-2 are reserved for other
        # bbmap references; TODO confirm.
        for build, organism_type in enumerate(self.organism_types, start=3):
            MetaGenomics(
                workdir=self.workdir / organism_type,
                nt_fasta=self.nt_fasta,
                taxdump_dir=self.taxdump_dir,
                accession2taxid_tsv=self.accession2taxid_tsv,
                build=build,
            ).run()


pathogen_dir = Path("/mnt/nas/database/pathogen_2/pathogen_genomics/")

if __name__ == "__main__":
    # CLI entry point: defaults point at the NAS pathogen database layout.
    parser = argparse.ArgumentParser(description="Pathogen Genome")
    cli_options = [
        ("-t", "--taxdump_dir", pathogen_dir / "taxdump", "taxdump directory"),
        ("-a", "--accession2taxid_tsv", Path("/mnt/nas/database/NT/nt.accession2taxid.txt"), "accession2taxid file"),
        ("-n", "--nt_fasta", Path("/mnt/nas/database/NT/nt.fasta"), "NT file"),
        ("-w", "--workdir", pathogen_dir, "output directory"),
    ]
    for short_flag, long_flag, default_value, help_text in cli_options:
        parser.add_argument(short_flag, long_flag, type=Path, default=default_value, help=help_text)
    # Validate the parsed namespace into the pipeline model and run it.
    parser.set_defaults(func=lambda args: MultiMetaGenomics.model_validate(vars(args)).run())
    args = parser.parse_args()
    args.func(args)

# class Species(BaseModel):
#     organism_types: ClassVar[list[str]] = ["archaea", "bacteria", "fungi", "viral", "protozoa"]
#     assembly_summary_dir: Path
#     taxdump_dir: Path
#     outdir: Path

#     @cached_property
#     def meta_species_tsv(self) -> Path:
#         return self.outdir / "meta.species.tsv"

#     @cached_property
#     def taxid_records(self) -> pl.DataFrame:
#         logger.info("Read %s", self.assembly_summary_dir)
#         dataframes = []
#         for assembly_summary_tsv in self.assembly_summary_dir.glob("*.txt"):
#             t_records = (
#                 pl.read_csv(assembly_summary_tsv, separator="\t", comment_prefix="##", columns=["taxid", "group"], schema_overrides={"taxid": str, "group": str})
#                 .rename({"group": "organism_type"})
#                 .filter(pl.col("organism_type").is_in(self.organism_types))
#             )
#             dataframes.append(t_records)
#         return pl.concat(dataframes)

#     def write_species(self) -> pl.DataFrame:
#         import pytaxonkit

#         taxid_series = self.taxid_records["taxid"].unique()
#         logger.info("Run TaxonKit: total %d", taxid_series.len())
#         dataframes = []
#         step = 5000
#         for start in range(0, taxid_series.len(), step):
#             logger.info("Run TaxonKit: %d-%d", start + 1, start + step)
#             df = (
#                 pl.DataFrame(pl.from_pandas(pytaxonkit.lineage(taxid_series[start : start + step].to_list(), data_dir=self.taxdump_dir)))
#                 .cast(pl.String)
#                 .rename({"TaxID": "taxid", "Name": "name", "Rank": "rank"})
#                 .with_columns(pl.col("LineageTaxIDs").str.split(";").alias("lineage_taxids"))
#                 .with_columns(pl.col("Lineage").str.split(";").alias("lineages"))
#                 .with_columns(pl.col("lineage_taxids").list.get(-1).replace("", None).fill_null(pl.col("taxid")).alias("species_taxid"))
#                 .with_columns(pl.col("lineage_taxids").list.get(-2).replace("", "-").alias("genus_taxid"))
#                 .with_columns(pl.col("lineages").list.get(-1).replace("", None).fill_null(pl.col("name")).alias("species_name"))
#                 .with_columns(pl.col("lineages").list.get(-2).replace("", "-").alias("genus_name"))
#                 .select(["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "rank"])
#             )
#             dataframes.append(df)
#         genus_records: pl.DataFrame = pl.concat(dataframes)
#         records = self.taxid_records.join(genus_records, on="taxid", how="left").select(
#             ["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "organism_type", "rank"]
#         )
#         logger.info("write %s", self.meta_species_tsv)
#         records.write_csv(self.meta_species_tsv, separator="\t", null_value="-")


# class Summary(Species):

#     accession2taxid_tsv: Path

#     @cached_property
#     def meta_summary_tsv(self):
#         return self.outdir / "meta.summary.tsv"

#     def write_summary(self):
#         species_records = pl.read_csv(self.meta_species_tsv, separator="\t", null_values=["-"]).cast(pl.String)
#         print(species_records)
#         taxid2accessions: dict[str, list[str]] = dict(map(lambda x: (x, []), species_records["taxid"]))
#         with gzip.open(self.accession2taxid_tsv, "rt") if self.accession2taxid_tsv.suffix == ".gz" else open(self.accession2taxid_tsv, "r") as reader:
#             for accession, taxid in map(lambda x: x.split("\t")[1:3], reader):
#                 if taxid2accessions.get(taxid) is not None:
#                     taxid2accessions[taxid].append(accession)
#         records = (
#             pl.DataFrame(list(taxid2accessions.items()), schema={"taxid": str, "accession": list[str]}).explode(["accession"]).filter(pl.col("accession").is_not_null())
#         ).select(["accession", "taxid"])
#         records = records.join(species_records, on="taxid", how="left")
#         print(records)
#         records.write_csv(self.meta_summary_tsv, separator="\t", null_value="-")


# class MetaGenomics(Summary):
#     nt_genome_fasta: Path

#     def write_genome_fasta(self, accessions: pl.Series, out_fasta: Path) -> Path:
#         logger.info("Write %s", out_fasta)
#         acc_dict = dict(map(lambda x: (x, True), accessions))
#         with gzip.open(self.nt_genome_fasta, "wt") as reader, open(out_fasta, "w") as writer:
#             seq, acc = "", ""
#             for line in reader:
#                 if line[0] == ">":
#                     if acc and acc_dict.get(acc):
#                         writer.write(f">{acc}\n{seq}")
#                     seq, acc = "", line[1:].split(" ", 1)[0]
#                 else:
#                     seq += line
#             if acc and acc_dict.get(acc):
#                 writer.write(f">{acc}\n{seq}")
#             logger.info("Written %s", out_fasta)

#     def write_all_genome_fasta(self):
#         logger.info("Read %s", self.meta_species_tsv)
#         records = pl.read_csv(self.meta_species_tsv, separator="\t", columns=["accession", "organism_type"]).cast(pl.String)
#         with futures.ProcessPoolExecutor(max_workers=len(self.organism_types)) as executor:
#             tasks = []
#             for (organism_type,), t_records in records.group_by(["organism_type"]):
#                 out_fasta = self.outdir / "meta_genomes" / f"{organism_type}.fasta"
#                 out_fasta.parent.mkdir(exist_ok=True, parents=True)
#                 tasks.append(executor.submit(self.write_genome_fasta, t_records["accession"], out_fasta))
#             for task in futures.as_completed(tasks):
#                 task.result()

#     def run(self) -> None:
#         if not self.meta_species_tsv.exists():
#             self.write_species()
#         if not self.meta_summary_tsv.exists():
#             self.write_summary()
#         self.write_all_genome_fasta()


#     @cached_property
#     def summary_tsv(self) -> Path:
#         return self.outdir / "meta.summary.tsv"

#     @cached_property
#     def species_tsv(self) -> Path:
#         return self.outdir / "meta.species.tsv"

#     @cached_property
#     def contig_tsv(self) -> Path:
#         return self.outdir / "meta.contig.tsv"

#     def write_species(self) -> pl.DataFrame:
#         import pytaxonkit

#         dataframes = []
#         for summary_file in self.assembly_summary_dir.glob("*.assembly_summary.txt"):
#             logger.info("Read %s", summary_file)
#             organism_type = summary_file.name.split(".", 1)[0]
#             df = pl.read_csv(summary_file, separator="\t", comment_prefix="##", columns=["taxid"]).cast(pl.String).with_columns(pl.lit(organism_type).alias("organism_type"))
#             dataframes.append(df)
#         records: pl.DataFrame = pl.concat(dataframes)
#         records.write_csv("tt.tsv", separator="\t")
#         records = records.unique(["taxid", "organism_type"])
#         taxid_series = records["taxid"].unique()
#         logger.info("Run TaxonKit: total %d", taxid_series.len())
#         dataframes = []
#         step = 5000
#         for start in range(0, taxid_series.len(), step):
#             logger.info("Run TaxonKit: %d-%d", start + 1, start + step)
#             df = (
#                 pl.DataFrame(pl.from_pandas(pytaxonkit.lineage(taxid_series[start : start + step].to_list(), data_dir=self.taxdump_dir)))
#                 .cast(pl.String)
#                 .rename({"TaxID": "taxid", "Name": "name", "Rank": "rank"})
#                 .with_columns(pl.col("LineageTaxIDs").str.split(";").alias("lineage_taxids"))
#                 .with_columns(pl.col("Lineage").str.split(";").alias("lineages"))
#                 .with_columns(pl.col("lineage_taxids").list.get(-1).replace("", None).fill_null(pl.col("taxid")).alias("species_taxid"))
#                 .with_columns(pl.col("lineage_taxids").list.get(-2).replace("", "-").alias("genus_taxid"))
#                 .with_columns(pl.col("lineages").list.get(-1).replace("", None).fill_null(pl.col("name")).alias("species_name"))
#                 .with_columns(pl.col("lineages").list.get(-2).replace("", "-").alias("genus_name"))
#                 .select(["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "rank"])
#             )
#             dataframes.append(df)
#         genus_records: pl.DataFrame = pl.concat(dataframes)
#         records = (
#             records.join(genus_records, on="taxid", how="left")
#             .fill_null("-")
#             .select(["taxid", "name", "species_taxid", "species_name", "genus_taxid", "genus_name", "organism_type", "rank"])
#         )
#         logger.info("write %s", self.species_tsv)
#         records.write_csv(self.species_tsv, separator="\t")

#     def write_summary(self) -> pl.DataFrame:
#         logger.info("Read %s", self.species_tsv)
#         species_records = pl.read_csv(self.species_tsv, separator="\t").cast(pl.String)
#         taxid_dict = dict(map(lambda x: (x, True), species_records["taxid"]))
#         logger.info("Read %s", self.accession2taxid_tsv)
#         with gzip.open(self.accession2taxid_tsv, "rt") as reader:
#             records = pl.DataFrame(filter(lambda x: taxid_dict.get(x[1]), map(lambda y: y.strip().split("\t")[1:3], reader)), schema={"accession": str, "taxid": str})
#             records = records.join(species_records, on="taxid", how="left")
#             records.write_csv(self.summary_tsv, separator="\t")

#     def write_genome_fasta(self):
#         logger.info("Read %s", self.summary_tsv)
#         accession_records = pl.read_csv(self.summary_tsv, separator="\t", columns=["accession", "organism_type"], null_values=["-"])
#         accession_dict = dict(zip(accession_records["accession"], accession_records["organism_type"]))
#         logger.info("Read %s", self.nt_genome_fasta)
#         writers: dict[str, IO] = {}
#         with gzip.open(self.nt_genome_fasta, "rt") as reader:
#             title, seq, acc = "", "", ""
#             for line in reader:
#                 if line[0] == ">":
#                     if acc:
#                         organism_type = accession_dict.get(acc)
#                         if organism_type:
#                             if not writers.get(organism_type):
#                                 writers[organism_type] = gzip.open(self.outdir / f"{organism_type}.genome.fasta.gz", "wt")
#                             writers[organism_type].write(f"{title}{seq}")
#                     title, seq, acc = line, "", line[1:].split(" ", 1)[0]
#                 else:
#                     seq += line

#             if acc:
#                 organism_type = accession_dict.get(acc)
#                 if organism_type:
#                     if not writers.get(organism_type):
#                         writers[organism_type] = gzip.open(self.outdir / f"{organism_type}.genome.fasta.gz", "wt")
#                     writers[organism_type].write(f"{title}{seq}")
#             logger.info("Written genome fasta")

#     def run(self):
#         # if not self.species_tsv.exists():
#         #     self.write_species()
#         # if not self.summary_tsv.exists():
#         #     self.write_summary()
#         self.write_genome_fasta()
