from functools import cached_property
import gzip
import logging
from pathlib import Path
from typing import Any, Literal
from typing_extensions import Self
import polars as pl
from pydantic import BaseModel
from pysam import FastaFile  # pylint: disable=E0611
from .transcript import Transcript


# Configure logging once at import time so loader progress messages are visible by default.
# NOTE(review): calling basicConfig() in a library module mutates the application's root
# logger configuration; consider leaving this to the application entry point — confirm
# no consumer relies on it before moving.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("OpenPVS1")


class BEDFrame(pl.DataFrame):
    """A polars DataFrame holding BED-style interval records (chrom/start/end/name)."""

    def fetch_row(self, chrom: str, start: int, end: int) -> dict[str, Any]:
        """Return the first stored record overlapping ``[start, end]`` on ``chrom``.

        An empty dict is returned when no record overlaps the query interval.
        """
        overlaps = (
            pl.col("chrom").eq(chrom)
            & pl.col("start").le(end)
            & pl.col("end").ge(start)
        )
        hits = self.filter(overlaps)
        if hits.is_empty():
            return {}
        return hits.row(0, named=True)

    @classmethod
    def read_bed(cls, bed_file: str | Path) -> Self:
        """Parse a headerless, tab-separated BED file into a four-column BEDFrame.

        BED12 inputs are expanded so that each block becomes its own row; the
        block's name records its parent name plus the block coordinates.
        Three-column inputs get a synthesized ``chrom:start-end`` name.
        """
        frame = pl.read_csv(bed_file, has_header=False, separator="\t", comment_prefix="#")
        frame = frame.rename({"column_1": "chrom", "column_2": "start", "column_3": "end"})

        def comma_ints(column: str) -> pl.Expr:
            # BED list fields are comma-terminated strings like "10,20,30,".
            return pl.col(column).str.strip_chars(",").str.split(",").cast(pl.List(pl.Int64)).alias(column)

        width = frame.shape[1]
        if width >= 12:
            frame = frame.rename({"column_11": "block_size", "column_12": "block_start", "column_4": "name"})
            frame = (
                frame.with_columns(comma_ints("block_size"), comma_ints("block_start"))
                .explode(["block_size", "block_start"])
                # Block offsets are relative to the feature start; resolve to absolute coords.
                .with_columns((pl.col("block_start") + pl.col("start")).alias("start"))
                .with_columns((pl.col("block_size") + pl.col("start")).alias("end"))
                .with_columns(pl.format("{}|{}-{}", pl.col("name"), pl.col("start"), pl.col("end")).alias("name"))
            )
        elif width > 3:
            frame = frame.rename({"column_4": "name"})
        else:
            frame = frame.with_columns(pl.format("{}:{}-{}", pl.col("chrom"), pl.col("start"), pl.col("end")).alias("name"))
        return cls(frame.select(["chrom", "start", "end", "name"]))


class LoadData(BaseModel):
    """Lazily loads and caches the reference data bundle used by OpenPVS1.

    Attributes:
        data_dir: Directory holding the packaged data files.
        genome_builder: UCSC-style genome build name.
        genome_file: Path to the reference genome FASTA (indexed for pysam).

    Every heavy resource is exposed as a ``cached_property`` so it is read
    from disk at most once per instance.
    """

    data_dir: Path
    genome_builder: Literal["hg19", "hg38"]
    genome_file: Path

    @cached_property
    def assembly(self) -> Literal["GRCh37", "GRCh38"]:
        """GRC assembly name corresponding to the UCSC build name."""
        return "GRCh37" if self.genome_builder == "hg19" else "GRCh38"

    @cached_property
    def maxent_score5_matrix_file(self) -> Path:
        return self.data_dir / "maxent_score5_matrix.txt.gz"

    @cached_property
    def maxent_score3_matrix_file(self) -> Path:
        return self.data_dir / "maxent_score3_matrix.txt.gz"

    @cached_property
    def exon_lof_popmax_file(self) -> Path:
        return self.data_dir / f"exon_lof_popmax_{self.genome_builder}.bed.gz"

    @cached_property
    def domain_file(self) -> Path:
        return self.data_dir / f"functional_domains_{self.genome_builder}.bed.gz"

    @cached_property
    def curated_region_file(self) -> Path:
        return self.data_dir / f"expert_curated_domains_{self.genome_builder}.bed.gz"

    @cached_property
    def hotspot_file(self) -> Path:
        return self.data_dir / f"mutational_hotspots_{self.genome_builder}.bed.gz"

    @cached_property
    def pathogenic_file(self) -> Path:
        return self.data_dir / f"clinvar_pathogenic_{self.assembly}.vcf.gz"

    @cached_property
    def pvs1_level_file(self) -> Path:
        return self.data_dir / "PVS1.level.gz"

    @cached_property
    def ncbi_refseq_file(self) -> Path:
        return self.data_dir / f"ncbiRefSeq_{self.genome_builder}.gpe.gz"

    @cached_property
    def maxent_matrix5(self) -> dict[str, float]:
        """MaxEnt 5' splice-site matrix: sequence key -> score.

        Each line of the gzipped file holds a key and a float score.
        """
        logger.info("Load MaxEnt matrix")
        with gzip.open(self.maxent_score5_matrix_file, "rt") as reader:
            # str.split() with no args already discards surrounding whitespace.
            return {key: float(score) for key, score in (line.split() for line in reader)}

    @cached_property
    def maxent_matrix3(self) -> dict[int, dict[int, float]]:
        """MaxEnt 3' splice-site matrix: outer index -> {inner index -> score}.

        Each line of the gzipped file holds two integer indices and a float score.
        """
        # Log for parity with maxent_matrix5 (was silently missing before).
        logger.info("Load MaxEnt matrix")
        matrix: dict[int, dict[int, float]] = {}
        with gzip.open(self.maxent_score3_matrix_file, "rt") as reader:
            for line in reader:
                n, m, s = line.split()
                matrix.setdefault(int(n), {})[int(m)] = float(s)
        return matrix

    @cached_property
    def exon_lof_popmax_records(self) -> BEDFrame:
        """Per-exon LoF popmax intervals for the configured genome build."""
        logger.info("Load exon lof popmax data")
        return BEDFrame.read_bed(self.exon_lof_popmax_file)

    @cached_property
    def domain_records(self) -> BEDFrame:
        """Functional-domain intervals for the configured genome build."""
        logger.info("Load domain data")
        return BEDFrame.read_bed(self.domain_file)

    @cached_property
    def curated_region_records(self) -> BEDFrame:
        """Expert-curated domain intervals for the configured genome build."""
        logger.info("Load curated data")
        return BEDFrame.read_bed(self.curated_region_file)

    @cached_property
    def hotspot_records(self) -> BEDFrame:
        """Mutational-hotspot intervals for the configured genome build."""
        logger.info("Load hotspot data")
        return BEDFrame.read_bed(self.hotspot_file)

    @cached_property
    def pathogenic_records(self) -> pl.DataFrame:
        """ClinVar pathogenic sites aggregated per (chrom, pos).

        Review-star values 0..4 are weighted 1/3, 1/2, 1, 1, 1 respectively;
        the per-position ``score`` is the sum of weights and ``count`` the
        number of records at that position.
        """
        logger.info("Load pathogenic data")
        records = pl.read_csv(
            self.pathogenic_file, has_header=False, separator="\t", comment_prefix="#", columns=[0, 1, 14], new_columns=["chrom", "pos", "star"], schema_overrides={"chrom": str}
        )
        records = (
            records.with_columns(
                # VCF chromosomes lack the "chr" prefix used by the BED resources.
                pl.format("chr{}", pl.col("chrom")).alias("chrom"),
                pl.col("star").replace([0, 1, 2, 3, 4], [1 / 3, 1 / 2, 1, 1, 1]).cast(pl.Float64).alias("score"),
            )
            .group_by(["chrom", "pos"])
            .agg(pl.col("score").sum().alias("score"), pl.len().alias("count"))
        )
        return records

    @cached_property
    def transcripts(self) -> dict[str, Transcript]:
        """RefSeq transcripts keyed by ``"chrom:name"`` (version-stripped accession)."""
        logger.info("Load transcript data")
        column_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15]
        column_names = ["name", "chrom", "strand", "tx_start", "tx_end", "cds_start", "cds_end", "exon_count", "exon_starts", "exon_ends", "gene", "exon_frames"]
        records = pl.read_csv(self.ncbi_refseq_file, separator="\t", has_header=False, columns=column_numbers, new_columns=column_names)
        # Drop the accession version suffix ("NM_000546.6" -> "NM_000546").
        records = records.with_columns(pl.col("name").str.split(".").list.first().alias("name"))
        for column in ["exon_starts", "exon_ends", "exon_frames"]:
            records = records.with_columns(pl.col(column).str.strip_chars(",").str.split(",").cast(pl.List(pl.Int64)).alias(column))
        # Attach the per-gene PVS1 level; genes without a level get ".".
        records = records.join(pl.read_csv(self.pvs1_level_file, has_header=False, separator="\t", new_columns=["gene", "level"]), on="gene", how="left").fill_null(".")
        return {
            f"{row['chrom']}:{row['name']}": Transcript.model_validate(row)
            for row in records.iter_rows(named=True)
        }

    @cached_property
    def genome_fasta(self) -> FastaFile:
        """Open handle to the indexed reference genome FASTA."""
        logger.info("Load genome data")
        # Older pysam versions expect a str filename rather than a PathLike.
        return FastaFile(str(self.genome_file))
