import argparse
from functools import cached_property
import re
import logging
from concurrent import futures
from typing import Any, Generator
from urllib.parse import quote
from pathlib import Path
from pysam import FastaFile, VariantHeader, tabix_compress, tabix_index  # pylint: disable=E0611
from pre_base import set_parser, PrepareBase, logger

# Configure root logging once at import time.
# NOTE(review): this rebinds the `logger` name imported from pre_base above,
# so this module logs under "Prepare File" instead of pre_base's logger —
# confirm the shadowing is intended.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("Prepare File")


class PreparedbNSFP(PrepareBase):
    """Convert a dbNSFP tab-delimited dump into per-chromosome VCFs.

    For every chromosome present both in the reference FASTA and in the
    ``.txt.idx`` byte-offset index next to ``input_file``, writes a
    ``<chrom>.vcf``, bgzip-compresses it and tabix-indexes it under
    ``self.outdir``. ``reference_file`` and ``outdir`` are inherited from
    ``PrepareBase`` (not visible in this file).
    """

    # dbNSFP .txt dump; a sibling "<stem>.txt.idx" index file is required
    # (see iter_reference).
    input_file: Path

    def iter_reference(self) -> Generator[tuple[str, int, list[tuple[int, int]]], Any, None]:
        """Yield ``(chrom, contig_length, [(start, end), ...])`` per chromosome.

        The intervals come from the ``.txt.idx`` sidecar: non-``#`` rows are
        split on tabs and ``row[0]`` is taken as the chromosome with
        ``(row[2], row[3])`` as an offset pair into ``input_file``.
        NOTE(review): assumed to be *byte* offsets delimiting that
        chromosome's slice of the dump — confirm against the idx file spec.
        Only contigs with short names (<= 5 chars, i.e. primary chromosomes)
        are kept, and mitochondrial contigs are skipped.
        NOTE(review): "chrMT" is 5 chars and not in the exclusion list, so it
        would slip through this filter — verify that is acceptable.
        """
        data = {}
        with open(self.input_file.with_suffix(".txt.idx")) as reader:
            for row in map(lambda x: x.strip().split("\t"), filter(lambda y: not y.startswith("#"), reader)):
                data.setdefault(row[0], []).append((int(row[2]), int(row[3])))
        with FastaFile(str(self.reference_file)) as reader:
            for chrom, size in filter(lambda x: len(x[0]) <= 5 and x[0] not in ["M", "MT", "chrM"], zip(reader.references, reader.lengths)):
                # Only yield chromosomes that actually have data in the dump.
                if data.get(chrom):
                    yield chrom, size, data[chrom]

    @cached_property
    def columns(self) -> list[str]:
        """Header column names of ``input_file``, sanitized for VCF use.

        Non-word characters are stripped so each name is a legal VCF INFO ID.
        Cached: the file header is read only once per instance.
        """
        with open(self.input_file) as reader:
            return list(map(lambda x: re.sub(r"\W+", "", x), reader.readline().strip().split("\t")))

    def header(self, chrom: str, size: int) -> VariantHeader:
        """Build a single-contig VCF header exposing the selected columns.

        Only ``*_score``, ``GERP*``, ``*_pred`` and ``Interpro_domain``
        columns become INFO fields; scores/GERP are typed Float, the rest
        String, all with Number=1.
        """
        header = VariantHeader()
        header.contigs.add(id=chrom, length=size)
        for name in filter(lambda x: x.endswith("_score") or x.startswith("GERP") or x.endswith("_pred") or x == "Interpro_domain", self.columns):
            header.info.add(id=name, type="Float" if name.endswith("_score") or name.startswith("GERP") else "String", number=1, description=name)
        return header

    def process_chrom(self, chrom: str, size: int, intervals: list[tuple[int, int]]):
        """Write, compress and index ``<chrom>.vcf.gz`` from the given intervals.

        Skipped entirely if the .vcf.gz already exists (resumable). Runs in a
        worker process (see run), so ``self`` must stay picklable.
        NOTE(review): ``reader.seek(start)``/``readlines(end - start)`` treats
        the idx offsets as byte positions, but the file is opened in text mode
        where the readlines hint counts decoded characters — equivalent only
        for single-byte encodings with "\\n" line endings; confirm.
        """
        output_vcf = self.outdir / f"{chrom}.vcf"
        output_gz = output_vcf.with_suffix(".vcf.gz")
        if not output_gz.exists():
            with open(self.input_file) as reader, open(output_vcf, "w") as writer:
                writer.write(str(self.header(chrom, size)))
                for start, end in intervals:
                    reader.seek(start, 0)
                    # readlines(hint) stops once the hint is reached, so each
                    # interval is read without loading the whole dump.
                    for row in map(lambda x: x.strip().split("\t"), reader.readlines(end - start)):
                        # NOTE(review): this rebinds the `chrom` parameter with
                        # row[0]; harmless only if every row in the interval
                        # belongs to this chromosome — confirm.
                        # NOTE(review): assumes ref/alt live in columns 3/4 of
                        # the dump — verify against the dbNSFP column layout.
                        chrom, pos, ref, alt = row[0], int(row[1]), row[3], row[4]
                        info = {}
                        for name, val in zip(self.columns[5:], row[5:]):
                            # Only a subset of the columns is used
                            if name.endswith("_score") or name.startswith("GERP") or name.endswith("_pred") or name == "Interpro_domain":
                                # Drop placeholder values (".", "|", ";" runs)
                                val = re.sub(r"^[\.\|;]+$", "", val).strip(";")
                                if val:
                                    # Interpro_domain may contain VCF-reserved
                                    # characters, so percent-encode it; other
                                    # fields just swap ";" separators for "|"
                                    val = quote(val) if name == "Interpro_domain" else val.strip(";").replace(";", "|")
                                    # Validate numeric fields: scores and GERP
                                    # must parse as int/float (sci notation ok)
                                    if (name.endswith("_score") or name.startswith("GERP")) and not re.match(r"^-?\d+(\.\d+)?([eE][-+]?\d+)?$", val):
                                        continue
                                    info.setdefault(name, val)
                        if info:
                            info_text = ";".join(list(map(lambda x: f"{x[0]}={x[1]}", info.items())))
                            writer.write(f"{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t{info_text}\n")
                    logger.info("Processed %s:%s:%s", chrom, f"{start:,}", f"{end:,}")
            # Compress + index, then drop the uncompressed intermediate.
            tabix_compress(str(output_vcf), str(output_gz), force=True)
            tabix_index(str(output_gz), preset="vcf", force=True)
            output_vcf.unlink(missing_ok=True)
            logger.info("processed %s", output_gz)

    def run(self):
        """Process every chromosome in parallel (one worker per chromosome).

        The executor's ``with`` block joins all workers before the result
        loop runs; ``task.result()`` then re-raises any worker exception.
        """
        self.outdir.mkdir(parents=True, exist_ok=True)
        tasks = []
        with futures.ProcessPoolExecutor(max_workers=8) as executor:
            for chrom, size, intervals in self.iter_reference():
                # self.process_chrom(chrom, size, intervals)
                tasks.append(executor.submit(self.process_chrom, chrom, size, intervals))
        for task in tasks:
            task.result()


if __name__ == "__main__":
    # Build the CLI: the dbNSFP input flag plus the shared options that
    # pre_base.set_parser installs.
    cli = argparse.ArgumentParser(description="Prepare dbNSFP SNV")
    cli.add_argument("--input_file", "-i", type=Path, required=True, help="input, dbNSFP(42a) file")
    set_parser(parser=cli)

    def _entry(ns: argparse.Namespace) -> None:
        """Validate the parsed namespace into a PreparedbNSFP and run it."""
        PreparedbNSFP.model_validate(vars(ns)).run()

    cli.set_defaults(func=_entry)
    parsed = cli.parse_args()
    parsed.func(parsed)
