# -*- coding: utf-8 -*-
import os
import typing
from collections import defaultdict
from typing import Any, Callable, Mapping, Optional, Sequence, Union

import h5py
import numpy as np
from astropy.io import fits
from jsonargparse.typing import Path_fc, Path_fr
from pydantic import BaseModel, Field

from f2h5.config import DEFAULT_DATASET_KWARGS, SRC_ROOT
from f2h5.utils import import_class

if typing.TYPE_CHECKING:
    from astropy.io.fits.column import Column  # noqa: F401
    from astropy.io.fits.hdu.image import PrimaryHDU
    from astropy.io.fits.hdu.table import BinTableHDU
    from h5py import Group


# Module-level verbose printer. Remains None until `convert()` installs its
# closure; helper functions below call it to emit progress messages.
v_print: Callable | None = None


def convert(
        fits_files: Path_fr | list[Path_fr],
        output_file: Path_fc,
        mode: str = "a",
        dataset_kwargs: Optional[Mapping[str, dict]] = None,
        verbose: bool = False,
) -> None:
    """Convert one or more FITS files to a single HDF5 file.

    Each FITS file is converted into its own group stored under the root
    group of the output HDF5 file.

    Args:
        fits_files (list): FITS file(s) to be converted
        output_file (str): Output HDF5 file
        mode (str): Mode for opening the output HDF5 file
        dataset_kwargs (dict): Per-HDU, per-column keyword arguments used when
            creating HDF5 datasets; defaults to ``DEFAULT_DATASET_KWARGS``
        verbose (bool): Output detailed information
    """

    def _v_print(*args, **kwargs):
        if verbose:
            print(*args, **kwargs)

    # Install the module-level verbose printer used by the helpers below.
    global v_print
    v_print = _v_print

    if dataset_kwargs is None:
        dataset_kwargs = DEFAULT_DATASET_KWARGS

    # Validate and normalize the nested mapping {hdu_name: {column: kwargs}}
    # through the DatasetKwargs model. (The original re-checked `is not None`
    # here, which is redundant after the default assignment above.)
    ds_kwargs = {
        hdu_name: {k: DatasetKwargs(**v).as_kwargs() for k, v in hdu_val.items()}
        for hdu_name, hdu_val in dataset_kwargs.items()
    }

    add_fits_to_hdf5(output_file, fits_files, ds_kwargs, mode)


def print_default_config():
    """Print the default CLI configuration by re-invoking this script."""
    import subprocess

    config_file = os.path.join(SRC_ROOT, "cfgs", "conf.yaml")
    cmd = ["python", __file__, "--config", config_file, "--print_config"]
    subprocess.run(cmd)  # noqa: S607 S603


class Instantiable(BaseModel):
    """Base model whose fields may describe classes to be instantiated.

    A subclass sets ``cur_can_instantiate`` to True when the model itself
    carries a ``class_path`` (and optional ``init_args``) describing an
    object to construct; otherwise instantiation recurses into the fields.
    """

    # Internal flag: True when this model is itself directly instantiable.
    cur_can_instantiate: bool = Field(False, init=False, repr=False, exclude=True)

    def instantiate_classes(self):
        """Return the described instance, or self with fields instantiated in place."""
        if self.cur_can_instantiate:
            class_path = self.class_path
            # Fix: `init_args` may exist but be None (see H5PluginFilterKwargs);
            # the original passed `**None` to the constructor in that case.
            kwargs = getattr(self, "init_args", None) or {}
            klass = import_class(class_path)
            return klass(**kwargs)

        for field_name in self.__pydantic_fields__:
            # Fix: the original compared against "__cur_can_instantiate",
            # which never matched the actual field name, so the flag field
            # was needlessly processed too.
            if field_name == "cur_can_instantiate":
                continue
            field = getattr(self, field_name)
            setattr(self, field_name, instantiate_classes(field))
        return self


def instantiate_classes(obj: Any):
    """Recursively instantiate every ``Instantiable`` found within *obj*.

    Mappings, lists and tuples are rebuilt with their own type; any other
    value is returned unchanged.
    """
    if isinstance(obj, Instantiable):
        return obj.instantiate_classes()
    if isinstance(obj, Mapping):
        pairs = ((key, instantiate_classes(val)) for key, val in obj.items())
        # noinspection PyArgumentList
        return type(obj)(pairs)
    if isinstance(obj, (list, tuple)):
        return type(obj)(instantiate_classes(item) for item in obj)
    return obj


class H5PluginFilterKwargs(Instantiable):
    """
    Describes an HDF5 compression-filter plugin by import path.

    ``class_path`` is the dotted path of the filter class (e.g.
    ``hdf5plugin.Blosc2``) and ``init_args`` its constructor arguments.
    Instantiating this model yields the live filter object.
    """
    class_path: str
    init_args: Optional[Mapping[str, Any]] = None
    cur_can_instantiate: bool = Field(True, init=False, repr=False, exclude=True)


class FilterKwargs(Instantiable):
    """
    Filter-related keyword arguments for an HDF5 dataset.

    Currently only compression is supported: ``compression`` may be a plain
    value accepted by h5py or an ``H5PluginFilterKwargs`` description.
    """
    compression: Optional[H5PluginFilterKwargs | Any] = None
    compression_opts: Optional[Any] = None


class DatasetKwargs(Instantiable):
    """
    Keyword arguments for creating a dataset in HDF5 (``create_dataset``).
    """

    chunks: Optional[int | Sequence[int] | bool] = None
    filter: Optional[FilterKwargs] = None
    shuffle: Optional[bool] = None

    def as_kwargs(self, exclude_none: bool = True, exclude_unset: bool = True) -> dict:
        """Flatten this model into a dict usable as ``**kwargs``.

        The nested ``filter`` entry is merged into the top level so the
        result can be passed directly to ``h5py``'s ``create_dataset``.
        """
        d = self.instantiate_classes().model_dump(exclude_none=exclude_none, exclude_unset=exclude_unset)
        # Fix: with exclude_none=False a present-but-None "filter" made the
        # original call d.update(None); only merge a truthy mapping.
        filter_ = d.pop("filter", None)
        if filter_:
            d.update(filter_)
        return d


def add_fits_to_hdf5(
        hdf5_file: str,
        fits_files: str | list[str],
        dataset_kwargs: Optional[Mapping[str, dict]] = None,
        mode: str = "a",
) -> None:
    """Append each FITS file as its own group of the given HDF5 file."""
    # Accept a single path as well as a list of paths.
    if isinstance(fits_files, (Path_fr, str)):
        fits_files = [fits_files]

    with h5py.File(hdf5_file, mode) as fp:
        group_names = gen_unique_group_name(fits_files)
        for fits_path, group_name in zip(fits_files, group_names, strict=True):
            group = fp.create_group(group_name)
            print(f"Adding {fits_path} to group {group.name!r} in file {hdf5_file!r}")
            read_to_hdf5_group(group, fits_path, dataset_kwargs)


def read_to_hdf5_group(group: "Group", fits_file: str, dataset_kwargs: Optional[Mapping[str, dict]] = None) -> None:
    """
    Reads a FITS file and writes the data to a HDF5 group.

    :param group: the group to write to
    :param fits_file: the fits file to read from
    :param dataset_kwargs: keyword arguments for creating HDF5 datasets

    :return None
    """
    # Fix: use a context manager so the FITS file handle is always released
    # (the original never closed it).
    with fits.open(fits_file) as hdus:
        # Expected layout: an empty primary HDU followed by one data HDU.
        assert len(hdus) == 2 and hdus[0].data is None and hdus[1].data is not None

        for hdu in hdus:
            hdu_group = group.create_group(hdu.name)
            ds_kwargs = dataset_kwargs.get(hdu.name, {}) if dataset_kwargs is not None else {}
            # v_print is only installed by convert(); guard against direct calls.
            if v_print is not None:
                v_print(f"Converting HDU {hdu.name!r} and add it to {hdu_group.name}")
            hdu_to_hdf5_group(hdu_group, hdu, ds_kwargs)


def hdu_to_hdf5_group(
        group: "Group",
        hdu: Union["BinTableHDU", "PrimaryHDU"],
        dataset_kwargs: Optional[Mapping[str, dict]] = None
) -> None:
    """Write one HDU into *group*: header as attrs, each table column as a dataset.

    :param group: destination HDF5 group
    :param hdu: the FITS HDU to convert
    :param dataset_kwargs: per-column keyword arguments for dataset creation
    """
    if dataset_kwargs is None:
        dataset_kwargs = {}

    # Store the HDU's metadata as attributes of the group.
    group.attrs["__name__"] = hdu.name
    group.attrs["__version__"] = hdu.ver
    group.attrs["__class__"] = hdu.__class__.__name__

    # Copy the header's key/value pairs into the group's attrs.
    attributes = dict(hdu.header)
    comment = attributes.pop("COMMENT", None)
    if comment is not None:
        group.attrs["COMMENT"] = str(comment)
    group.attrs.update(attributes)

    if isinstance(hdu, fits.hdu.image.PrimaryHDU):
        # noinspection PyPropertyAccess
        assert hdu.data is None
        return

    # Create one dataset per column of the HDU's data table.
    for column in hdu.columns:  # type: Column
        name = column.name
        # Be strict and require every column to be named. FITS itself does not
        # require column names, but the FRB data we handle always has them.
        assert name is not None
        # noinspection PyPropertyAccess
        data = hdu.data.field(name)
        assert isinstance(data, np.ndarray)
        # For FRB data there are 17 columns, formatted as:
        # 128R x 17C ['1D', '1D', '1D', '1D', '1D', '1D', '1D', '1E', '1E', '1E', '1E', '1E', '4096E', '4096E',
        #             '16384E', '16384E', '16777216B']
        # Some columns repeat the same value on every row. Detect that case and
        # keep only the first row; the original shape is recorded in the
        # dataset attrs below so the data can be broadcast back.
        first_row = data[0:1]
        diff = data - first_row
        original_shape = None
        if np.allclose(diff, 0.0):
            original_shape = data.shape
            data: np.ndarray = first_row

        # Hard-coded policy: do not chunk or compress very small arrays.
        if data.size < 64:
            ds_kwargs = {"chunks": None}
        elif name == "DATA":
            shape = data.shape  # == (T1, T2, NPOL, NCHAN, 1)
            assert shape[-1] == 1
            original_shape = data.shape
            data = np.reshape(data, (shape[0] * shape[1], shape[2], shape[3]))

            # We want every chunk to cover the same absolute time length and
            # frequency range, regardless of the file's resolution.
            ref_tbin = 4.9152e-05
            ref_chunk_time = ref_tbin * 128
            tbin = hdu.header["TBIN"]
            time_chk_size = int(np.round(ref_chunk_time / tbin))

            npol = hdu.header["NPOL"]
            pol_chk_size = 1 if npol == 1 else 2

            ref_chunk_freq = 512
            ref_nchan = 4096
            nchan = hdu.header["NCHAN"]
            freq_chk_size = int(np.round(ref_chunk_freq * nchan / ref_nchan))

            # Fix: copy before mutating — the original wrote "chunks" into the
            # caller-owned dict, contaminating subsequent HDUs/files that share
            # the same dataset_kwargs mapping.
            ds_kwargs = dict(dataset_kwargs.get(name, {"compression": "gzip", "compression_opts": 5}))
            # noinspection PyTypeChecker
            ds_kwargs["chunks"] = (time_chk_size, pol_chk_size, freq_chk_size)
        else:
            # Copy for the same reason as above.
            ds_kwargs = dict(dataset_kwargs.get(name, {}))

        if (chunks := ds_kwargs.get("chunks", None)) is not None:
            if isinstance(chunks, int):
                chunks = (chunks,)
            # Pad with leading 1s so the chunk rank matches the data rank.
            if data.ndim != len(chunks):
                chunks = (1, ) * (data.ndim - len(chunks)) + chunks
            # Clip each chunk dimension to the data's actual extent.
            chunks = tuple(min(x, y) for x, y in zip(chunks, data.shape, strict=False))
            # noinspection PyTypeChecker
            ds_kwargs["chunks"] = chunks

        if data.itemsize > 1:
            # Convert to native byte order.
            data = data.astype(dtype=data.dtype.type, copy=False)

        if v_print is not None:  # installed by convert(); guard direct calls
            v_print(f"converting {name!r} column (shape = {'x'.join(map(str, data.shape))}) "
                    f"and add it to a HDF5 dataset '{group.name}/{name}' with keyword arguments: {ds_kwargs}")
        # Fix: create_dataset(..., data=data) already writes the data; the
        # original's extra `dataset[:] = data` wrote everything a second time.
        dataset = group.create_dataset(name, data=data, **ds_kwargs)
        dataset.attrs["unit"] = str(column.unit)
        if original_shape:
            dataset.attrs["original_shape"] = original_shape


def gen_unique_group_name(files: list[str]) -> "typing.Iterator[str]":
    """Yield a unique group name (basename without extension) for each file.

    Duplicate basenames receive an increasing ``_N`` suffix. The original
    implementation incremented the counter of the *renamed* key instead of
    the base name, so three or more identically-named files produced
    duplicate names (``a, a_1, a_1``); it also let a generated suffix
    collide with a real file named e.g. ``a_1.fits``. Both are fixed by
    tracking all names issued so far.
    """
    seen = set()
    counts = defaultdict(int)
    for f in files:
        base, _ext = os.path.splitext(os.path.basename(f))
        name = base
        # Bump the suffix until the candidate has not been issued yet.
        while name in seen:
            counts[base] += 1
            name = f"{base}_{counts[base]}"
        seen.add(name)
        yield name


def main():
    """CLI entry point: expose `convert` via jsonargparse's auto_cli.

    Uses a custom parser so plain ``bool`` options become store_true/
    store_false flags instead of requiring an explicit value.
    """
    # see: https://github.com/omni-us/jsonargparse/issues/355#issuecomment-2600901284
    from jsonargparse import ArgumentParser, auto_cli

    class CustomArgumentParser(ArgumentParser):
        def add_argument(self, *args, **kwargs):
            # Rewrite bool-typed options into flag actions: a True default
            # becomes store_false (the flag turns it off), a False default
            # becomes store_true (the flag turns it on).
            if "type" in kwargs and kwargs["type"] is bool:
                kwargs.pop("type")
                activated = kwargs.pop("default", False)
                if activated:
                    kwargs["action"] = "store_false"
                else:
                    kwargs["action"] = "store_true"
            return super().add_argument(*args, **kwargs)

    auto_cli(convert, as_positional=False, parser_class=CustomArgumentParser)


def __dev():
    """Ad-hoc manual smoke test for the kwargs models (not run by default)."""
    plugin_cfg = H5PluginFilterKwargs(
        class_path="hdf5plugin.Blosc2",
        init_args={"cname": "zstd", "clevel": 1, "filters": 1},
    )
    print(f"H5PluginFilterKwargs: {plugin_cfg}")
    print(f"H5PluginFilterKwargs: {plugin_cfg.instantiate_classes()}")

    filter_cfg = FilterKwargs(
        compression={
            "class_path": "hdf5plugin.Blosc2",
            "init_args": {"cname": "zstd", "clevel": 1, "filters": 1},
        },
    )
    print(f"FilterKwargs: {filter_cfg.model_dump(exclude_unset=True)}")
    dumped = filter_cfg.instantiate_classes().model_dump(exclude_none=True)
    print(type(dumped))
    print(f"FilterKwargs: {dumped}")

    print("=" * 50)
    raw = {
        "chunks": (3, 4),
        "filter": {
            "compression": {
                "class_path": "hdf5plugin.Blosc2",
                "init_args": {"cname": "zstd", "clevel": 1, "filters": 1},
            },
        },
        "shuffle": True,
    }
    ds_cfg = DatasetKwargs.model_validate(raw)
    print(ds_cfg)
    print(ds_cfg.as_kwargs())


if __name__ == "__main__":
    # __dev()
    main()
