from dataclasses import dataclass, field
from typing import Optional
from pathlib import Path


@dataclass
class QATArguments:
    """Run configuration for quantization-aware training (paths, dataset, dataloader)."""

    mode: str = field(
        default="qat",
        metadata={"help": "Mode"},
    )
    # NOTE(review): help text reads like it was copied from a "save dir" field;
    # presumably this is the pretrained model to *load* — confirm against caller.
    model_path: Path = field(
        default=Path("model/openvla"), metadata={"help": "trained model to this dir"}
    )

    data_root_dir: Path = field(
        default=Path("/home/wenhongli/openvla"),
        metadata={"help": "Path to Open-X dataset directory"},
    )

    dataset_name: str = field(
        default="casia_franka",
        metadata={"help": "Name of fine-tuning dataset (e.g., `droid_wipe`)"},
    )

    run_root_dir: Path = field(
        default=Path("runs"),
        metadata={"help": "Path to directory to store logs & checkpoints"},
    )

    image_aug: bool = field(
        default=True,
        metadata={"help": "Whether to train with image augmentations"},
    )

    shuffle_buffer_size: int = field(
        default=100_000,
        metadata={"help": "Dataloader shuffle buffer size (can reduce if OOM)"},
    )

    # Fix: the field genuinely defaults to None, so annotate it Optional[Path]
    # instead of `Path` plus `# type: ignore`. Help text corrected — the
    # original was a copy-paste of run_root_dir's help.
    resume_dir: Optional[Path] = field(
        default=None,
        metadata={"help": "Path to checkpoint directory to resume training from"},
    )


@dataclass
class QuantArguments:
    """Bit-width settings for weight/activation quantization."""

    act_bits: int = field(default=8, metadata={"help": "bitwidth for activation"})

    w_bits: int = field(default=4, metadata={"help": "bitwidth for weight"})

    # Fix: typo in user-facing help text ("interger" -> "integer").
    integer_only: bool = field(
        default=True, metadata={"help": "use integer-only softmax or LN"}
    )


@dataclass
class OtherArguments:
    """Evaluation / export flags that are orthogonal to the training config."""

    # Fix: defaults are None, so these must be Optional[str] (the original
    # annotations lied and relied on the type checker not noticing).
    model_save_dir: Optional[str] = field(
        default=None, metadata={"help": "save trained model to this dir"}
    )
    # NOTE(review): flag names suggest fp = full precision, quant_fp = fake
    # (simulated) quantization, quant_int = integer-only — confirm in caller.
    eval_fp: bool = field(default=False)
    eval_quant_fp: bool = field(default=False)
    eval_quant_int: bool = field(default=False)
    save_int_tensors: bool = field(default=False)
    save_tensors_path: Optional[str] = field(
        default=None, metadata={"help": "save tensors for hardware"}
    )
    test: bool = field(default=False)


class QuantizationConfigArgs:
    """Expand bit-width settings into per-module quantizer parameter dicts.

    Each attribute is a plain dict consumed by external quantizer-construction
    code. Common keys: ``n_bits`` (bit width), ``symmetric`` (symmetric vs.
    asymmetric range), ``scale``/``zeropoint`` (granularity of the quantization
    parameters), ``dtype`` (signed ``INT`` vs. unsigned ``UINT``), ``enable``,
    and optionally ``quant_shape`` (a reshape/inverse-reshape lambda pair
    applied around quantization).
    """

    def __init__(self, args: "QuantArguments | None" = None):
        # Fix: the original signature used `args=QuantArguments()`, a mutable
        # default evaluated once at function-definition time and shared across
        # every call. Build the default per call instead (same behavior for
        # all existing callers).
        if args is None:
            args = QuantArguments()
        self.w_bits = args.w_bits
        self.act_bits = args.act_bits

        # ___________________ Linear and MatMul __________________________
        self.weight_quant_params = {
            "n_bits": self.w_bits,
            "symmetric": False,
            "scale": "per_channel_weight",
            "zeropoint": "per_channel_weight",
            "dtype": "UINT",
            "enable": True,
            "no_clamp_init": True,
        }

        self.act_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_channel_activation",
            "dtype": "INT",
            "enable": True,
            "no_clamp_init": True,
        }

        self.conv_weight_quant_params = {
            "n_bits": self.w_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }

        # ___________________ Conv2d __________________________
        self.conv_act_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }

        # ___________________ RoPE __________________________
        # Symmetric with one fewer bit — presumably the sign bit is handled
        # separately for rotary embeddings; confirm in the quantizer code.
        self.pos_emb_quant_params = {
            "n_bits": self.act_bits - 1,
            "symmetric": True,
            "scale": "per_tensor",
            "dtype": "INT",
        }

        # ___________________ Layer Norm / RMSNorm __________________________
        self.layernorm_vit_input_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_tensor",
            # "scale": "per_channel_activation",
            "dtype": "INT",
            "enable": True,
            "no_clamp_init": False,
        }
        self.layernorm_input_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_tensor",
            # "scale": "per_channel_activation",
            "dtype": "INT",
            "enable": True,
            "no_clamp_init": False,
        }

        self.layernorm_output_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_channel_activation",
            "dtype": "INT",
            "enable": True,
            "no_clamp_init": False,
        }

        # ___________________ Attention MatMul __________________________
        # quant_shape pairs: first lambda reshapes the 4-D (presumably
        # batch/head/seq/dim — confirm) tensor before quantization, second
        # restores the original layout given the saved shape `s`.
        self.q_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "quant_shape": (
                lambda x: x.transpose(1, 2).flatten(-2),
                lambda x, s: x.reshape(s[0], s[2], s[1], s[3]).transpose(1, 2),
            ),
            "dtype": "INT",
        }

        self.k_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "quant_shape": (
                lambda x: x.flatten(1, 2).transpose(1, 2),
                lambda x, s: x.transpose(1, 2).reshape(s[0], s[1], s[2], s[3]),
            ),
            "dtype": "UINT",
        }

        self.p_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "dtype": "INT",
        }

        self.v_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_channel_activation",
            "zeropoint": "per_channel_activation",
            "quant_shape": (
                lambda x: x.transpose(1, 2).flatten(-2),
                lambda x, s: x.reshape(s[0], s[2], s[1], s[3]).transpose(1, 2),
            ),
            "dtype": "UINT",
        }
        # ___________________ Softmax __________________________

        self.softmax_input_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_head_softmax",
            # "scale": "per_tensor",
            "dtype": "INT",
        }

        # ___________________ GELU __________________________

        self.gelu_input_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_tensor",
            "dtype": "INT",
        }

        # ___________________ MLP __________________________

        self.sigmoid_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_tensor",
            # "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }
        self.hadamard2_a1_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_channel_activation",
            # "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }
        self.hadamard1_a2_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_channel_activation",
            # "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }
        self.hadamard2_act_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": True,
            "scale": "per_channel_activation",
            # "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
        }

        self.mlp_act_quant_params = {
            "n_bits": self.act_bits,
            "symmetric": False,
            "scale": "per_tensor",
            "zeropoint": "per_tensor",
            "dtype": "INT",
            "enable": True,
            "no_clamp_init": True,
        }
