import torch
import atexit
from collections import defaultdict
import numpy as np  # optional: useful for more advanced statistics or plotting


class VariableRecorder:
    """Process-wide singleton that accumulates values of named variables
    (tensors or Python numbers) during a run and prints summary statistics
    (count/mean/min/max) at interpreter exit via ``atexit``.

    All recorded data is detached, cast to bfloat16, and moved to the CPU so
    it does not hold GPU memory or autograd graphs alive.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Singleton. Note: object.__new__ accepts no extra arguments, so the
        # constructor args must NOT be forwarded here (doing so raises
        # TypeError); __init__ receives them instead.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, enabled=True, default_percentiles=(0.25, 0.5, 0.75)):
        """Initialize the recorder (one-time; later calls are no-ops).

        Args:
            enabled (bool): If False, record() and report_stats() do nothing
                until enable_recording() is called.
            default_percentiles (tuple[float, ...]): Kept for API
                compatibility; currently unused by report_stats().
        """
        # Because of the singleton __new__, __init__ runs on every
        # VariableRecorder() call — guard one-time setup with a flag.
        if hasattr(self, "_initialized"):
            return
        self.data: defaultdict[str, list[torch.Tensor]] = defaultdict(list)
        self.enabled = enabled
        self.default_percentiles = default_percentiles
        # Dedicated CUDA stream for async device->host copies; None on
        # CPU-only machines so the recorder still works without a GPU.
        self.transfer_stream = (
            torch.cuda.Stream(priority=1) if torch.cuda.is_available() else None
        )
        atexit.register(self.report_stats)
        if self.enabled:
            print(
                "VariableRecorder initialized. Statistics will be reported at script exit."
            )
        else:
            print(
                "VariableRecorder initialized but DISABLED. No data will be recorded or reported unless enabled."
            )
        self._initialized = True

    def record(self, name: str, var, record_full_tensor_values: bool = True):
        """
        Records the variable.

        Args:
            name (str): Name of the variable to record.
            var (torch.Tensor | number): The value to record. Non-tensors are
                converted with ``torch.tensor`` (bfloat16, CPU) when possible.
            record_full_tensor_values (bool):
                If True (default), flattens the tensor and records all its values.
                This is useful for getting a distribution of all elements.
                If False, records only the mean of the tensor. This can save memory
                if you are recording very large tensors frequently and only care about
                their average behavior.
        """
        if not self.enabled:
            return
        # Convert non-tensors BEFORE touching tensor-only attributes like
        # numel(); plain Python numbers would otherwise raise AttributeError.
        if not isinstance(var, torch.Tensor):
            try:
                var = torch.tensor(var, device="cpu", dtype=torch.bfloat16)
            except Exception as e:
                print(
                    f"Warning: Variable '{name}' is not a PyTorch tensor and could not be converted. Type: {type(var)}. Error: {e}"
                )
                return
        if var.numel() == 0:
            # Nothing to record for empty tensors.
            return

        if record_full_tensor_values:
            # Detach from the graph and move to CPU as bfloat16. Use the
            # transfer stream for async copies only when the data is on GPU.
            if self.transfer_stream is not None and var.is_cuda:
                with torch.cuda.stream(self.transfer_stream):
                    cpu_copy = var.detach().bfloat16().to("cpu", non_blocking=True)
            else:
                cpu_copy = var.detach().bfloat16().cpu()
            # Append the flattened tensor as one entry; iterating/extending
            # with a 0-dim tensor would raise TypeError.
            self.data[name].append(cpu_copy.flatten())
        else:
            # Store only the mean, as a 1-element CPU tensor so report_stats
            # can torch.concat all entries uniformly.
            self.data[name].append(var.detach().bfloat16().mean().cpu().reshape(1))

    def report_stats(self):
        """Print count/mean/min/max for every recorded variable.

        Registered with ``atexit``; safe to call manually at any time.
        """
        torch.set_printoptions(precision=16)
        if not self.enabled or not self.data:
            if self.enabled and not self.data:
                print("\n--- Variable Statistics Report ---")
                print("No variables were recorded.")
                print("--- End of Report ---")
            return

        print("\n--- Variable Statistics Report ---")
        # Make sure all pending non_blocking device->host copies have landed.
        if self.transfer_stream is not None:
            self.transfer_stream.synchronize()
        for name, values_list in self.data.items():
            if not values_list:
                print(f"\nVariable: '{name}'")
                print("  No data recorded.")
                continue

            # Concatenate all stored (already flattened) tensors.
            data_tensor = torch.concat([t.flatten() for t in values_list])

            print(
                f"\nVariable: '{name}' (Distribution of {data_tensor.numel()} values)"
            )

            if data_tensor.numel() == 0:
                print("  No numerical data to report.")
                continue

            mean_val = torch.mean(data_tensor)
            min_val = torch.min(data_tensor)
            max_val = torch.max(data_tensor)

            print(f"  Count:    {data_tensor.numel()}")
            print(f"  Mean:     {mean_val.item():.12f}")
            print(f"  Min:      {min_val.item():.12f}")
            print(f"  Max:      {max_val.item():.12f}")

        print("--- End of Report ---")

    def clear(self):
        """Clears all recorded data."""
        self.data = defaultdict(list)
        print("VariableRecorder data cleared.")

    def enable_recording(self):
        """Enables recording and reporting."""
        if not self.enabled:
            self.enabled = True
            print("VariableRecorder ENABLED.")

    def disable_recording(self):
        """Disables recording and reporting. Data will not be collected or printed."""
        if self.enabled:
            self.enabled = False
            print("VariableRecorder DISABLED.")


# Global instance, accessible from anywhere
# You can pass enabled=False if you want to disable it by default
# And then enable it programmatically later if needed.
# variable_recorder = VariableRecorder(enabled=True)
# Or, more commonly:
# NOTE(review): left as None at import time — callers must create the
# singleton (e.g. `variable_recorder = VariableRecorder()`) before calling
# variable_recorder.record(...), otherwise they hit AttributeError on None.
variable_recorder = None
# variable_recorder = VariableRecorder()


# --- Example Usage ---
if __name__ == "__main__":
    # The module-level `variable_recorder` is deliberately None at import
    # time; the demo needs a live recorder, so create the singleton here.
    # (Previously the script crashed with AttributeError calling record()
    # on None.)
    if variable_recorder is None:
        variable_recorder = VariableRecorder(enabled=True)

    # 0. Optionally disable/enable recorder globally if needed
    # variable_recorder.disable_recording()
    # variable_recorder.enable_recording()

    # 1. Define a dummy model or use your actual model
    class MyModel(torch.nn.Module):
        """Tiny two-layer MLP whose forward() demonstrates recording inputs,
        activations, and derived scalar metrics."""

        def __init__(self):
            super().__init__()
            self.linear1 = torch.nn.Linear(10, 20)
            self.relu = torch.nn.ReLU()
            self.linear2 = torch.nn.Linear(20, 5)
            self.counter = 0  # number of forward passes so far

        def forward(self, x):
            self.counter += 1
            # Record input tensor's distribution
            variable_recorder.record("model_input_dist", x)

            # Record mean of input tensor (alternative for very large tensors)
            variable_recorder.record(
                "model_input_mean", x, record_full_tensor_values=False
            )

            intermediate1 = self.linear1(x)
            # Record intermediate variable (e.g., activations before ReLU).
            # All elements of intermediate1 are collected across calls.
            variable_recorder.record("linear1_output_dist", intermediate1)

            intermediate2 = self.relu(intermediate1)
            # Record another intermediate variable (activations after ReLU)
            variable_recorder.record("relu_output_dist", intermediate2)

            # Example: record a derived scalar, like sparsity (0-dim tensor)
            sparsity = (intermediate2 == 0).bfloat16().mean()
            variable_recorder.record("relu_output_sparsity", sparsity)

            # Example: record a plain Python number (converted to a tensor
            # by the recorder)
            python_number_metric = self.counter * 0.1
            variable_recorder.record("python_metric_example", python_number_metric)

            output = self.linear2(intermediate2)
            # Record model output
            variable_recorder.record("model_output_dist", output)

            return output

    # 2. Simulate multi-round inference
    model = MyModel()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cuda":
        model.to("cuda")
        print("Model moved to CUDA")

    num_rounds = 5
    batch_size = 3
    input_features = 10

    print(f"\nSimulating {num_rounds} inference rounds...\n")
    for i in range(num_rounds):
        print(f"Round {i + 1}/{num_rounds}")
        # Dummy input on the model's device; scale it per round to vary the
        # recorded distributions a bit.
        dummy_input = torch.randn(batch_size, input_features, device=device) * (i + 1)

        # Inference only — no autograd graph needed.
        with torch.no_grad():
            _ = model(dummy_input)

    # 3. At the end of the script, report_stats() runs automatically via
    # atexit; no explicit call is needed here. It can also be invoked
    # manually (variable_recorder.report_stats()) for mid-run reports.
    print("\nScript finished. Statistics report will follow if recorder was enabled.")
