import torch
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode


# 自定义的 FakeTensor 模式用于内存跟踪
# Custom FakeTensor mode for memory tracking.
class FakeTensorMemoryProfilerMode(FakeTensorMode):
    """A ``FakeTensorMode`` that tracks how much storage the tensors produced
    under it would occupy, without allocating any real memory.

    Attributes:
        memory_use: total bytes of all distinct storages seen so far.
        max_memory: peak value of ``memory_use``.
        storage_count: maps ``id(untyped_storage)`` -> times that storage
            was seen, so shared storages are only counted once.
    """

    def __init__(self) -> None:
        super().__init__()
        self.memory_use = 0      # current total tracked bytes
        self.max_memory = 0      # peak of memory_use
        self.storage_count = {}  # storage id -> observation count

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        # Delegate to FakeTensorMode so the op is actually faked. Calling
        # ``func(*args, **kwargs)`` directly would (a) crash with TypeError
        # when kwargs is None, and (b) run the real kernel, allocating real
        # memory — defeating the purpose of a fake-tensor profiler.
        result = super().__torch_dispatch__(func, types, args, kwargs or {})
        self.track_tensor_memory_use(result)
        return result

    def track_tensor_memory_use(self, tensor):
        """Record the storage backing ``tensor``.

        Also accepts (nested) lists/tuples of tensors, as returned by
        multi-output ops, and recurses into them. Non-tensor values are
        ignored.
        """
        if isinstance(tensor, FakeTensor):
            storage = tensor.untyped_storage()
            storage_id = id(storage)
            if storage_id not in self.storage_count:
                # First sighting of this storage: count its bytes once.
                self.memory_use += storage.nbytes()
                self.max_memory = max(self.memory_use, self.max_memory)
                self.storage_count[storage_id] = 1
            else:
                # Shared storage (views, aliases): bump the count only.
                self.storage_count[storage_id] += 1
        elif isinstance(tensor, (list, tuple)):
            # Multi-output ops return containers of tensors.
            for item in tensor:
                self.track_tensor_memory_use(item)

    def get_max_memory_usage(self):
        """Return the peak tracked memory in bytes."""
        return self.max_memory


# Demo: compare a real tensor's storage size with what the fake-tensor
# profiler reports, without needing the profiled memory to actually exist.

# Fall back to CPU so the demo runs on machines without CUDA
# (a FakeTensor needs no GPU, but a real tensor does).
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Create a real tensor OUTSIDE the fake mode so it owns actual storage;
# creating it inside the mode would already yield a FakeTensor.
tensor = torch.rand(10000, 10000, device=device)

# Print the real tensor's memory usage.
real_tensor_memory = tensor.untyped_storage().nbytes()
print(f"Real tensor memory usage: {real_tensor_memory / 1024 ** 2:.5f} MB")

with FakeTensorMemoryProfilerMode() as ftmp:
    # Convert the real tensor to a FakeTensor (metadata only, no storage).
    fake_tensor = ftmp.from_tensor(tensor)

    # Track the FakeTensor's (virtual) memory use.
    ftmp.track_tensor_memory_use(fake_tensor)

    # Get the peak memory the profiler has observed.
    max_memory = ftmp.get_max_memory_usage()
    print(f"Max memory used by FakeTensor: {max_memory / 1024 ** 2:.5f} MB")

# Real allocator statistics are only meaningful when CUDA is present.
if torch.cuda.is_available():
    allocated_memory = torch.cuda.memory_allocated()
    print(f"Allocated memory: {allocated_memory / 1024 ** 2:.5f} MB")