import torch
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode


# 自定义的 FakeTensor 模式用于内存跟踪
class FakeTensorMemoryProfilerMode(FakeTensorMode):
    def __init__(self):
        super().__init__()
        self.memory_use = 0
        self.max_memory = 0
        self.storage_count = {}

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        result = super().__torch_dispatch__(func, types, args, kwargs)
        self.track_tensor_memory_use(result)
        return result

    def track_tensor_memory_use(self, result):
        # 处理多个张量的情况
        if isinstance(result, (tuple, list)):
            for res in result:
                self._track_single_tensor(res)
        else:
            self._track_single_tensor(result)

    def _track_single_tensor(self, tensor):
        if isinstance(tensor, torch.Tensor) and tensor.is_sparse is False:
            if isinstance(tensor, FakeTensor):
                storage = tensor.untyped_storage()
                nbytes = storage.nbytes()
                storage_id = id(storage)
                if storage_id not in self.storage_count:
                    self.memory_use += nbytes
                    self.max_memory = max(self.memory_use, self.max_memory)
                    self.storage_count[storage_id] = 1
                else:
                    self.storage_count[storage_id] += 1

    def get_max_memory_usage(self):
        return self.max_memory


# --- Real CUDA pass: measure actual allocator state for comparison with the
# --- fake-tensor pass below. Requires a CUDA device.
torch.cuda.set_device(0)
allocated_memory = torch.cuda.memory_allocated()
# Baseline taken BEFORE any of our tensors exist (the original message
# incorrectly said "after intermediate tensors").
print(f"Allocated memory before creating tensors: {allocated_memory / 1024 ** 2:.5f} MB")

tensor_a = torch.rand(10000, 10000, device='cuda')
tensor_b = torch.rand(10000, 10000, device='cuda')
tensor_c = tensor_a + tensor_b
tensor_d = tensor_a * tensor_b
tensor_e = torch.matmul(tensor_c, tensor_d)

# Sum each tensor's storage size on top of the pre-allocation baseline.
a_memory = tensor_a.untyped_storage().nbytes()
b_memory = tensor_b.untyped_storage().nbytes()
c_memory = tensor_c.untyped_storage().nbytes()
d_memory = tensor_d.untyped_storage().nbytes()
e_memory = tensor_e.untyped_storage().nbytes()
total_memory = allocated_memory + a_memory + b_memory + c_memory + d_memory + e_memory
print(f"Total memory usage: {total_memory / 1024 ** 2:.5f} MB")

# Allocator statistics after the workload: reserved (cached) vs. allocated.
cache_memory = torch.cuda.memory_reserved()
print(f"cached memory: {cache_memory / 1024 ** 2:.5f} MB")
allocated_memory = torch.cuda.memory_allocated()
print(f"Allocated memory after intermediate tensors: {allocated_memory / 1024 ** 2:.5f} MB")

# --- Fake-tensor pass: run the same workload inside FakeTensorMemoryProfilerMode,
# --- which tracks storage sizes without allocating real device memory.
with FakeTensorMemoryProfilerMode() as ftmp:
    # NOTE(review): while the mode is active, torch.rand returns FakeTensors
    # (despite device='cuda') — no real CUDA allocation happens here.
    tensor_a = torch.rand(10000, 10000, device='cuda')
    tensor_b = torch.rand(10000, 10000, device='cuda')

    # NOTE(review): synchronizing under a fake-tensor mode is likely a no-op
    # since no real CUDA work was launched — confirm whether this is needed.
    torch.cuda.synchronize()

    # Storage sizes of the input tensors (these are fake storages, so the
    # values are computed sizes, not measurements of real memory).
    real_tensor_a_memory = tensor_a.untyped_storage().nbytes()
    real_tensor_b_memory = tensor_b.untyped_storage().nbytes()

    # Run some ops that produce intermediate tensors; each result is tracked
    # by the mode's __torch_dispatch__ hook.
    tensor_c = tensor_a + tensor_b
    tensor_d = tensor_a * tensor_b
    tensor_e = torch.matmul(tensor_c, tensor_d)

    # See the synchronization note above.
    torch.cuda.synchronize()

    # NOTE(review): tensor_a..tensor_e are already FakeTensors at this point
    # (they were created under the active mode), so from_tensor is presumably
    # redundant here — verify it behaves as a pass-through on fake inputs.
    fake_tensor_a = ftmp.from_tensor(tensor_a)
    fake_tensor_b = ftmp.from_tensor(tensor_b)
    fake_tensor_c = ftmp.from_tensor(tensor_c)
    fake_tensor_d = ftmp.from_tensor(tensor_d)
    fake_tensor_e = ftmp.from_tensor(tensor_e)
    a_memory = fake_tensor_a.untyped_storage().nbytes()
    b_memory = fake_tensor_b.untyped_storage().nbytes()
    c_memory = fake_tensor_c.untyped_storage().nbytes()
    d_memory = fake_tensor_d.untyped_storage().nbytes()
    e_memory = fake_tensor_e.untyped_storage().nbytes()

    # Sum of the five storage sizes; this ignores any other intermediates the
    # mode may have tracked (compare with ftmp.get_max_memory_usage()).
    total_memory = a_memory + b_memory + c_memory + d_memory + e_memory
    print(f"FakeTensor memory usage: {total_memory / 1024 ** 2:.5f} MB")


