import torch
import torch.nn as nn
import torch_npu


# Define a simple model
class MyModel(nn.Module):
    """A small two-layer MLP: 128 -> 256 -> ReLU -> 10 logits."""

    def __init__(self):
        super().__init__()
        # Attribute names (fc1/relu/fc2) are kept stable so that
        # state_dict keys do not change for existing checkpoints.
        self.fc1 = nn.Linear(128, 256)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        """Map a (batch, 128) input tensor to (batch, 10) outputs."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)


# Sample input: a batch of 32 samples moved to the NPU device.
x = torch.randn(32, 128).npu()
print("x is in npu")  # plain string: f-prefix was unnecessary (no placeholders)

# Create the model and move its parameters to the NPU.
model = MyModel().npu()

# List the dynamo backends available in this environment
# (the "npu" backend is registered by importing torch_npu above).
print(torch._dynamo.list_backends())

# Compile the model with the NPU backend - this is the key part.
compiled_model = torch.compile(model, backend="npu")

# First run will be slower as it triggers graph capture/compilation.
output = compiled_model(x)

# Subsequent runs reuse the compiled graph and will be faster.
for _ in range(10):
    output = compiled_model(x)
    print(output)
