import torch
import torch.nn as nn

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

import torchtext
from torchtext.models import RobertaClassificationHead
from torchtext.functional import to_tensor

# XLM-R large encoder bundle plus a 2-class classification head.
# input_dim=1024 matches the large encoder's hidden size.
xlmr_large = torchtext.models.XLMR_LARGE_ENCODER
# Use the name imported on the line above instead of re-spelling the
# full torchtext.models path (the import was otherwise unused).
classifier_head = RobertaClassificationHead(num_classes=2, input_dim=1024)
model = xlmr_large.get_model(head=classifier_head)
# Text -> token-id transform matching the bundle's vocabulary.
transformer = xlmr_large.transform()

# Two kinds of input: a small batch and a big batch (the big one
# includes a long passage so padding/sparsity effects are visible).

small_input_batch = [  # fixed typo: was "samll_input_batch"
    "Hello world",
    "How are you"
]
big_input_batch = [
    "Hello world",
    "How are you",
"""`Well, Prince, so Genoa and Lucca are now just family estates of the
Buonapartes. But I warn you, if you don't tell me that this means war,
if you still try to defend the infamies and horrors perpetrated by
that Antichrist- I really believe he is Antichrist- I will have
nothing more to do with you and you are no longer my friend, no longer
my 'faithful slave,' as you call yourself! But how do you do? I see
I have frightened you- sit down and tell me all the news.`

It was in July, 1805, and the speaker was the well-known Anna
Pavlovna Scherer, maid of honor and favorite of the Empress Marya
Fedorovna. With these words she greeted Prince Vasili Kuragin, a man
of high rank and importance, who was the first to arrive at her
reception. Anna Pavlovna had had a cough for some days. She was, as
she said, suffering from la grippe; grippe being then a new word in
St. Petersburg, used only by the elite."""
]

# Select which batch drives the benchmark below.
input_batch = big_input_batch
# Tokenize, then pad to a rectangular tensor (pad id 1 is XLM-R's <pad>
# -- presumably; confirm against the bundle's vocab).
model_input = to_tensor(transformer(input_batch), padding_value=1)
output = model(model_input)
print(f"output shape: {output.shape}")  # fixed typo: was "ooutput"

iterations = 10

# CPU benchmark: compare execution with and without the
# BetterTransformer (BT) fastpath. The fastpath requires eval mode and
# no-grad execution, so this first run (training mode, grad enabled)
# takes the slow path.
print("slow path")

with torch.autograd.profiler.profile(use_cuda=False) as prof:
    for _ in range(iterations):
        output = model(model_input)
print(prof)

# Switching to eval mode (together with no_grad below) enables the
# BT fastpath.
model.eval()

print("fast path")
with torch.autograd.profiler.profile(use_cuda=False) as prof:
    with torch.no_grad():
        for _ in range(iterations):  # bug fix: original line was missing the colon
            output = model(model_input)
print(prof)

# Inspect the BT sparsity (nested tensor) setting.
print(model.encoder.transformer.layers.enable_nested_tensor)

# Disable nested-tensor sparsity so both runs below use dense inputs.
model.encoder.transformer.layers.enable_nested_tensor = False

model.to(device)
model_input = model_input.to(device)

# Bug fix: use_cuda was hard-coded to True, which is wrong when the
# device fell back to CPU. Only profile CUDA kernels on a real GPU.
profile_cuda = device.type == 'cuda'

print("slow path:")
with torch.autograd.profiler.profile(use_cuda=profile_cuda) as prof:
    for _ in range(iterations):
        output = model(model_input)

print(prof)

print("fast path")
with torch.autograd.profiler.profile(use_cuda=profile_cuda) as prof:
    with torch.no_grad():
        for _ in range(iterations):
            output = model(model_input)

print(prof)
