import time
import torch
from modelscope import AutoTokenizer, AutoModelForCausalLM


# Prefer GPU: autoregressive generation on CPU is orders of magnitude slower.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model_name = 'openai-community/gpt2'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.to(device)
# This script only benchmarks inference: switch to eval mode so dropout is
# disabled and timings reflect deployment behavior.
model.eval()

# Compare generation latency with and without the KV cache.
# Tokenization is loop-invariant, so hoist it out of the timed region —
# we want to measure generation only.
inputs = tokenizer('what is kv caching?', return_tensors='pt').to(device)

for use_cache in [True, False]:
    times = []
    for _ in range(2):
        # perf_counter is monotonic and higher-resolution than time.time().
        start = time.perf_counter()
        # inference_mode skips autograd bookkeeping entirely during generation.
        with torch.inference_mode():
            model.generate(
                **inputs,
                use_cache=use_cache,
                max_new_tokens=1000,
                # GPT-2 defines no pad token; pad with EOS to silence the
                # "Setting pad_token_id to eos_token_id" warning.
                pad_token_id=tokenizer.eos_token_id,
            )
        if device.type == 'cuda':
            # CUDA ops are asynchronous; flush the queue before reading the clock.
            torch.cuda.synchronize()
        times.append(time.perf_counter() - start)

    # Label the output so the two timing lists are distinguishable.
    print(f'use_cache={use_cache}: {times}')