SeanScripts
committed on
Commit
•
a9cab05
1
Parent(s):
c119e42
Update README.md
Browse files
README.md
CHANGED
@@ -70,7 +70,7 @@ generated_tokens = output.size(1) - prompt_tokens
|
|
70 |
time_per_token = generated_tokens/total_time
|
71 |
print(f"Generated {generated_tokens} tokens in {total_time:.3f} s ({time_per_token:.3f} tok/s)")
|
72 |
|
73 |
-
response = processor.tokenizer.decode(output[0, prompt_tokens:])
|
74 |
print(response)
|
75 |
|
76 |
torch.cuda.empty_cache()
|
|
|
70 |
time_per_token = generated_tokens/total_time
|
71 |
print(f"Generated {generated_tokens} tokens in {total_time:.3f} s ({time_per_token:.3f} tok/s)")
|
72 |
|
73 |
+
response = processor.tokenizer.decode(output[0, prompt_tokens:], skip_special_tokens=True)
|
74 |
print(response)
|
75 |
|
76 |
torch.cuda.empty_cache()
|