emonty777 committed on
Commit
aaf13a1
1 Parent(s): 219d72f

Update README.md

Files changed (1): README.md (+1, -3)
README.md CHANGED
@@ -66,7 +66,7 @@ text = "Your text goes here..."
 
 # If you want to use CPU
 input_ids = tokenizer(text, return_tensors="pt", truncation=True).input_ids
-# Ir you want to use GPU
+# If you want to use GPU
 input_ids = tokenizer(text, return_tensors="pt", truncation=True).input_ids.cuda()
 # Adjust max_new_tokens based on size. This is set up for articles of text
 outputs = model.generate(input_ids=input_ids, max_new_tokens=120, do_sample=False)
@@ -74,8 +74,6 @@ outputs = model.generate(input_ids=input_ids, max_new_tokens=120, do_sample=False)
 print(f"input sentence: {sample['article']}\n{'---'* 20}")
 print(f"summary:\n{tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]}")
 
-
-
 ```
 
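For context, a minimal, self-contained sketch of the snippet this commit touches might look like the following. The checkpoint id is a placeholder, not this repository's actual model, and the GPU branch also moves the model to CUDA, which the README snippet leaves implicit (its `input_ids.cuda()` line would otherwise fail against a CPU-resident model):

```python
# Minimal sketch of the README snippet this commit edits.
# The checkpoint id below is a hypothetical placeholder, not this repo's model.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "emonty777/placeholder-summarization-model"  # hypothetical id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

text = "Your text goes here..."

# If you want to use CPU
input_ids = tokenizer(text, return_tensors="pt", truncation=True).input_ids

# If you want to use GPU (the model must be on the GPU as well)
if torch.cuda.is_available():
    model = model.cuda()
    input_ids = input_ids.cuda()

# Adjust max_new_tokens based on size. This is set up for articles of text.
outputs = model.generate(input_ids=input_ids, max_new_tokens=120, do_sample=False)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
```

Note that the diffed snippet also prints `sample['article']`, which assumes a dataset example dict named `sample` defined earlier in the README; the sketch above omits it to stay self-contained.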