Mr-Vicky-01 committed on
Commit dd01426 · 1 Parent(s): c693a58

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -65,12 +65,12 @@ prompt = prompt_template
  encodeds = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).input_ids
 
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- merged_model.to(device)
+ model.to(device)
  inputs = encodeds.to(device)
 
 
  # Increase max_new_tokens if needed
- generated_ids = merged_model.generate(inputs, max_new_tokens=250, do_sample=False, pad_token_id=tokenizer.eos_token_id)
+ generated_ids = model.generate(inputs, max_new_tokens=250, do_sample=False, pad_token_id=tokenizer.eos_token_id)
  ans = ''
  for i in tokenizer.decode(generated_ids[0], skip_special_tokens=True).split('<end_of_turn>')[:2]:
      ans += i
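
For context, a minimal end-to-end sketch of the updated snippet after this rename. It assumes the model and tokenizer are loaded with transformers' Auto classes; the repo id and the prompt below are placeholders, not values taken from this diff.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id -- substitute the actual model repository.
MODEL_ID = "Mr-Vicky-01/<model-id>"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# Example prompt; the README builds this from its prompt_template.
prompt = "What is machine learning?"

encodeds = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).input_ids

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
inputs = encodeds.to(device)

# Increase max_new_tokens if needed
generated_ids = model.generate(inputs, max_new_tokens=250, do_sample=False, pad_token_id=tokenizer.eos_token_id)

# Keep the first two '<end_of_turn>'-delimited segments of the decoded output.
ans = ''
for i in tokenizer.decode(generated_ids[0], skip_special_tokens=True).split('<end_of_turn>')[:2]:
    ans += i
print(ans.strip())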