seyabde committed on
Commit
4a68eb8
1 Parent(s): befc207

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -30,16 +30,16 @@ model = AutoModelForCausalLM.from_pretrained(
30
  torch_dtype='auto'
31
  ).eval()
32
 
33
- # Prompt content: "hi"
34
  messages = [
35
- {"role": "user", "content": "hi"}
36
  ]
37
 
38
  input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
39
  output_ids = model.generate(input_ids.to('cuda'))
40
  response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
41
 
42
- # Model response: "Hello! How can I assist you today?"
43
  print(response)
44
  ```
45
 
 
30
  torch_dtype='auto'
31
  ).eval()
32
 
33
+ # Prompt content: "Pẹlẹ o. Bawo ni o se wa?" ("Hello. How are you?")
34
  messages = [
35
+ {"role": "user", "content": "Pẹlẹ o. Bawo ni o se wa?"}
36
  ]
37
 
38
  input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
39
  output_ids = model.generate(input_ids.to('cuda'))
40
  response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
41
 
42
+ # Model response:
43
  print(response)
44
  ```
45