JustinLin610 committed on
Commit
4cd30db
1 Parent(s): 7cbf3e3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -2
README.md CHANGED
@@ -46,7 +46,10 @@ Here provides a code snippet with `apply_chat_template` to show you how to load
46
  from transformers import AutoModelForCausalLM, AutoTokenizer
47
  device = "cuda" # the device to load the model onto
48
 
49
- model = AutoModelForCausalLM.from_pretrained("Qwen2/Qwen2-beta-4B-Chat", device_map="auto")
 
 
 
50
  tokenizer = AutoTokenizer.from_pretrained("Qwen2/Qwen2-beta-4B-Chat")
51
 
52
  prompt = "Give me a short introduction to large language model."
@@ -55,7 +58,8 @@ messages = [
55
  {"role": "user", "content": prompt}
56
  ]
57
  text = tokenizer.apply_chat_template(
58
- messages, tokenize=False,
 
59
  add_generation_prompt=True
60
  )
61
  model_inputs = tokenizer([text], return_tensors="pt").to(device)
 
46
  from transformers import AutoModelForCausalLM, AutoTokenizer
47
  device = "cuda" # the device to load the model onto
48
 
49
+ model = AutoModelForCausalLM.from_pretrained(
50
+ "Qwen2/Qwen2-beta-4B-Chat",
51
+ device_map="auto"
52
+ )
53
  tokenizer = AutoTokenizer.from_pretrained("Qwen2/Qwen2-beta-4B-Chat")
54
 
55
  prompt = "Give me a short introduction to large language model."
 
58
  {"role": "user", "content": prompt}
59
  ]
60
  text = tokenizer.apply_chat_template(
61
+ messages,
62
+ tokenize=False,
63
  add_generation_prompt=True
64
  )
65
  model_inputs = tokenizer([text], return_tensors="pt").to(device)