alokabhishek committed on
Commit
99d3cee
1 Parent(s): a7cdaba

Updated Readme

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -76,7 +76,7 @@ model_llama = AutoAWQForCausalLM.from_quantized(model_id_llama, fuse_layer=True,
76
 
77
  # Set up the prompt and prompt template. Change instruction as per requirements.
78
  prompt_llama = "Tell me a funny joke about Large Language Models meeting a Blackhole in an intergalactic Bar."
79
- formatted_prompt = f'''[INST] <<SYS>> You are a helpful, and fun loving assistant. Always answer as jestfully as possible. <</SYS>> {prompt_llama} [/INST] '''
80
 
81
  tokens = tokenizer_llama(formatted_prompt, return_tensors="pt").input_ids.cuda()
82
 
 
76
 
77
  # Set up the prompt and prompt template. Change instruction as per requirements.
78
  prompt_llama = "Tell me a funny joke about Large Language Models meeting a Blackhole in an intergalactic Bar."
79
+ formatted_prompt = f'''<s> [INST] You are a helpful, and fun loving assistant. Always answer as jestfully as possible.[/INST] </s> [INST] {prompt_llama}[/INST]'''
80
 
81
  tokens = tokenizer_llama(formatted_prompt, return_tensors="pt").input_ids.cuda()
82