efederici committed
Commit 3b39835
1 Parent(s): c43a3bf

Update README.md

Files changed (1)
  1. README.md +4 -7
README.md CHANGED
@@ -8,6 +8,7 @@ tags:
 - mistral
 - chatml
 - axolotl
+- kpo
 prompt_template: <|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|>
 <|im_start|>assistant
 model-index:
@@ -73,21 +74,17 @@ gen = GenerationConfig(
     eos_token_id=tokenizer.convert_tokens_to_ids("<|im_end|>")
 )
 
+streamer = TextStreamer(tokenizer, skip_prompt=True)
+
 messages = [
     {"role": "system", "content": "Sei un assistente utile."},
     {"role": "user", "content": "{prompt}"}
 ]
 
-with torch.no_grad(), torch.backends.cuda.sdp_kernel(
-    enable_flash=True,
-    enable_math=False,
-    enable_mem_efficient=False
-):
+with torch.no_grad():
     temp = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     inputs = tokenizer(temp, return_tensors="pt").to("cuda")
 
-    streamer = TextStreamer(tokenizer, skip_prompt=True)
-
     _ = model.generate(
         **inputs,
         streamer=streamer,
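
For reference, here is a minimal sketch of how the README usage snippet reads after this commit. The repository id, the model/tokenizer loading lines, and the `generation_config=gen` argument are assumptions added for illustration; only the lines shown in the hunks above come from the README itself.

```python
# Minimal sketch of the usage example after this commit (assumptions noted below).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextStreamer

repo_id = "<this-model-repo>"  # placeholder: the actual model id is not shown in the diff
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16).to("cuda")

gen = GenerationConfig(
    eos_token_id=tokenizer.convert_tokens_to_ids("<|im_end|>"),
)

# The streamer is now created once, before the inference block.
streamer = TextStreamer(tokenizer, skip_prompt=True)

messages = [
    {"role": "system", "content": "Sei un assistente utile."},  # "You are a helpful assistant."
    {"role": "user", "content": "{prompt}"},
]

# The explicit torch.backends.cuda.sdp_kernel(...) context manager was dropped;
# a plain no_grad() block is used and PyTorch chooses the attention backend itself.
with torch.no_grad():
    temp = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(temp, return_tensors="pt").to("cuda")

    _ = model.generate(
        **inputs,
        streamer=streamer,
        generation_config=gen,  # assumption: the config defined above is passed here
    )
```

The functional change is small: the streamer is built before inference rather than inside the context manager, and attention is no longer pinned to the flash backend via `sdp_kernel(enable_flash=True, ...)`, leaving scaled-dot-product attention backend selection to PyTorch's defaults.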