Update README.md
README.md (changed)
```diff
@@ -163,11 +163,6 @@ model = AutoModel.from_pretrained('inclusionAI/LLaDA-MoE-7B-A1B-Instruct', trust
 tokenizer = AutoTokenizer.from_pretrained('inclusionAI/LLaDA-MoE-7B-A1B-Instruct', trust_remote_code=True)
 
 prompt = "Lily can run 12 kilometers per hour for 4 hours. After that, she runs 6 kilometers per hour. How many kilometers can she run in 8 hours?"
-m = [
-    {"role": "system", "content": "You are a helpful AI assistant."},
-    {"role": "user", "content": prompt}
-]
-prompt = tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False)
 
 input_ids = tokenizer(prompt)['input_ids']
 input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
```
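The effect of the change is that the README's quick-start snippet now tokenizes the question string directly instead of first wrapping it in a chat template via `tokenizer.apply_chat_template`. Below is a minimal, self-contained sketch of the prompt-preparation step as it reads after this commit; it assumes `torch` is installed and that `device` is defined earlier in the full README example (it is not part of this hunk).

```python
import torch
from transformers import AutoTokenizer

device = 'cuda'  # assumption: defined earlier in the full README example

tokenizer = AutoTokenizer.from_pretrained(
    'inclusionAI/LLaDA-MoE-7B-A1B-Instruct', trust_remote_code=True
)

prompt = ("Lily can run 12 kilometers per hour for 4 hours. After that, she runs "
          "6 kilometers per hour. How many kilometers can she run in 8 hours?")

# Before this commit the prompt was first formatted with
# tokenizer.apply_chat_template([...], add_generation_prompt=True, tokenize=False);
# after the change the raw question string is tokenized as-is.
input_ids = tokenizer(prompt)['input_ids']
input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)  # add a batch dimension of 1
```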