---
license: apache-2.0
---
This is the script I use for training. It is a specialist task, so if you over-train, the model will adjust its expected output accordingly. The prompt below should also be used as the template when querying the model after training (a hedged inference sketch follows the script).
Reset the chat template BACK to the original Mistral template when you are done!
```python
# Prompt template: the verb's infinitive fills the question slot and its
# conjugated forms fill the response fields.
alpaca_prompt = """
### question:
Define this verb, {}
### Response:
gerunds = {}
participles = {}
indicatives = {}
subjuntives = {}
conditionals = {}
imperatives = {}
"""
EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN; `tokenizer` is the one loaded with the base model earlier
def formatting_prompts_func(examples):
    infinitives = examples["infinitive"]
    gerunds = examples["gerund"]
    participles = examples["participle"]
    indicatives = examples["indicative"]
    subjuntives = examples["subjuntive"]
    conditionals = examples["conditional"]
    imperatives = examples["imperative"]
    texts = []
    for infinitive, gerund, participle, indicative, subjuntive, conditional, imperative in zip(
        infinitives, gerunds, participles, indicatives, subjuntives, conditionals, imperatives
    ):
        # Must add EOS_TOKEN, otherwise your generation will go on forever!
        text = alpaca_prompt.format(
            infinitive, gerund, participle, indicative, subjuntive, conditional, imperative
        ) + EOS_TOKEN
        texts.append(text)
    return {"text": texts}
from datasets import load_dataset

# Load the conjugation dataset; it must provide the columns used above
# (infinitive, gerund, participle, indicative, subjuntive, conditional, imperative).
dataset = load_dataset("Define this verb,utations/dolphin-coder", split="train[:100%]")
dataset = dataset.map(formatting_prompts_func, batched=True)
```
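
After training, the same prompt layout can be reused at inference time. Below is a minimal sketch, assuming the Unsloth-style `model` and `tokenizer` returned by the training run; the verb "hablar", the generation settings, and the `get_chat_template` call used to restore the Mistral template are illustrative assumptions, not part of the original script.

```python
from unsloth import FastLanguageModel
from unsloth.chat_templates import get_chat_template

# Assumes `model` and `tokenizer` are the ones produced by the training run above.
FastLanguageModel.for_inference(model)  # switch the model into inference mode

# Prompt only up to "### Response:" so the model fills in the conjugation fields itself.
inference_prompt = """
### question:
Define this verb, {}
### Response:
"""
inputs = tokenizer([inference_prompt.format("hablar")], return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=256, use_cache=True)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])

# When finished, restore the stock Mistral chat template for normal chat use.
tokenizer = get_chat_template(tokenizer, chat_template="mistral")
```

Keep the question line identical to the training template ("Define this verb, ..."), since the model is tuned to expect that exact format.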