|
--- |
|
license: apache-2.0 |
|
--- |
|
|
|
|
|
This is the script I use for training. This is a specialist task, so if you over-train, the model will adjust its expected output accordingly. Use this prompt as a template when running the model after training.
|
After training, reset the chat template back to the original Mistral template!
|
|
|
|
|
|
|
```python |
|
|
|
|
|
# Prompt template for verb-conjugation fine-tuning.
# Contains seven positional "{}" slots filled by str.format in
# formatting_prompts_func, in this order: infinitive (inside the
# question), then gerund, participle, indicative, subjuntive,
# conditional, imperative.
# NOTE(review): "subjuntives" is probably a typo for "subjunctives",
# but the spelling must match the dataset column name read by
# formatting_prompts_func — confirm before renaming either one.
alpaca_prompt = """



### question:




Define this verb, {}





### Response:


gerunds = {}

participles = {}

indicatives = {}

subjuntives = {}

conditionals = {}

imperatives = {}




"""
|
|
|
# End-of-sequence token appended to every training example so the model
# learns to stop generating; `tokenizer` is created earlier in the
# training script (outside this excerpt).
EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN
|
def formatting_prompts_func(examples):
    """Turn a batch of conjugation records into formatted training strings.

    Args:
        examples: batched column mapping supplied by
            ``datasets.Dataset.map(..., batched=True)``; must contain the
            columns ``infinitive``, ``gerund``, ``participle``,
            ``indicative``, ``subjuntive``, ``conditional`` and
            ``imperative``.

    Returns:
        A dict with a single ``"text"`` column: one prompt per record,
        built from ``alpaca_prompt`` and terminated with ``EOS_TOKEN``.
    """
    # Column order must match the positional "{}" slots in alpaca_prompt.
    columns = (
        "infinitive",
        "gerund",
        "participle",
        "indicative",
        "subjuntive",
        "conditional",
        "imperative",
    )
    records = zip(*(examples[name] for name in columns))
    # EOS_TOKEN must be appended, otherwise generation will go on forever.
    return {"text": [alpaca_prompt.format(*rec) + EOS_TOKEN for rec in records]}
|
from datasets import load_dataset

# NOTE(review): the dataset id below looks corrupted by a global
# find-and-replace — "Define this verb," appears to have overwritten part
# of the repo name (plausibly "cognitivecomputations/dolphin-coder").
# It also does not match the verb-conjugation columns that
# formatting_prompts_func reads (infinitive/gerund/participle/...).
# Restore the correct Hugging Face dataset id before running.
dataset = load_dataset("Define this verb,utations/dolphin-coder", split = "train[:100%]")

# Build the "text" column used for supervised fine-tuning.
dataset = dataset.map(formatting_prompts_func, batched = True,)
|
|
|
|
|
|
|
``` |