Update README.md
Browse files
README.md
CHANGED
@@ -7,7 +7,7 @@ pipeline_tag: summarization
|
|
7 |
|
8 |
## Dados Gerais
|
9 |
|
10 |
-
- **Nome:** [
|
11 |
- **Tipo:** Language Model, Transformer Decoder-Only
|
12 |
- **Licença:** Language model
|
13 |
- **Modelo base:** [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit)
|
@@ -34,7 +34,7 @@ load_in_4bit = True
|
|
34 |
if True:
|
35 |
from unsloth import FastLanguageModel
|
36 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
37 |
-
model_name = "
|
38 |
max_seq_length = max_seq_length,
|
39 |
dtype = dtype,
|
40 |
load_in_4bit = load_in_4bit,
|
|
|
7 |
|
8 |
## Dados Gerais
|
9 |
|
10 |
+
- **Nome:** [lmsantos/llama3-cpqd](https://huggingface.co/lmsantos/llama3-cpqd)
|
11 |
- **Tipo:** Language Model, Transformer Decoder-Only
|
12 |
- **Licença:** Language model
|
13 |
- **Modelo base:** [unsloth/llama-3-8b-Instruct-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-Instruct-bnb-4bit)
|
|
|
34 |
if True:
|
35 |
from unsloth import FastLanguageModel
|
36 |
model, tokenizer = FastLanguageModel.from_pretrained(
|
37 |
+
model_name = "lmsantos/llama3-cpqd", # YOUR MODEL YOU USED FOR TRAINING
|
38 |
max_seq_length = max_seq_length,
|
39 |
dtype = dtype,
|
40 |
load_in_4bit = load_in_4bit,
|