Update README.md
README.md CHANGED
````diff
@@ -38,7 +38,7 @@ import torch
 from transformers import pipeline
 
 generate_text = pipeline(
-    model="
+    model="PAIXAI/Astrid-1B",
     torch_dtype="auto",
     trust_remote_code=True,
     use_fast=True,
````
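For context, a minimal sketch of how the updated `pipeline(...)` call runs end to end once the model id is pinned. The `device_map` placement and the prompt/generation settings below are illustrative assumptions, not values taken from this diff:

```python
# Sketch: load the text-generation pipeline with the new model id.
from transformers import pipeline

generate_text = pipeline(
    model="PAIXAI/Astrid-1B",
    torch_dtype="auto",
    trust_remote_code=True,
    use_fast=True,
    device_map={"": "cuda:0"},  # assumed single-GPU placement
)

# Prompt and token budget are illustrative assumptions.
res = generate_text("Why is drinking water so healthy?", max_new_tokens=128)
print(res[0]["generated_text"])
```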
````diff
@@ -76,13 +76,13 @@ from h2oai_pipeline import H2OTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(
-    "
+    "PAIXAI/Astrid-1B",
     use_fast=True,
     padding_side="left",
     trust_remote_code=True,
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "
+    "PAIXAI/Astrid-1B",
     torch_dtype="auto",
     device_map={"": "cuda:0"},
     trust_remote_code=True,
````
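The hunk header shows this snippet sits alongside `h2oai_pipeline.H2OTextGenerationPipeline`, whose definition is outside this diff. As a sketch, the loaded pair can also be driven directly with `model.generate`, sidestepping that wrapper; the prompt format is borrowed from the snippet later in this README, and the token budget is an assumption:

```python
# Sketch: drive the tokenizer/model pair from the hunk above directly,
# without the repo's H2OTextGenerationPipeline wrapper.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "PAIXAI/Astrid-1B",
    use_fast=True,
    padding_side="left",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "PAIXAI/Astrid-1B",
    torch_dtype="auto",
    device_map={"": "cuda:0"},
    trust_remote_code=True,
)

# Prompt format as shown later in this README; max_new_tokens is an assumption.
inputs = tokenizer("<|prompt|>How are you?<|endoftext|><|answer|>", return_tensors="pt").to("cuda:0")
tokens = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(tokens[0], skip_special_tokens=True))
```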
````diff
@@ -108,7 +108,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "
+model_name = "PAIXAI/Astrid-1B" # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
 # You can find an example prompt in the experiment logs.
 prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"
````
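To make the prompt-format requirement concrete, a sketch that completes this snippet: tokenize the formatted prompt, generate, and decode only the tokens produced after the prompt. The generation length is an assumption:

```python
# Sketch: complete the snippet above end to end.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "PAIXAI/Astrid-1B"  # either local folder or huggingface model name
prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", trust_remote_code=True)

inputs = tokenizer(prompt, return_tensors="pt")
tokens = model.generate(**inputs, max_new_tokens=64)  # token budget is an assumption
# Slice off the prompt so only the model's answer is decoded.
answer = tokenizer.decode(tokens[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(answer)
```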
````diff
@@ -182,7 +182,7 @@ This model was trained using H2O LLM Studio and with the configuration in [cfg.y
 Model validation results using [EleutherAI lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness).
 
 ```bash
-CUDA_VISIBLE_DEVICES=0 python main.py --model hf-causal-experimental --model_args pretrained=
+CUDA_VISIBLE_DEVICES=0 python main.py --model hf-causal-experimental --model_args pretrained=PAIXAI/Astrid-1B --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq --device cuda &> eval.log
 ```
 
 
````
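The same evaluation can be scripted from Python; a minimal sketch using `subprocess`, assuming the command runs from a local checkout of lm-evaluation-harness (its `main.py` in the working directory). It mirrors the shell line above, including the `&> eval.log` redirection:

```python
# Sketch: launch the lm-evaluation-harness run above from Python.
# Assumes main.py from a lm-evaluation-harness checkout is in the cwd.
import os
import subprocess

cmd = [
    "python", "main.py",
    "--model", "hf-causal-experimental",
    "--model_args", "pretrained=PAIXAI/Astrid-1B",
    "--tasks", "openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq",
    "--device", "cuda",
]
env = {**os.environ, "CUDA_VISIBLE_DEVICES": "0"}
with open("eval.log", "w") as log:  # mirrors `&> eval.log`
    subprocess.run(cmd, env=env, stdout=log, stderr=subprocess.STDOUT, check=True)
```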