AutoModelForConditionalGeneration -> T5ForConditionalGeneration (#3)
Browse files- AutoModelForConditionalGeneration -> T5ForConditionalGeneration (8f7211daa43bf51809b7293b59b7b5a787477930)
Co-authored-by: Koki Tanaka <gojiteji@users.noreply.huggingface.co>
README.md
CHANGED
@@ -100,9 +100,9 @@ For more efficient memory usage, we advise you to load the model in `8bit` using
|
|
100 |
|
101 |
```python
|
102 |
# pip install accelerate transformers bitsandbytes
|
103 |
-
from transformers import AutoModelForConditionalGeneration, AutoTokenizer
|
104 |
import torch
|
105 |
-
model = AutoModelForConditionalGeneration.from_pretrained("google/flan-ul2", device_map="auto", load_in_8bit=True)
|
106 |
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
107 |
|
108 |
input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apples do they have?"
|
@@ -118,9 +118,9 @@ Otherwise, you can load and run the model in `bfloat16` as follows:
|
|
118 |
|
119 |
```python
|
120 |
# pip install accelerate transformers
|
121 |
-
from transformers import AutoModelForConditionalGeneration, AutoTokenizer
|
122 |
import torch
|
123 |
-
model = AutoModelForConditionalGeneration.from_pretrained("google/flan-ul2", torch_dtype=torch.bfloat16, device_map="auto")
|
124 |
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
125 |
|
126 |
input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apples do they have?"
|
|
|
100 |
|
101 |
```python
|
102 |
# pip install accelerate transformers bitsandbytes
|
103 |
+
from transformers import T5ForConditionalGeneration, AutoTokenizer
|
104 |
import torch
|
105 |
+
model = T5ForConditionalGeneration.from_pretrained("google/flan-ul2", device_map="auto", load_in_8bit=True)
|
106 |
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
107 |
|
108 |
input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apples do they have?"
|
|
|
118 |
|
119 |
```python
|
120 |
# pip install accelerate transformers
|
121 |
+
from transformers import T5ForConditionalGeneration, AutoTokenizer
|
122 |
import torch
|
123 |
+
model = T5ForConditionalGeneration.from_pretrained("google/flan-ul2", torch_dtype=torch.bfloat16, device_map="auto")
|
124 |
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
125 |
|
126 |
input_string = "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apples do they have?"
|