Update README.md
Browse files
README.md
CHANGED
@@ -5,7 +5,7 @@ datasets:
|
|
5 |
|
6 |

|
7 |
|
8 |
-
# Model card for RWKV-4 | 3B parameters
|
9 |
|
10 |
RWKV is a project led by [Bo Peng](https://github.com/BlinkDL). Learn more about the model architecture in the blog posts by Johan Wind [here](https://johanwind.github.io/2023/03/23/rwkv_overview.html) and [here](https://johanwind.github.io/2023/03/23/rwkv_details.html). Learn more about the project by joining the [RWKV discord server](https://discordapp.com/users/468093332535640064).
|
11 |
|
@@ -47,8 +47,8 @@ You can use the `AutoModelForCausalLM` and `AutoTokenizer` classes to generate t
|
|
47 |
```python
|
48 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
49 |
|
50 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-
|
51 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-
|
52 |
|
53 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
54 |
|
@@ -66,8 +66,8 @@ print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
|
|
66 |
```python
|
67 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
68 |
|
69 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-
|
70 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-
|
71 |
|
72 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
73 |
|
@@ -89,8 +89,8 @@ print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
|
|
89 |
import torch
|
90 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
91 |
|
92 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-
|
93 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-
|
94 |
|
95 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
96 |
|
@@ -110,8 +110,8 @@ print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
|
|
110 |
# pip install accelerate
|
111 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
112 |
|
113 |
-
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-
|
114 |
-
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-
|
115 |
|
116 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
117 |
|
|
|
5 |
|
6 |

|
7 |
|
8 |
+
# Model card for RWKV-4 | 3B parameters chat version (Raven)
|
9 |
|
10 |
RWKV is a project led by [Bo Peng](https://github.com/BlinkDL). Learn more about the model architecture in the blog posts by Johan Wind [here](https://johanwind.github.io/2023/03/23/rwkv_overview.html) and [here](https://johanwind.github.io/2023/03/23/rwkv_details.html). Learn more about the project by joining the [RWKV discord server](https://discordapp.com/users/468093332535640064).
|
11 |
|
|
|
47 |
```python
|
48 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
49 |
|
50 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-raven-3b")
|
51 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-raven-3b")
|
52 |
|
53 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
54 |
|
|
|
66 |
```python
|
67 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
68 |
|
69 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-raven-3b").to(0)
|
70 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-raven-3b")
|
71 |
|
72 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
73 |
|
|
|
89 |
import torch
|
90 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
91 |
|
92 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-raven-3b", torch_dtype=torch.float16).to(0)
|
93 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-raven-3b")
|
94 |
|
95 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
96 |
|
|
|
110 |
# pip install accelerate
|
111 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
112 |
|
113 |
+
model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-raven-3b", device_map="auto")
|
114 |
+
tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-raven-3b")
|
115 |
|
116 |
prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese."
|
117 |
|