Update README.md
I have made modifications to make it compatible with Qwen1.5.

This model is converted with https://github.com/Minami-su/character_AI_open/blob/main/llamafy_qwen_v2.py

Usage:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("Minami-su/Qwen1.5-0.5B-Chat_llamafy")
model = AutoModelForCausalLM.from_pretrained("Minami-su/Qwen1.5-0.5B-Chat_llamafy", torch_dtype="auto", device_map="auto")
# stream decoded tokens to stdout as they are generated
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

messages = [
    {"role": "user", "content": "Who are you?"}  # example prompt
]
inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
inputs = inputs.to("cuda")
generate_ids = model.generate(inputs, max_length=2048, streamer=streamer)
```
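Since llamafy_qwen_v2.py rewrites the Qwen1.5 weights into the Llama layout, the checkpoint should also load through the plain Llama class; that is the point of the conversion. A minimal sketch, assuming the conversion leaves nothing Qwen2-specific behind (`torch_dtype="auto"` is an illustrative choice, not taken from this card):

```python
from transformers import AutoTokenizer, LlamaForCausalLM

# The llamafied checkpoint targets the stock Llama architecture, so no
# trust_remote_code or Qwen2-specific classes should be required.
model = LlamaForCausalLM.from_pretrained(
    "Minami-su/Qwen1.5-0.5B-Chat_llamafy",
    torch_dtype="auto",  # assumption: use the dtype stored in the checkpoint
)
tokenizer = AutoTokenizer.from_pretrained("Minami-su/Qwen1.5-0.5B-Chat_llamafy")
```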
Evaluation, run with the models loaded in 4-bit (sketches of the 4-bit loading call and the harness invocation follow the tables):

Qwen1.5-0.5B-Chat (original), loaded in 4-bit:

```
hf-causal (pretrained=Qwen1.5-0.5B-Chat), limit: None, provide_description: False, num_fewshot: 0, batch_size: 32
|    Task     |Version| Metric |Value |   |Stderr|
|-------------|------:|--------|-----:|---|-----:|
|             |       |mc2     |0.4322|±  |0.0151|
|winogrande   |      0|acc     |0.5564|±  |0.0140|
```

Qwen1.5-0.5B-Chat_llamafy (converted), loaded in 4-bit:

```
hf-causal (pretrained=Qwen1.5-0.5B-Chat_llamafy), limit: None, provide_description: False, num_fewshot: 0, batch_size: 32
|    Task     |Version| Metric |Value |   |Stderr|
|-------------|------:|--------|-----:|---|-----:|
|truthfulqa_mc|      1|mc1     |0.2534|±  |0.0152|
|             |       |mc2     |0.4322|±  |0.0151|
|winogrande   |      0|acc     |0.5556|±  |0.0140|
```
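Both tables above come from 4-bit runs, but the loading call itself is not shown on this card. A minimal sketch of 4-bit loading through bitsandbytes in transformers; the NF4 quantization type and float16 compute dtype are illustrative assumptions, not settings taken from the runs above:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumed quantization settings; the evaluations above may have used others.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "Minami-su/Qwen1.5-0.5B-Chat_llamafy",
    quantization_config=bnb_config,
    device_map="auto",  # place layers automatically on available devices
)
```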
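The `hf-causal (pretrained=..., limit: None, provide_description: False, num_fewshot: 0, batch_size: 32` header lines match the output format of EleutherAI's lm-evaluation-harness, specifically the older releases that still shipped the `hf-causal` adapter. A hypothetical reproduction sketch against that Python API; the exact harness version and arguments are assumptions, not taken from this card:

```python
from lm_eval import evaluator

# Assumes the pre-0.4 EleutherAI lm-evaluation-harness, whose evaluator
# module exposes simple_evaluate() and make_table().
results = evaluator.simple_evaluate(
    model="hf-causal",
    model_args="pretrained=Minami-su/Qwen1.5-0.5B-Chat_llamafy",
    tasks=["truthfulqa_mc", "winogrande"],
    num_fewshot=0,
    batch_size=32,
)
print(evaluator.make_table(results))  # renders the Task/Version/Metric table
```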