Update README.md
README.md CHANGED
```diff
@@ -22,12 +22,16 @@ GGUF
 ## Usage
 
 ~~~Bash
-pip install auto-gptq[triton]==0.4.2 transformers
+pip install auto-gptq[triton]==0.4.2 transformers==4.34.1
 ~~~
 
 ~~~python
+import torch
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer , AutoModelForCausalLM
+
+if torch.cuda.is_available():
+    device_name = torch.cuda.get_device_name(0)
 
 model_name_or_path = "mmnga/cyberagent-calm2-7b-chat-GPTQ-calib-ja-1k"
 
@@ -35,7 +39,7 @@ model_name_or_path = "mmnga/cyberagent-calm2-7b-chat-GPTQ-calib-ja-1k"
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
 
 # Model
-model = AutoGPTQForCausalLM.from_quantized(model_name_or_path, use_safetensors=True, device="cuda:0")
+model = AutoGPTQForCausalLM.from_quantized(model_name_or_path, use_safetensors=True, device="cuda:0", use_triton=("A100" in device_name))
 
 # Your test prompt
 prompt = """
```
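For reference, a minimal end-to-end sketch of what the updated Usage section boils down to. The prompt text and generation parameters are illustrative assumptions (the README's own `prompt = """` is truncated in this diff), and the sketch initializes `use_triton` to `False` so that the flag is always defined even on a machine without CUDA, which the diff as written does not guarantee:

```python
import torch
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer

model_name_or_path = "mmnga/cyberagent-calm2-7b-chat-GPTQ-calib-ja-1k"

# Enable the Triton backend only on an A100, mirroring the diff's
# ("A100" in device_name) check; defaulting to False keeps use_triton
# defined on CPU-only machines.
use_triton = False
if torch.cuda.is_available():
    device_name = torch.cuda.get_device_name(0)
    use_triton = "A100" in device_name

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoGPTQForCausalLM.from_quantized(
    model_name_or_path,
    use_safetensors=True,
    device="cuda:0",
    use_triton=use_triton,
)

# Hypothetical test prompt in calm2-chat's USER:/ASSISTANT: format
# (an assumption, not taken from the README).
prompt = "USER: What are the benefits of GPTQ quantization?\nASSISTANT: "

input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda:0")
output_ids = model.generate(
    input_ids=input_ids,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

The sketch omits `BaseQuantizeConfig` and `AutoModelForCausalLM`, which the README imports but this snippet never uses; the `transformers==4.34.1` pin from the install line is kept as-is, since GPTQ loading paths are sensitive to the transformers version.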