Update README.md
Browse files
README.md
CHANGED
@@ -12,7 +12,7 @@ tags:
|
|
12 |
- meta
|
13 |
- text-generation-inference
|
14 |
---
|
15 |
-
# doshisha-mil/llama-2-70b-chat-4bit-japanese
|
16 |
|
17 |
This model is Llama-2-Chat 70B fine-tuned with the following Japanese version of the Alpaca dataset.
|
18 |
|
@@ -50,7 +50,7 @@ bnb_config = BitsAndBytesConfig(
|
|
50 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
51 |
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map="auto")
|
52 |
|
53 |
-
peft_name = "doshisha-mil/llama-2-70b-chat-4bit-japanese"
|
54 |
model = PeftModel.from_pretrained(
|
55 |
model,
|
56 |
peft_name,
|
|
|
12 |
- meta
|
13 |
- text-generation-inference
|
14 |
---
|
15 |
+
# doshisha-mil/llama-2-70b-chat-4bit-japanese-v1
|
16 |
|
17 |
This model is Llama-2-Chat 70B fine-tuned with the following Japanese version of the Alpaca dataset.
|
18 |
|
|
|
50 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
51 |
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map="auto")
|
52 |
|
53 |
+
peft_name = "doshisha-mil/llama-2-70b-chat-4bit-japanese-v1"
|
54 |
model = PeftModel.from_pretrained(
|
55 |
model,
|
56 |
peft_name,
|