bofenghuang committed on
Commit
9c2b558
1 Parent(s): c7f02be
Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -11,12 +11,12 @@ inference: false
 ---
 
 <p align="center" width="100%">
-<img src="https://huggingface.co/bofenghuang/vigogne-30b-instruct/resolve/main/vigogne_logo.png" alt="Vigogne" style="width: 40%; min-width: 300px; display: block; margin: auto;">
+<img src="https://huggingface.co/bofenghuang/vigogne-33b-instruct/resolve/main/vigogne_logo.png" alt="Vigogne" style="width: 40%; min-width: 300px; display: block; margin: auto;">
 </p>
 
-# Vigogne-30B-Instruct: A French Instruction-following LLaMA Model
+# Vigogne-33B-Instruct: A French Instruction-following LLaMA Model
 
-Vigogne-30B-Instruct is a LLaMA-30B model fine-tuned to follow the French instructions.
+Vigogne-33B-Instruct is a LLaMA-33B model fine-tuned to follow the French instructions.
 
 For more information, please visit the Github repo: https://github.com/bofenghuang/vigogne
 
@@ -36,7 +36,7 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 from vigogne.preprocess import generate_instruct_prompt
 
-model_name_or_path = "bofenghuang/vigogne-30b-instruct"
+model_name_or_path = "bofenghuang/vigogne-33b-instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side="right", use_fast=False)
 model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map="auto")
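
The second hunk only shows the part of the README's usage snippet that the commit touches (the `model_name_or_path` rename). For context, here is a minimal sketch of how that snippet might continue after the change; the example instruction, the exact call to `generate_instruct_prompt`, and the generation settings below are illustrative assumptions, not content from this commit.

```python
# Minimal sketch of inference with the renamed checkpoint; the instruction text,
# prompt-helper call, and sampling parameters are assumptions, not part of the diff above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from vigogne.preprocess import generate_instruct_prompt

model_name_or_path = "bofenghuang/vigogne-33b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side="right", use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map="auto")

# Build a French instruction prompt and tokenize it (example instruction is hypothetical)
user_instruction = "Expliquez la différence entre DoS et phishing."
prompt = generate_instruct_prompt(user_instruction)
input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to(model.device)

# Generate a completion and decode only the newly produced tokens
output_ids = model.generate(
    input_ids=input_ids,
    generation_config=GenerationConfig(
        temperature=0.1,
        do_sample=True,
        max_new_tokens=512,
    ),
)
print(tokenizer.decode(output_ids[0, input_ids.shape[1]:], skip_special_tokens=True))
```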