Stevross committed on
Commit
dfbbf48
1 Parent(s): 979b5c9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -82,13 +82,13 @@ from h2oai_pipeline import H2OTextGenerationPipeline
82
  from transformers import AutoModelForCausalLM, AutoTokenizer
83
 
84
  tokenizer = AutoTokenizer.from_pretrained(
85
- "PAIXAI/Astrid-Mistral-7B,
86
  use_fast=True,
87
  padding_side="left",
88
  trust_remote_code=True,
89
  )
90
  model = AutoModelForCausalLM.from_pretrained(
91
- "PAIXAI/Astrid-Mistral-7B,
92
  torch_dtype="auto",
93
  device_map={"": "cuda:0"},
94
  trust_remote_code=True,
@@ -114,7 +114,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
114
  ```python
115
  from transformers import AutoModelForCausalLM, AutoTokenizer
116
 
117
- model_name = "PAIXAI/Astrid-Mistral-7B # either local folder or huggingface model name
118
  # Important: The prompt needs to be in the same format the model was trained with.
119
  # You can find an example prompt in the experiment logs.
120
  prompt = "<|prompt|>How are you?<|im_end|><|answer|>"
 
82
  from transformers import AutoModelForCausalLM, AutoTokenizer
83
 
84
  tokenizer = AutoTokenizer.from_pretrained(
85
+ "PAIXAI/Astrid-Mistral-7B",
86
  use_fast=True,
87
  padding_side="left",
88
  trust_remote_code=True,
89
  )
90
  model = AutoModelForCausalLM.from_pretrained(
91
+ "PAIXAI/Astrid-Mistral-7B",
92
  torch_dtype="auto",
93
  device_map={"": "cuda:0"},
94
  trust_remote_code=True,
 
114
  ```python
115
  from transformers import AutoModelForCausalLM, AutoTokenizer
116
 
117
+ model_name = "PAIXAI/Astrid-Mistral-7B" # either local folder or huggingface model name
118
  # Important: The prompt needs to be in the same format the model was trained with.
119
  # You can find an example prompt in the experiment logs.
120
  prompt = "<|prompt|>How are you?<|im_end|><|answer|>"