chiliu committed
Commit db69dce
Parent: faed5af
Files changed (1)
  1. README.md +3 -3
README.md CHANGED
````diff
@@ -87,13 +87,13 @@ from mamba_gpt_pipeline import MambaGPTTextGenerationPipeline
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(
-    "CobraMamba/mamba-gpt-3b",
+    "CobraMamba/mamba-gpt-3b-v2",
     use_fast=False,
     padding_side="left",
     trust_remote_code=False,
 )
 model = AutoModelForCausalLM.from_pretrained(
-    "CobraMamba/mamba-gpt-3b",
+    "CobraMamba/mamba-gpt-3b-v2",
     torch_dtype="auto",
     device_map={"": "cuda:0"},
     trust_remote_code=False,
@@ -119,7 +119,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "CobraMamba/mamba-gpt-3b"  # either local folder or huggingface model name
+model_name = "CobraMamba/mamba-gpt-3b-v2"  # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
 # You can find an example prompt in the experiment logs.
 prompt = "<|prompt|>How are you?</s><|answer|>"
````
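For readers landing on this commit: the first hunk updates the README quickstart to point at the v2 checkpoint. A minimal end-to-end sketch of that quickstart, assuming a CUDA device and that v2 keeps the `<|prompt|>...</s><|answer|>` prompt format shown in the diff (the generation kwargs below are illustrative, not taken from the repo):

```python
# Minimal sketch: load the updated checkpoint and generate one answer.
# Loading kwargs mirror the README snippet being edited in this commit;
# max_new_tokens and greedy decoding are illustrative choices.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "CobraMamba/mamba-gpt-3b-v2",
    use_fast=False,
    padding_side="left",
    trust_remote_code=False,
)
model = AutoModelForCausalLM.from_pretrained(
    "CobraMamba/mamba-gpt-3b-v2",
    torch_dtype="auto",
    device_map={"": "cuda:0"},
    trust_remote_code=False,
)

# The prompt must match the training format: <|prompt|>...</s><|answer|>
prompt = "<|prompt|>How are you?</s><|answer|>"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=128,  # illustrative value
        do_sample=False,
    )

# Decode only the newly generated tokens, skipping the prompt.
answer = tokenizer.decode(
    output_ids[0][inputs["input_ids"].shape[1]:],
    skip_special_tokens=True,
)
print(answer)
```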
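The second hunk sits in the README section on constructing the pipeline from the loaded model and tokenizer yourself. The repo's own `MambaGPTTextGenerationPipeline` constructor is not shown in this diff, so the sketch below substitutes the stock `transformers` text-generation pipeline as a stand-in:

```python
# Sketch: build a generation pipeline yourself, per the README section the
# second hunk touches. The repo's MambaGPTTextGenerationPipeline is not
# shown in this diff; the generic transformers pipeline is a stand-in.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "CobraMamba/mamba-gpt-3b-v2"  # either local folder or hub name
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map={"": "cuda:0"},
)

generate = pipeline("text-generation", model=model, tokenizer=tokenizer)

# The prompt must follow the training format shown in the diff.
result = generate(
    "<|prompt|>How are you?</s><|answer|>",
    max_new_tokens=128,      # illustrative value
    return_full_text=False,  # return only the generated answer
)
print(result[0]["generated_text"])
```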