Artples committed on
Commit
7ab9082
1 Parent(s): 3bcd597

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,9 +12,9 @@ DEFAULT_MAX_NEW_TOKENS = 1024
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
- # Merlinite-7b Chat
16
 
17
- This Space demonstrates model [Merlinite-7b](https://huggingface.co/ibm/merlinite-7b) by IBM, a finetuned Mistral model with 7B parameters for chat instructions.
18
 
19
  """
20
 
@@ -24,7 +24,7 @@ if not torch.cuda.is_available():
24
 
25
 
26
  if torch.cuda.is_available():
27
- model_id = "ibm/merlinite-7b"
28
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
  tokenizer.use_default_system_prompt = False
 
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
+ # Hermes-2 Pro 7b Chat
16
 
17
+ This Space demonstrates [Hermes-2 Pro 7b](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B) by Nous, a finetuned Mistral model with 7B parameters for chat instructions.
18
 
19
  """
20
 
 
24
 
25
 
26
  if torch.cuda.is_available():
27
+ model_id = "NousResearch/Hermes-2-Pro-Mistral-7B"
28
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
  tokenizer.use_default_system_prompt = False