Artples committed on
Commit
7ee9db0
1 Parent(s): e0fa233

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,9 +12,9 @@ DEFAULT_MAX_NEW_TOKENS = 1024
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
- # FuseChat-7b
16
 
17
- This Space demonstrates [FuseChat-7b](https://huggingface.co/FuseAI/FuseChat-7B-VaRM) by FuseAI.
18
 
19
  """
20
 
@@ -24,7 +24,7 @@ if not torch.cuda.is_available():
24
 
25
 
26
  if torch.cuda.is_available():
27
- model_id = "FuseAI/FuseChat-7B-VaRM"
28
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
  tokenizer.use_default_system_prompt = False
 
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
+ # Starling-LM-7b
16
 
17
+ This Space demonstrates [Starling-LM-7b](https://huggingface.co/Nexusflow/Starling-LM-7B-beta) by Nexusflow.
18
 
19
  """
20
 
 
24
 
25
 
26
  if torch.cuda.is_available():
27
+ model_id = "Nexusflow/Starling-LM-7B-beta"
28
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
  tokenizer.use_default_system_prompt = False