Artples committed on
Commit
7fee0b4
1 Parent(s): b1629f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -9
app.py CHANGED
@@ -14,24 +14,17 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
14
  DESCRIPTION = """\
15
  # Labradorite-13b Chat
16
 
17
- This Space demonstrates model [Labradorite-13b](https://huggingface.co/ibm/labradorite-13b) by IBM, a finetuned Llama 2 model with 13B parameters for chat instructions.
18
 
19
  """
20
 
21
- LICENSE = """
22
- <p/>
23
-
24
- ---
25
- As a derivate work of Labradorite-13b](https://huggingface.co/ibm/labradorite-13b) by IBM,
26
- this demo is governed by the original [license](https://huggingface.co/spaces/Artples/Labradorite-13b-Chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/Artples/Labradorite-13b-Chat/blob/main/USE_POLICY.md).
27
- """
28
 
29
  if not torch.cuda.is_available():
30
  DESCRIPTION += "\n<p>Running on CPU! This demo does not work on CPU.</p>"
31
 
32
 
33
  if torch.cuda.is_available():
34
- model_id = "ibm/labradorite-13b"
35
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
36
  tokenizer = AutoTokenizer.from_pretrained(model_id)
37
  tokenizer.use_default_system_prompt = False
 
14
  DESCRIPTION = """\
15
  # Labradorite-13b Chat
16
 
17
+ This Space demonstrates model [Merlinite-7b](https://huggingface.co/ibm/merlinite-7b) by IBM, a finetuned Mistral model with 7B parameters for chat instructions.
18
 
19
  """
20
 
 
 
 
 
 
 
 
21
 
22
  if not torch.cuda.is_available():
23
  DESCRIPTION += "\n<p>Running on CPU! This demo does not work on CPU.</p>"
24
 
25
 
26
  if torch.cuda.is_available():
27
+ model_id = "ibm/merlinite-7b"
28
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
29
  tokenizer = AutoTokenizer.from_pretrained(model_id)
30
  tokenizer.use_default_system_prompt = False