codelion committed
Commit 2506a39 · Parent: d3dc6b7

Update app.py

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -12,12 +12,12 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Mera Mixture Chat
+# Chat with Patched Mixture of Experts (MoE) Model
 """
 
 LICENSE = """\
 ---
-This space is powered by the [mera-mix-4x7B](https://huggingface.co/meraGPT/mera-mix-4x7B) model which was created by [meraGPT](https://meraGPT.com).
+This space is powered by the patched-mix-4x7B model which was created by [patched](https://patched.codes).
 """
 
 if not torch.cuda.is_available():
@@ -25,7 +25,7 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "meraGPT/mera-mix-4x7B"
+    model_id = "patched-codes/patched-mix-4x7B"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
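
For context, a minimal sketch of how the Space loads the model after this commit; it simply mirrors the `from_pretrained` call in app.py and assumes a CUDA GPU plus the `transformers`, `accelerate`, and `bitsandbytes` packages are installed:

```python
# Minimal sketch of the model loading done in app.py after this commit.
# Assumes a CUDA GPU and transformers + accelerate + bitsandbytes are available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

if torch.cuda.is_available():
    model_id = "patched-codes/patched-mix-4x7B"
    # Load the MoE checkpoint in 4-bit so it fits on a single GPU,
    # exactly as the Space's app.py does.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", load_in_4bit=True
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
```

Only the model identifier and the user-facing DESCRIPTION/LICENSE strings change in this commit; the loading and generation code is untouched.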