eswardivi committed on
Commit
1290a74
1 Parent(s): 7115ad7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -18,9 +18,9 @@ quantization_config = BitsAndBytesConfig(
18
  )
19
 
20
  model = AutoModelForCausalLM.from_pretrained(
21
- "meta-llama/Meta-Llama-3-8B-Instruct", quantization_config=quantization_config, token=token
22
  )
23
- tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", token=token)
24
  terminators = [
25
  tok.eos_token_id,
26
  tok.convert_tokens_to_ids("<|eot_id|>")
@@ -105,6 +105,6 @@ demo = gr.ChatInterface(
105
  ],
106
  stop_btn="Stop Generation",
107
  title="Chat With LLMs",
108
- description="Now Running [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) in 4bit"
109
  )
110
  demo.launch()
 
18
  )
19
 
20
  model = AutoModelForCausalLM.from_pretrained(
21
+ "NousResearch/Hermes-2-Pro-Llama-3-8B", quantization_config=quantization_config, token=token
22
  )
23
+ tok = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Pro-Llama-3-8B", token=token)
24
  terminators = [
25
  tok.eos_token_id,
26
  tok.convert_tokens_to_ids("<|eot_id|>")
 
105
  ],
106
  stop_btn="Stop Generation",
107
  title="Chat With LLMs",
108
+ description="Now Running [NousResearch/Hermes-2-Pro-Llama-3-8B](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B) in 4bit"
109
  )
110
  demo.launch()