Satyam-Singh committed on
Commit
9a3e47d
1 Parent(s): 4d74a88

Update app.py

Files changed (1)
  app.py +4 -3
app.py CHANGED
@@ -1,7 +1,7 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
-
-
+import os
+client = InferenceClient("Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant")
 
 def format_prompt(message, history):
     prompt = "<s>"
@@ -10,7 +10,8 @@ def format_prompt(message, history):
     prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
     return prompt
-client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+client = InferenceClient(os.environ.get('LLAVA'))
 
 def generate(
     prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
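The patch swaps the hardcoded Mixtral client for one whose model id comes from the environment: client is first created from the "Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant" repo and then reassigned from os.environ.get('LLAVA') after format_prompt, so the later assignment is the one the rest of app.py uses. A minimal sketch of that initialization pattern, assuming LLAVA holds a Hugging Face repo id (the fallback value below is illustrative, not part of the commit):

import os
from huggingface_hub import InferenceClient

# The commit passes os.environ.get('LLAVA') directly, so a missing variable
# yields None. The fallback here is an assumed default for this sketch,
# mirroring the repo id hardcoded earlier in the diff.
model_id = os.environ.get("LLAVA", "Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant")
client = InferenceClient(model_id)

On a Hugging Face Space, LLAVA would typically be defined as a Space secret or variable so that it is available through os.environ when app.py starts.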