lorocksUMD committed
Commit
4bd0321
1 Parent(s): d30dcf6

Update app.py

Files changed (1): app.py (+7, −7)
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer
-from llava.model.language_model import LlavaMistralForCausalLM
+# from llava.model.language_model import LlavaMistralForCausalLM
 from llava.model.builder import load_pretrained_model
 from llava.mm_utils import get_model_name_from_path
 
@@ -12,12 +12,12 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 model_path = "liuhaotian/llava-v1.6-mistral-7b"
 model_name = get_model_name_from_path(model_path)
-tokenizer = AutoTokenizer.from_pretrained(model_path)
-model = LlavaMistralForCausalLM.from_pretrained(
-    model_path,
-    low_cpu_mem_usage=True,
-    # offload_folder="/content/sample_data"
-)
+# tokenizer = AutoTokenizer.from_pretrained(model_path)
+# model = LlavaMistralForCausalLM.from_pretrained(
+#     model_path,
+#     low_cpu_mem_usage=True,
+#     # offload_folder="/content/sample_data"
+# )
 
 # tokenizer, model, image_processor, context_len = load_pretrained_model(
 #     model_path, None, model_name
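
The commit comments out the direct LlavaMistralForCausalLM import and the manual AutoTokenizer/from_pretrained setup; the commented-out call at the bottom of the hunk suggests the file is moving toward LLaVA's builder, which returns the tokenizer, model, image processor, and context length in one step. A minimal sketch of that loading path, assuming the (model_path, model_base, model_name) signature shown in the file's own commented-out code:

    from llava.model.builder import load_pretrained_model
    from llava.mm_utils import get_model_name_from_path

    model_path = "liuhaotian/llava-v1.6-mistral-7b"
    model_name = get_model_name_from_path(model_path)

    # model_base=None loads the full checkpoint directly (no LoRA base).
    # The builder returns everything the manual setup above constructed,
    # plus the image processor needed for multimodal inputs.
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path, None, model_name
    )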