Camil Ziane committed on
Commit
f04a800
1 Parent(s): 412e60a

feat: markdown

Browse files
TinyLLaVA_Factory/tinyllava/serve/app.py CHANGED
@@ -21,7 +21,7 @@ from tinyllava.utils import *
21
  from tinyllava.data import *
22
  from tinyllava.model import *
23
 
24
- DEFAULT_MODEL_PATH = "tinyllava/TinyLLaVA-Phi-2-SigLIP-3.1B"
25
 
26
 
27
  block_css = """
@@ -31,8 +31,11 @@ block_css = """
31
  }
32
  """
33
  title_markdown = """
34
- # TinyLLaVA: A Framework of Small-scale Large Multimodal Models
35
- [[Code](https://github.com/DLCV-BUAA/TinyLLaVABench)] | 📚 [[Paper](https://arxiv.org/pdf/2402.14289.pdf)]
 
 
 
36
  """
37
  tos_markdown = """
38
  ### Terms of use
@@ -345,6 +348,7 @@ if __name__ == "__main__":
345
  logger.info(gr.__version__)
346
  args = parse_args()
347
  model_name = args.model_name
 
348
  model, tokenizer, image_processor, context_len = load_pretrained_model(
349
  args.model_path,
350
  load_4bit=args.load_4bit,
 
21
  from tinyllava.data import *
22
  from tinyllava.model import *
23
 
24
+ DEFAULT_MODEL_PATH = "cpu4dream/llava-small-OpenELM-AIMv2-0.6B"
25
 
26
 
27
  block_css = """
 
31
  }
32
  """
33
  title_markdown = """
34
+ # Tiny Llava OpenELM-AIMv2 0.6B 🐛
35
+
36
+ ## Multimodal Image Question Answering on CPU
37
+
38
+ This space demonstrates the capabilities of the [cpu4dream/llava-small-OpenELM-AIMv2-0.6B](https://huggingface.co/cpu4dream/llava-small-OpenELM-AIMv2-0.6B) model, trained using the [TinyLLaVA Framework](https://github.com/TinyLLaVA/TinyLLaVA_Factory).
39
  """
40
  tos_markdown = """
41
  ### Terms of use
 
348
  logger.info(gr.__version__)
349
  args = parse_args()
350
  model_name = args.model_name
351
+ DEFAULT_MODEL_PATH = args.model_path
352
  model, tokenizer, image_processor, context_len = load_pretrained_model(
353
  args.model_path,
354
  load_4bit=args.load_4bit,