jiajunlong
committed on
Commit
•
801dede
1
Parent(s):
e7dfeb6
Update README.md
Browse files
README.md
CHANGED
@@ -17,7 +17,7 @@ See the list below for the details of each model:
|
|
17 |
2. Run the following command:
|
18 |
|
19 |
```bash
|
20 |
-
python generate_model --model
|
21 |
```
|
22 |
|
23 |
or execute the following test code:
|
@@ -25,11 +25,11 @@ or execute the following test code:
|
|
25 |
```python
|
26 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
27 |
from generate_model import *
|
28 |
-
model = AutoModelForCausalLM.from_pretrained("jiajunlong/TinyLLaVA-0.
|
29 |
config = model.config
|
30 |
-
tokenizer = AutoTokenizer.from_pretrained("jiajunlong/TinyLLaVA-0.
|
31 |
-
prompt="
|
32 |
-
image="
|
33 |
output_text, generation_time = generate(prompt=prompt, image=image, model=model, tokenizer=tokenizer)
|
34 |
print_txt = (
|
35 |
f'\r\n{"=" * os.get_terminal_size().columns}\r\n'
|
|
|
17 |
2. Run the following command:
|
18 |
|
19 |
```bash
|
20 |
+
python generate_model --model jiajunlong/TinyLLaVA-0.89B --prompt 'you want to ask' --image '/path/to/related/image'
|
21 |
```
|
22 |
|
23 |
or execute the following test code:
|
|
|
25 |
```python
|
26 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
27 |
from generate_model import *
|
28 |
+
model = AutoModelForCausalLM.from_pretrained("jiajunlong/TinyLLaVA-0.89B", trust_remote_code=True)
|
29 |
config = model.config
|
30 |
+
tokenizer = AutoTokenizer.from_pretrained("jiajunlong/TinyLLaVA-0.89B", use_fast=False, model_max_length = config.tokenizer_model_max_length,padding_side = config.tokenizer_padding_side)
|
31 |
+
prompt="you want to ask"
|
32 |
+
image="/path/to/related/image"
|
33 |
output_text, generation_time = generate(prompt=prompt, image=image, model=model, tokenizer=tokenizer)
|
34 |
print_txt = (
|
35 |
f'\r\n{"=" * os.get_terminal_size().columns}\r\n'
|