backup
app.py CHANGED
@@ -28,35 +28,19 @@ check_environment()
 login(token=os.environ["HF_TOKEN"], add_to_git_credential=True)
 
 # Load model and processor (do this outside the inference function to avoid reloading)
-base_model_path =
+base_model_path = (
+    "taesiri/BugsBunny-LLama-3.2-11B-Vision-BaseCaptioner-Medium-FullModel"
+)
 # lora_weights_path = "taesiri/BugsBunny-LLama-3.2-11B-Vision-Base-Medium-LoRA"
 
-
-
-
-
-
-# )
-
-from transformers import AutoModelForCausalLM, AutoProcessor, LlamaTokenizer
-import torch
-
-model_path = "taesiri/BugsBunny-LLama-3.2-11B-Vision-Instruct-Medium-FullModel"
-
-# Load the processor (handles both text and vision inputs)
-processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
-
-# Load the model
-model = AutoModelForCausalLM.from_pretrained(
-    model_path, torch_dtype=torch.bfloat16, device_map="cuda", trust_remote_code=True
+processor = AutoProcessor.from_pretrained(base_model_path)
+model = MllamaForConditionalGeneration.from_pretrained(
+    base_model_path,
+    torch_dtype=torch.bfloat16,
+    device_map="cuda",
 )
-
-# If you specifically need the tokenizer
-tokenizer = LlamaTokenizer.from_pretrained(model_path)
-
-model.tie_weights()
-
 # model = PeftModel.from_pretrained(model, lora_weights_path)
+model.tie_weights()
 
 
 def describe_image_in_JSON(json_string):
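For context, a minimal sketch of how the processor/model pair loaded above is typically driven for inference with Llama 3.2 Vision checkpoints. The image path, prompt text, and max_new_tokens value are illustrative assumptions; the Space's actual inference function is not part of this hunk.

# Usage sketch (assumptions: "example.jpg", the prompt, and max_new_tokens
# are placeholders; the Space's real inference code is not shown in this diff).
import torch
from PIL import Image
from transformers import AutoProcessor, MllamaForConditionalGeneration

base_model_path = (
    "taesiri/BugsBunny-LLama-3.2-11B-Vision-BaseCaptioner-Medium-FullModel"
)
processor = AutoProcessor.from_pretrained(base_model_path)
model = MllamaForConditionalGeneration.from_pretrained(
    base_model_path,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
)
model.tie_weights()

# Build a chat-style prompt with one image placeholder, as the Mllama
# processor expects, then generate a description.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
image = Image.open("example.jpg")  # placeholder input image
inputs = processor(image, prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(processor.decode(output[0], skip_special_tokens=True))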