Update app.py
Browse files
app.py
CHANGED
|
@@ -5,6 +5,7 @@ from pathlib import Path
|
|
| 5 |
import re
|
| 6 |
from Model import OmniPathWithInterTaskAttention
|
| 7 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
| 8 |
import os
|
| 9 |
|
| 10 |
# 强制设置 Gradio 为英文环境
|
|
@@ -48,7 +49,7 @@ def load_models():
|
|
| 48 |
|
| 49 |
# 2. Load text generation model
|
| 50 |
# llm_model_name = "Qwen/Qwen3-0.6B"
|
| 51 |
-
llm_model_name = "meta-llama/Meta-Llama-3"
|
| 52 |
tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
|
| 53 |
llm_model = AutoModelForCausalLM.from_pretrained(
|
| 54 |
llm_model_name,
|
|
|
|
| 5 |
import re
|
| 6 |
from Model import OmniPathWithInterTaskAttention
|
| 7 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 8 |
+
import transformers
|
| 9 |
import os
|
| 10 |
|
| 11 |
# 强制设置 Gradio 为英文环境
|
|
|
|
| 49 |
|
| 50 |
# 2. Load text generation model
|
| 51 |
# llm_model_name = "Qwen/Qwen3-0.6B"
|
| 52 |
+
llm_model_name = "meta-llama/Meta-Llama-3-8B"
|
| 53 |
tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
|
| 54 |
llm_model = AutoModelForCausalLM.from_pretrained(
|
| 55 |
llm_model_name,
|