Spaces:
Running
on
Zero
Running
on
Zero
revert gradio 5 for zeroGPU
Browse files
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: π π€ππ»
|
|
4 |
colorFrom: blue
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: true
|
10 |
license: mit
|
|
|
4 |
colorFrom: blue
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.44.0
|
8 |
app_file: app.py
|
9 |
pinned: true
|
10 |
license: mit
|
app.py
CHANGED
@@ -8,13 +8,13 @@ model_path = "nvidia/Mistral-NeMo-Minitron-8B-Instruct"
|
|
8 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
9 |
model = AutoModelForCausalLM.from_pretrained(model_path)
|
10 |
|
11 |
-
# Extract config info from model's configuration
|
12 |
-
config_info = model.config
|
13 |
|
14 |
-
# Create a Markdown string to display the complete model configuration information
|
15 |
-
model_info_md = "### Model Configuration: Mistral-NeMo-Minitron-8B-Instruct\n\n"
|
16 |
-
for key, value in config_info.to_dict().items():
|
17 |
-
|
18 |
|
19 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
20 |
# pipe.tokenizer = tokenizer
|
@@ -84,9 +84,9 @@ with gr.Blocks() as demo:
|
|
84 |
with gr.Column(scale=1):
|
85 |
with gr.Group():
|
86 |
gr.Markdown(presentation1)
|
87 |
-
with gr.Column(scale=1):
|
88 |
-
|
89 |
-
|
90 |
with gr.Row():
|
91 |
with gr.Column(scale=3):
|
92 |
chatbot = gr.Chatbot(label="π€ Mistral-NeMo", height=400)
|
|
|
8 |
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
9 |
model = AutoModelForCausalLM.from_pretrained(model_path)
|
10 |
|
11 |
+
# # Extract config info from model's configuration
|
12 |
+
# config_info = model.config
|
13 |
|
14 |
+
# # Create a Markdown string to display the complete model configuration information
|
15 |
+
# model_info_md = "### Model Configuration: Mistral-NeMo-Minitron-8B-Instruct\n\n"
|
16 |
+
# for key, value in config_info.to_dict().items():
|
17 |
+
# model_info_md += f"- **{key.replace('_', ' ').capitalize()}**: {value}\n"
|
18 |
|
19 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
20 |
# pipe.tokenizer = tokenizer
|
|
|
84 |
with gr.Column(scale=1):
|
85 |
with gr.Group():
|
86 |
gr.Markdown(presentation1)
|
87 |
+
# with gr.Column(scale=1):
|
88 |
+
# with gr.Group():
|
89 |
+
# gr.Markdown(model_info_md)
|
90 |
with gr.Row():
|
91 |
with gr.Column(scale=3):
|
92 |
chatbot = gr.Chatbot(label="π€ Mistral-NeMo", height=400)
|