Updated app to include TensorBoard links and new training metrics

Updated the app to include TensorBoard links and training metrics; it still needs to be updated to include the newly trained models. (A standalone sketch of the metric-reading pattern appears after the diff.)
app.py
CHANGED
@@ -1,19 +1,16 @@
 import gradio as gr
 from transformers import pipeline, AutoTokenizer
 from peft.auto import AutoPeftModelForSequenceClassification
+from tensorboard.backend.event_processing import event_accumulator
+import plotly.express as px
+import pandas as pd
 
 tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
 loraModel = AutoPeftModelForSequenceClassification.from_pretrained("Intradiction/text_classification_WithLORA")
 
 tokenizer1 = AutoTokenizer.from_pretrained("albert-base-v2")
 
-
-
-#pretrained models
-#STSmodel_pipe = pipeline()
-#NLImodel_pipe = pipeline()
-
-# Handle calls to DistilBERT
+# Handle calls to DistilBERT------------------------------------------
 distilBERTUntrained_pipe = pipeline("sentiment-analysis", model="bert-base-uncased")
 distilBERTnoLORA_pipe = pipeline(model="Intradiction/text_classification_NoLORA")
 distilBERTwithLORA_pipe = pipeline("sentiment-analysis", model=loraModel, tokenizer=tokenizer)
@@ -29,7 +26,7 @@ def distilBERTUntrained_fn(text):
     return distilBERTUntrained_pipe(text)
 
 
-# Handle calls to ALBERT
+# Handle calls to ALBERT---------------------------------------------
 ALbertUntrained_pipe = pipeline("text-classification", model="albert-base-v2")
 AlbertnoLORA_pipe = pipeline(model="Intradiction/NLI-Conventional-Fine-Tuning")
 #AlbertwithLORA_pipe = pipeline()
@@ -45,7 +42,7 @@ def AlbertUntrained_fn(text1, text2):
     return ALbertUntrained_pipe({'text': text1, 'text_pair': text2})
 
 
-# Handle calls to Deberta
+# Handle calls to Deberta--------------------------------------------
 DebertaUntrained_pipe = pipeline("text-classification", model="microsoft/deberta-v3-xsmall")
 #DebertanoLORA_pipe = pipeline()
 #DebertawithLORA_pipe = pipeline()
@@ -60,18 +57,79 @@ def DebertawithLORA_fn(text1, text2):
 def DebertaUntrained_fn(text1, text2):
     return DebertaUntrained_pipe({'text': text1, 'text_pair': text2})
 
-
-
-
-
-
-
-
-
-
-
-
-
+#helper functions ------------------------------------------------------
+
+#Text metrics for Untrained models
+def displayMetricStatsUntrained():
+    return "No statistics to display for untrained models"
+
+def displayMetricStatsText():
+    file_name = 'events.out.tfevents.1701212945.784ae33ab242.985.0'
+    event_acc = event_accumulator.EventAccumulator(file_name,
+        size_guidance={
+            event_accumulator.COMPRESSED_HISTOGRAMS: 500,
+            event_accumulator.IMAGES: 4,
+            event_accumulator.AUDIO: 4,
+            event_accumulator.SCALARS: 0,
+            event_accumulator.HISTOGRAMS: 1,
+        })
+
+    event_acc.Reload()
+    accuracy_data = event_acc.Scalars('eval/accuracy')
+    loss_data = event_acc.Scalars('eval/loss')
+    metrics = ''
+    for i in range(0, len(loss_data)):
+        metrics = metrics + 'Epoch Number: ' + str(i) + '\n'
+        metrics = metrics + 'Accuracy (%): ' + str(round(accuracy_data[i].value * 100, 3)) + '\n'
+        metrics = metrics + 'Loss (%): ' + str(round(loss_data[i].value * 100, 3)) + '\n\n'
+
+    return metrics
+
+def displayMetricStatsGraph():
+    file_name = 'events.out.tfevents.1701212945.784ae33ab242.985.0'
+    event_acc = event_accumulator.EventAccumulator(file_name,
+        size_guidance={
+            event_accumulator.COMPRESSED_HISTOGRAMS: 500,
+            event_accumulator.IMAGES: 4,
+            event_accumulator.AUDIO: 4,
+            event_accumulator.SCALARS: 0,
+            event_accumulator.HISTOGRAMS: 1,
+        })
+
+    event_acc.Reload()
+    accuracy_data = event_acc.Scalars('eval/accuracy')
+    loss_data = event_acc.Scalars("eval/loss")
+    epoch = []
+    metric = []
+    group = []
+    for i in range(0, len(accuracy_data)):
+        epoch.append(str(i))
+        metric.append(accuracy_data[i].value)
+        group.append('G1')
+    for j in range(0, len(loss_data)):
+        epoch.append(str(j))
+        metric.append(loss_data[j].value)
+        group.append('G2')
+    data = pd.DataFrame()
+    data['Epoch'] = epoch
+    data['Metric'] = metric
+    data['Group'] = group
+
+    #generate the actual plot
+    return px.line(data, x = 'Epoch', y = 'Metric', color=group, markers = True)
+
+
+# #placeholder
+# def chat1(message,history):
+#     history = history or []
+#     message = message.lower()
+#     if message.startswith("how many"):
+#         response = ("1 to 10")
+#     else:
+#         response = ("whatever man whatever manwhatever manwhatever manwhatever manwhatever manwhatever manwhatever manwhatever manwhatever manwhatever manwhatever man")
+
+#     history.append((message, response))
+#     return history, history
 
 
 with gr.Blocks(
@@ -81,7 +139,7 @@ with gr.Blocks(
     gr.Markdown("""
     <div style="overflow: hidden;color:#fff;display: flex;flex-direction: column;align-items: center; position: relative; width: 100%; height: 180px;background-size: cover; background-image: url(https://www.grssigns.co.uk/wp-content/uploads/web-Header-Background.jpg);">
     <img style="width: 130px;height: 60px;position: absolute;top:10px;left:10px" src="https://www.torontomu.ca/content/dam/tmumobile/images/TMU-Mobile-AppIcon.png"/>
-    <span style="margin-top: 40px;font-size: 36px ;font-family:fantasy;">Efficient Fine Tuning
+    <span style="margin-top: 40px;font-size: 36px ;font-family:fantasy;">Efficient Fine Tuning Of Large Language Models</span>
    <span style="margin-top: 10px;font-size: 14px;">By: Rahul Adams, Greylyn Gao, Rajevan Logarajah & Mahir Faisal</span>
    <span style="margin-top: 5px;font-size: 14px;">Group Id: AR06 FLC: Alice Reuda</span>
    </div>
@@ -102,6 +160,8 @@ with gr.Blocks(
             with gr.Column(variant="panel"):
                 inp = gr.Textbox(placeholder="Prompt",label= "Enter Query")
                 btn = gr.Button("Run")
+                btnTextClassStats = gr.Button("Display Training Metrics")
+                btnTensorLink = gr.Button(value="View Tensorboard Graphs", link="https://huggingface.co/Intradiction/text_classification_NoLORA/tensorboard")
                 gr.Examples(
                     [
                         "I thought this was a bit contrived",
@@ -115,29 +175,22 @@ with gr.Blocks(
             with gr.Column(scale=3):
                 with gr.Row(variant="panel"):
                     TextClassOut = gr.Textbox(label= "Untrained Base Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    TextClassUntrained = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     TextClassOut1 = gr.Textbox(label= "Conventionaly Trained Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    TextClassNoLoraStats = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     TextClassOut2 = gr.Textbox(label= "LoRA Fine Tuned Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    TextClassLoraStats = gr.Textbox(label = "Training Informaiton")
 
         btn.click(fn=distilBERTUntrained_fn, inputs=inp, outputs=TextClassOut)
         btn.click(fn=distilBERTnoLORA_fn, inputs=inp, outputs=TextClassOut1)
         btn.click(fn=distilBERTwithLORA_fn, inputs=inp, outputs=TextClassOut2)
-
+        btnTextClassStats.click(fn=displayMetricStatsUntrained, outputs=TextClassUntrained)
+        btnTextClassStats.click(fn=displayMetricStatsText, outputs=TextClassNoLoraStats)
+        btnTextClassStats.click(fn=DebertawithLORA_fn, outputs=TextClassLoraStats) #to be changed
 
     with gr.Tab("Natural Language Inferencing"):
         with gr.Row():
@@ -155,6 +208,8 @@ with gr.Blocks(
                 nli_p1 = gr.Textbox(placeholder="Prompt One",label= "Enter Query")
                 nli_p2 = gr.Textbox(placeholder="Prompt Two",label= "Enter Query")
                 nli_btn = gr.Button("Run")
+                btnNLIStats = gr.Button("Display Training Metrics")
+                btnTensorLink1 = gr.Button(value="View Tensorboard Graphs", link="https://huggingface.co/Intradiction/text_classification_NoLORA/tensorboard") #to be changed
                 gr.Examples(
                     [
                         "I am with my friends",
@@ -177,28 +232,23 @@ with gr.Blocks(
             with gr.Column(scale=3):
                 with gr.Row(variant="panel"):
                     NLIOut = gr.Textbox(label= "Untrained Base Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    NLIUntrained = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     NLIOut1 = gr.Textbox(label= "Conventionaly Trained Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    NLINoLoraStats = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     NLIOut2 = gr.Textbox(label= "LoRA Fine Tuned Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    NLILoraStats = gr.Textbox(label = "Training Informaiton")
 
         nli_btn.click(fn=AlbertUntrained_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut)
        nli_btn.click(fn=AlbertnoLORA_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut1)
        nli_btn.click(fn=AlbertwithLORA_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut2)
+        btnNLIStats.click(fn=displayMetricStatsUntrained, outputs=NLIUntrained)
+        #btnNLIStats.click(fn=displayMetricStatsUntrained, outputs=NLINoLoraStats)
+        #btnNLIStats.click(fn=displayMetricStatsUntrained, outputs=NLILoraStats)
+
 
     with gr.Tab("Semantic Text Similarity"):
         with gr.Row():
@@ -216,6 +266,8 @@ with gr.Blocks(
                 sts_p1 = gr.Textbox(placeholder="Prompt One",label= "Enter Query")
                 sts_p2 = gr.Textbox(placeholder="Prompt Two",label= "Enter Query")
                 sts_btn = gr.Button("Run")
+                btnSTSStats = gr.Button("Display Training Metrics")
+                btnTensorLink2 = gr.Button(value="View Tensorboard Graphs", link="https://huggingface.co/Intradiction/text_classification_NoLORA/tensorboard") #to be changed
                 gr.Examples(
                     [
                         "the ball is green",
@@ -238,28 +290,22 @@ with gr.Blocks(
             with gr.Column(scale=3):
                 with gr.Row(variant="panel"):
                     sts_out = gr.Textbox(label= "Untrained Base Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    STSUntrained = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     sts_out1 = gr.Textbox(label= "Conventionally Trained Model")
-                    gr.
-                    <span><center><B>Training Information</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    STSNoLoraStats = gr.Textbox(label = "Training Informaiton")
 
                 with gr.Row(variant="panel"):
                     sts_out2 = gr.Textbox(label= "LoRA Fine Tuned Model")
-                    gr.
-                    <span><center><B>Training Informadtion</B><center></span>
-                    <span><br><br><br><br><br></span>
-                    </div>""")
+                    STSLoraStats = gr.Textbox(label = "Training Informaiton")
 
         sts_btn.click(fn=DebertaUntrained_fn, inputs=[sts_p1,sts_p2], outputs=sts_out)
         sts_btn.click(fn=DebertanoLORA_fn, inputs=[sts_p1,sts_p2], outputs=sts_out1)
         sts_btn.click(fn=DebertawithLORA_fn, inputs=[sts_p1,sts_p2], outputs=sts_out2)
+        btnSTSStats.click(fn=displayMetricStatsUntrained, outputs=STSUntrained)
+        #btnSTSStats.click(fn=displayMetricStatsUntrained, outputs=STSNoLoraStats)
+        #btnSTSStats.click(fn=displayMetricStatsUntrained, outputs=STSLoraStats)
 
     with gr.Tab("More informatioen"):
         gr.Markdown("stuff to add")