Spaces:
Runtime error
Runtime error
edited UI
Browse files- .gitignore +1 -0
- .streamlit/config.toml +1 -1
- Koya Presentation.png +0 -0
- Koya_Presentation-removebg-preview.png +0 -0
- __pycache__/styling.cpython-39.pyc +0 -0
- app.py +28 -4
- requirements.txt +1 -1
- styling.py +1 -1
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
__pycache__
|
.streamlit/config.toml
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
[theme]
|
2 |
primaryColor = "#E694FF"
|
3 |
backgroundColor = "#DCDCDC"
|
4 |
-
secondaryBackgroundColor = "#
|
5 |
textColor = "#00172B"
|
6 |
font = "sans-serif"
|
|
|
1 |
[theme]
|
2 |
primaryColor = "#E694FF"
|
3 |
backgroundColor = "#DCDCDC"
|
4 |
+
secondaryBackgroundColor = "#B8E3F5"
|
5 |
textColor = "#00172B"
|
6 |
font = "sans-serif"
|
Koya Presentation.png
ADDED
Koya_Presentation-removebg-preview.png
ADDED
__pycache__/styling.cpython-39.pyc
DELETED
Binary file (1.92 kB)
|
|
app.py
CHANGED
@@ -18,12 +18,11 @@ st.markdown(
|
|
18 |
"""
|
19 |
|
20 |
# Koya Recommender System
|
21 |
-
|
22 |
You can try it below \n\n\n"""
|
23 |
)
|
24 |
|
25 |
|
26 |
-
|
27 |
@st.cache
|
28 |
def get_model_infos(multilingual="multilingual"):
|
29 |
api = HfApi()
|
@@ -209,15 +208,40 @@ def set_seed():
|
|
209 |
np.random.seed(2023)
|
210 |
torch.manual_seed(2023)
|
211 |
|
|
|
212 |
with st.sidebar:
|
213 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
214 |
footer()
|
215 |
|
216 |
sentence = st.text_input("Please input a sample sentence in the target language")
|
217 |
|
218 |
models = get_model_infos(multilingual=None)
|
219 |
selected_models = st.multiselect(
|
220 |
-
"Select the number of models you would like to compare",
|
|
|
|
|
221 |
)
|
222 |
|
223 |
run = st.button("Get Scores")
|
|
|
18 |
"""
|
19 |
|
20 |
# Koya Recommender System
|
21 |
+
#### 🚀 Welcome to the Koya recommendation system. This system recommends an LLM for you when you provide a sample sentence in your target language and select a list of models.
|
22 |
You can try it below \n\n\n"""
|
23 |
)
|
24 |
|
25 |
|
|
|
26 |
@st.cache
|
27 |
def get_model_infos(multilingual="multilingual"):
|
28 |
api = HfApi()
|
|
|
208 |
np.random.seed(2023)
|
209 |
torch.manual_seed(2023)
|
210 |
|
211 |
+
|
212 |
with st.sidebar:
|
213 |
+
|
214 |
+
st.image("Koya_Presentation-removebg-preview.png")
|
215 |
+
st.subheader("Abstract")
|
216 |
+
st.markdown(
|
217 |
+
"""
|
218 |
+
<div style="text-align: justify">
|
219 |
+
<h6> Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
|
220 |
+
LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
|
221 |
+
the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
|
222 |
+
researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
|
223 |
+
the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
|
224 |
+
making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
|
225 |
+
(Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
|
226 |
+
an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
|
227 |
+
efficient tool to assist researchers and practitioners with low or limited compute access.</h6>
|
228 |
+
</div>
|
229 |
+
|
230 |
+
""",
|
231 |
+
unsafe_allow_html=True,
|
232 |
+
)
|
233 |
+
url = "https://share.streamlit.io/mesmith027/streamlit_webapps/main/MC_pi/streamlit_app.py"
|
234 |
+
st.write("check out the paper [here](%s)" % url)
|
235 |
+
|
236 |
footer()
|
237 |
|
238 |
sentence = st.text_input("Please input a sample sentence in the target language")
|
239 |
|
240 |
models = get_model_infos(multilingual=None)
|
241 |
selected_models = st.multiselect(
|
242 |
+
"Select the number of models you would like to compare",
|
243 |
+
models["id"],
|
244 |
+
max_selections=3,
|
245 |
)
|
246 |
|
247 |
run = st.button("Get Scores")
|
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
pandas
|
2 |
-
streamlit
|
3 |
numpy
|
4 |
torch
|
5 |
huggingface_hub
|
|
|
1 |
pandas
|
2 |
+
streamlit==1.12.0
|
3 |
numpy
|
4 |
torch
|
5 |
huggingface_hub
|
styling.py
CHANGED
@@ -68,7 +68,7 @@ def layout(*args):
|
|
68 |
def footer():
|
69 |
myargs = [
|
70 |
" with ❤️ by ",
|
71 |
-
link("https://twitter.com/AbrahamOwos", "@AbrahamOwos "), "and ", link("https://twitter.com/
|
72 |
br(),
|
73 |
" ✉️ Have a suggestion? Hit us up" ,
|
74 |
]
|
|
|
68 |
def footer():
|
69 |
myargs = [
|
70 |
" with ❤️ by ",
|
71 |
+
link("https://twitter.com/AbrahamOwos", "@AbrahamOwos "), "and ", link("https://twitter.com/ChrisEmezue", "@ChrisEmezue"),
|
72 |
br(),
|
73 |
" ✉️ Have a suggestion? Hit us up" ,
|
74 |
]
|