artena committed on
Commit
9e31297
1 Parent(s): e50dc1b

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. utils.py +1 -1
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter
3
  from pathlib import Path
4
 
5
- from utils import LLM_BENCHMARKS_ABOUT_TEXT, LLM_BENCHMARKS_SUBMIT_TEXT, custom_css, jsonl_to_dataframe, add_average_column_to_df, apply_markdown_format_for_columns, submit, PART_LOGO, sort_dataframe_by_column
6
 
7
 
8
 
 
2
  from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter
3
  from pathlib import Path
4
 
5
+ from utils import LLM_BENCHMARKS_ABOUT_TEXT, LLM_BENCHMARKS_SUBMIT_TEXT, custom_css, jsonl_to_dataframe, add_average_column_to_df, apply_markdown_format_for_columns, submit, sort_dataframe_by_column
6
 
7
 
8
 
utils.py CHANGED
@@ -116,7 +116,7 @@ table > tbody td:first-child {
116
 
117
  LLM_BENCHMARKS_ABOUT_TEXT = f"""## Open Lithuanian LLM Leaderboard (v1.0.1)
118
  > The Open Lithuanian LLM Evaluation Leaderboard, developed by **Neurotechnology**, provides a comprehensive benchmarking system specifically designed for Lithuanian LLMs. This leaderboard, based on the open-source [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), offers a unique platform for evaluating the performance of large language models (LLMs) on tasks that demand linguistic proficiency and technical skill in Lithuanian.
119
- > **Note:** This leaderboard is continuously updating its data and models, reflecting the latest developments in Lithuanian LLMs. It is currently in version 1.0.0, serving as the initial benchmark for Lithuanian LLM evaluation, with plans for future enhancements.
120
  ## 1. Key Features
121
  > 1. **Open Evaluation Access**
122
  > The leaderboard allows open participation, meaning that developers and researchers working with open-source models can submit evaluation requests for their models. This accessibility encourages the development and testing of Lithuanian LLMs within the broader AI ecosystem.
 
116
 
117
  LLM_BENCHMARKS_ABOUT_TEXT = f"""## Open Lithuanian LLM Leaderboard (v1.0.1)
118
  > The Open Lithuanian LLM Evaluation Leaderboard, developed by **Neurotechnology**, provides a comprehensive benchmarking system specifically designed for Lithuanian LLMs. This leaderboard, based on the open-source [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness), offers a unique platform for evaluating the performance of large language models (LLMs) on tasks that demand linguistic proficiency and technical skill in Lithuanian.
119
+ > **Note:** This leaderboard is continuously updating its data and models, reflecting the latest developments in Lithuanian LLMs. It is currently in version 1.0.1, serving as the initial benchmark for Lithuanian LLM evaluation, with plans for future enhancements.
120
  ## 1. Key Features
121
  > 1. **Open Evaluation Access**
122
  > The leaderboard allows open participation, meaning that developers and researchers working with open-source models can submit evaluation requests for their models. This accessibility encourages the development and testing of Lithuanian LLMs within the broader AI ecosystem.