chrisjay committed on
Commit
77a8005
•
1 Parent(s): 720d356

added info on unique users

Files changed (1): app.py (+5, -5)
app.py CHANGED
@@ -160,13 +160,13 @@ def update_data_per_env(rl_env):
 
 
 
-def get_info_display(len_dataframe,env_name,name_leaderboard,is_empty):
+def get_info_display(dataframe,env_name,name_leaderboard,is_empty):
     if not is_empty:
         markdown = """
             <div class='infoPoint'>
             <h1> {name_leaderboard} </h1>
             <br>
-            <p> This is a leaderboard of <b>{len_dataframe}</b> agents playing {env_name} 👩‍🚀. </p>
+            <p> This is a leaderboard of <b>{len_dataframe}</b> agents, from <b>{num_unique_users}</b> unique users, playing {env_name} 👩‍🚀. </p>
             <br>
             <p> We use lower bound result to sort the models: mean_reward - std_reward. </p>
             <br>
@@ -175,7 +175,7 @@ def get_info_display(len_dataframe,env_name,name_leaderboard,is_empty):
             <p> You want to try your model? Read this <a href="https://github.com/huggingface/deep-rl-class/blob/Unit1/unit1/README.md" target="_blank">Unit 1</a> of Deep Reinforcement Learning Class.
             </p>
             </div>
-        """.format(len_dataframe = len_dataframe,env_name = env_name,name_leaderboard = name_leaderboard)
+        """.format(len_dataframe = len(dataframe),env_name = env_name,name_leaderboard = name_leaderboard,num_unique_users = len(set(dataframe['User'].values)))
 
     else:
         markdown = """
@@ -205,7 +205,7 @@ def reload_leaderboard(rl_env):
 
     data_html,data_dataframe,is_empty = RL_DETAILS[rl_env]['data']
 
-    markdown = get_info_display(len(data_dataframe),rl_env,RL_DETAILS[rl_env]['title'],is_empty)
+    markdown = get_info_display(data_dataframe,rl_env,RL_DETAILS[rl_env]['title'],is_empty)
 
     return markdown,data_html
 
@@ -226,7 +226,7 @@ with block:
     for rl_env in RL_ENVS:
        with gr.TabItem(rl_env) as rl_tab:
            data_html,data_dataframe,is_empty = RL_DETAILS[rl_env]['data']
-           markdown = get_info_display(len(data_dataframe),rl_env,RL_DETAILS[rl_env]['title'],is_empty)
+           markdown = get_info_display(data_dataframe,rl_env,RL_DETAILS[rl_env]['title'],is_empty)
            env_state =gr.Variable(default_value=rl_env)
            output_markdown = gr.HTML(markdown)
            reload = gr.Button('Reload Leaderboard')
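
For reference, a minimal, self-contained sketch of what the updated helper now computes. It assumes the leaderboard is a pandas DataFrame with a 'User' column, as the .format(...) call in the diff suggests; the trimmed HTML, the empty-leaderboard message, and the toy data and names in the usage example are purely illustrative, not the app's full template.

import pandas as pd

def get_info_display(dataframe: pd.DataFrame, env_name: str, name_leaderboard: str, is_empty: bool) -> str:
    # Sketch only: the real app builds a longer HTML template than shown here.
    if is_empty:
        # Placeholder message; the actual empty-leaderboard markdown is not shown in this diff.
        return "<div class='infoPoint'><h1> {} </h1><p> No agents yet. </p></div>".format(name_leaderboard)
    # New in this commit: count distinct submitters via the 'User' column.
    num_unique_users = len(set(dataframe['User'].values))
    return (
        "<div class='infoPoint'>"
        "<h1> {name_leaderboard} </h1>"
        "<p> This is a leaderboard of <b>{len_dataframe}</b> agents, "
        "from <b>{num_unique_users}</b> unique users, playing {env_name} 👩‍🚀. </p>"
        "</div>"
    ).format(
        len_dataframe=len(dataframe),
        env_name=env_name,
        name_leaderboard=name_leaderboard,
        num_unique_users=num_unique_users,
    )

# Illustrative usage with made-up rows: three agents submitted by two distinct users.
toy = pd.DataFrame({'User': ['alice', 'bob', 'alice'], 'mean_reward': [200.1, 180.5, 150.2]})
print(get_info_display(toy, 'LunarLander-v2', 'Example Leaderboard', is_empty=False))

Passing the full DataFrame instead of its length is what lets the helper derive both counts from one argument, which is why the two call sites above drop the len(...) wrapper.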