Mark Febrizio committed on
Commit
bd042b8
·
1 Parent(s): fdf2d60

Update app.py

Browse files

revise title and notes; remove sig filtering from topline numbers; fix errors with download

Files changed (1) hide show
  1. app.py +33 -16
app.py CHANGED
@@ -38,10 +38,10 @@ ui.include_css( Path(__file__).parent.joinpath("www") / "style.css")
38
 
39
 
40
  # this text appears in the browser tab
41
- TITLE = "CRA Window Tracker - GW Regulatory Studies Center"
42
 
43
  # page header above main content
44
- HEADER = "Rules in the Congressional Review Act (CRA) Window"
45
  page_header = ui.HTML(
46
  f"""
47
  <div class="header">
@@ -82,7 +82,7 @@ with ui.sidebar(open={"desktop": "open", "mobile": "closed"}):
82
 
83
  with ui.tooltip(placement="right", id="window_tooltip"):
84
  ui.input_date("start_date", "Select start of window", value=WINDOW_OPEN_DATE, min=START_DATE, max=date.today())
85
- "The estimated CRA window open date is May 23. See the notes for more information."
86
 
87
  ui.input_select("menu_significant", "Select rule significance", choices=["all", "3f1-significant", "other-significant"], selected="all", multiple=True, size=3)
88
 
@@ -127,7 +127,7 @@ with ui.navset_card_underline(title=""):
127
  with ui.card(full_screen=True):
128
  @render.data_frame
129
  def table_rule_detail():
130
- df = filtered_df().copy()
131
  df.loc[:, "date"] = df.loc[:, "publication_date"].apply(lambda x: f"{x.date()}")
132
  char, limit = " ", 10
133
  df.loc[:, "title"] = df["title"].apply(lambda x: x if len(x.split(char)) < (limit + 1) else f"{char.join(x.split(char)[:limit])}...")
@@ -234,22 +234,24 @@ with ui.accordion(open=False):
234
  "title",
235
  "type",
236
  "action",
237
- "abstract",
238
- "docket_ids",
239
  "json_url",
240
  "html_url",
241
  "agencies",
242
  "independent_reg_agency",
243
- "parent_slug",
244
- "subagency_slug",
245
  "president_id",
246
  "significant",
247
  "3f1_significant",
248
  "other_significant"
249
  )
250
  ):
 
 
 
 
251
  await asyncio.sleep(0.25)
252
- yield filtered_df().loc[:, output_cols].to_csv(index=False)
253
 
254
  # notes
255
  with ui.accordion(open=False):
@@ -258,9 +260,14 @@ with ui.accordion(open=False):
258
 
259
  ui.markdown(
260
  f"""
261
- Rule data retrieved from the [Federal Register API](https://www.federalregister.gov/developers/documentation/api/v1).
262
 
263
- The window for the CRA lookback period is [estimated](https://www.huntonak.com/the-nickel-report/federal-agencies-face-looming-congressional-review-act-deadline) to open on May 23, 2024.
 
 
 
 
 
264
  """
265
  )
266
 
@@ -288,6 +295,16 @@ def filtered_df():
288
  bool_agency = [True if sum(selected in agency for selected in input.menu_agency()) > 0 else False for agency in filt_df["parent_slug"]]
289
  filt_df = filt_df.loc[bool_agency]
290
 
 
 
 
 
 
 
 
 
 
 
291
  # filter significance
292
  bool_ = []
293
  if (input.menu_significant() is not None) and ("all" not in input.menu_significant()):
@@ -296,21 +313,21 @@ def filtered_df():
296
  if "other-significant" in input.menu_significant():
297
  bool_.append((filt_df["other_significant"] == 1).to_numpy())
298
  filt_df = filt_df.loc[array(bool_).any(axis=0)]
299
-
300
  # return filtered dataframe
301
  return filt_df
302
 
303
 
304
  @reactive.calc
305
  def grouped_df_month():
306
- filt_df = filtered_df()
307
  grouped = groupby_date(filt_df, significant=GET_SIGNIFICANT)
308
  return grouped
309
 
310
 
311
  @reactive.calc
312
  def grouped_df_day():
313
- filt_df = filtered_df()
314
  date_col = "publication_date"
315
  grouped = groupby_date(filt_df, group_col=date_col, significant=GET_SIGNIFICANT)
316
  grouped = pad_missing_dates(
@@ -327,7 +344,7 @@ def grouped_df_day():
327
 
328
  @reactive.calc
329
  def grouped_df_week():
330
- filt_df = filtered_df()
331
  filt_df = add_weeks_to_data(filt_df)
332
  try:
333
  grouped = groupby_date(filt_df, group_col=("week_number", "week_of"), significant=GET_SIGNIFICANT)
@@ -347,7 +364,7 @@ def grouped_df_week():
347
 
348
  @reactive.calc
349
  def grouped_df_agency():
350
- filt_df = filtered_df()
351
  grouped = groupby_agency(filt_df, metadata=METADATA, significant=GET_SIGNIFICANT)
352
  return grouped
353
 
 
38
 
39
 
40
  # this text appears in the browser tab
41
+ TITLE = "CRA Window Exploratory Dashboard - GW Regulatory Studies Center"
42
 
43
  # page header above main content
44
+ HEADER = "Congressional Review Act (CRA) Window Exploratory Dashboard"
45
  page_header = ui.HTML(
46
  f"""
47
  <div class="header">
 
82
 
83
  with ui.tooltip(placement="right", id="window_tooltip"):
84
  ui.input_date("start_date", "Select start of window", value=WINDOW_OPEN_DATE, min=START_DATE, max=date.today())
85
+ "The estimated lookback window open date is May 22. This dashboard allows users to explore how different lookback window dates would affect the set of rules available for congressional review. See the notes for more information."
86
 
87
  ui.input_select("menu_significant", "Select rule significance", choices=["all", "3f1-significant", "other-significant"], selected="all", multiple=True, size=3)
88
 
 
127
  with ui.card(full_screen=True):
128
  @render.data_frame
129
  def table_rule_detail():
130
+ df = filter_significance().copy()
131
  df.loc[:, "date"] = df.loc[:, "publication_date"].apply(lambda x: f"{x.date()}")
132
  char, limit = " ", 10
133
  df.loc[:, "title"] = df["title"].apply(lambda x: x if len(x.split(char)) < (limit + 1) else f"{char.join(x.split(char)[:limit])}...")
 
234
  "title",
235
  "type",
236
  "action",
 
 
237
  "json_url",
238
  "html_url",
239
  "agencies",
240
  "independent_reg_agency",
241
+ "parent_agencies",
242
+ "subagencies",
243
  "president_id",
244
  "significant",
245
  "3f1_significant",
246
  "other_significant"
247
  )
248
  ):
249
+ filt_df = filtered_df().copy()
250
+ filt_df.loc[:, "agencies"] = filt_df.loc[:, "agency_slugs"].apply(lambda x: "; ".join(x))
251
+ filt_df.loc[:, "parent_agencies"] = filt_df.loc[:, "parent_slug"].apply(lambda x: "; ".join(x))
252
+ filt_df.loc[:, "subagencies"] = filt_df.loc[:, "subagency_slug"].apply(lambda x: "; ".join(x))
253
  await asyncio.sleep(0.25)
254
+ yield filt_df.loc[:, [c for c in output_cols if c in filt_df.columns]].to_csv(index=False)
255
 
256
  # notes
257
  with ui.accordion(open=False):
 
260
 
261
  ui.markdown(
262
  f"""
263
+ Rule data are retrieved daily from the [Federal Register API](https://www.federalregister.gov/developers/documentation/api/v1), which publishes new editions of the Federal Register each business day.
264
 
265
+ The [Congressional Review Act](http://uscode.house.gov/view.xhtml?req=granuleid%3AUSC-prelim-title5-chapter8&saved=%7CKHRpdGxlOjUgc2VjdGlvbjo4MDEgZWRpdGlvbjpwcmVsaW0pIE9SIChncmFudWxlaWQ6VVNDLXByZWxpbS10aXRsZTUtc2VjdGlvbjgwMSk%3D%7CdHJlZXNvcnQ%3D%7C%7C0%7Cfalse%7Cprelim&edition=prelim) (CRA) “lookback window” refers to the period starting [60 working days](https://crsreports.congress.gov/product/pdf/R/R46690#page=8) (either session days in the Senate or legislative days in the House of Representatives) before the current session of Congress adjourns and ending the day the subsequent session of Congress first convenes.
266
+ Rules that are published in the Federal Register and submitted to Congress during that time period are made available for review in the subsequent session of Congress.
267
+
268
+ Due to the retrospective calculation of the window, lookback window dates prior to Congress adjourning are inherently estimates.
269
+ Based on the published Congressional calendar for the second session of the 118th Congress, the current lookback window date [estimate](https://www.huntonak.com/the-nickel-report/federal-agencies-face-looming-congressional-review-act-deadline) is **May 22, 2024**.
270
+ This dashboard allows users to explore how different lookback window dates would affect the set of rules available for congressional review.
271
  """
272
  )
273
 
 
295
  bool_agency = [True if sum(selected in agency for selected in input.menu_agency()) > 0 else False for agency in filt_df["parent_slug"]]
296
  filt_df = filt_df.loc[bool_agency]
297
 
298
+ # return filtered dataframe
299
+ return filt_df
300
+
301
+
302
+ @reactive.calc
303
+ def filter_significance():
304
+
305
+ # get data filtered by date and agency
306
+ filt_df = filtered_df()
307
+
308
  # filter significance
309
  bool_ = []
310
  if (input.menu_significant() is not None) and ("all" not in input.menu_significant()):
 
313
  if "other-significant" in input.menu_significant():
314
  bool_.append((filt_df["other_significant"] == 1).to_numpy())
315
  filt_df = filt_df.loc[array(bool_).any(axis=0)]
316
+
317
  # return filtered dataframe
318
  return filt_df
319
 
320
 
321
  @reactive.calc
322
  def grouped_df_month():
323
+ filt_df = filter_significance()
324
  grouped = groupby_date(filt_df, significant=GET_SIGNIFICANT)
325
  return grouped
326
 
327
 
328
  @reactive.calc
329
  def grouped_df_day():
330
+ filt_df = filter_significance()
331
  date_col = "publication_date"
332
  grouped = groupby_date(filt_df, group_col=date_col, significant=GET_SIGNIFICANT)
333
  grouped = pad_missing_dates(
 
344
 
345
  @reactive.calc
346
  def grouped_df_week():
347
+ filt_df = filter_significance()
348
  filt_df = add_weeks_to_data(filt_df)
349
  try:
350
  grouped = groupby_date(filt_df, group_col=("week_number", "week_of"), significant=GET_SIGNIFICANT)
 
364
 
365
  @reactive.calc
366
  def grouped_df_agency():
367
+ filt_df = filter_significance()
368
  grouped = groupby_agency(filt_df, metadata=METADATA, significant=GET_SIGNIFICANT)
369
  return grouped
370