|
|
|
|
|
|
|
import asyncio |
|
from datetime import datetime, date, time |
|
from pathlib import Path |
|
|
|
from pandas import DataFrame |
|
from numpy import array |
|
|
|
from modules import ( |
|
DF, |
|
LAST_UPDATED, |
|
START_DATE, |
|
WINDOW_OPEN_DATE, |
|
GET_SIGNIFICANT, |
|
METADATA, |
|
AGENCIES, |
|
groupby_agency, |
|
groupby_date, |
|
add_week_info_to_data, |
|
pad_missing_dates, |
|
plot_agency, |
|
plot_tf, |
|
    plot_NA,
|
) |
|
|
|
from shiny import reactive |
|
from shiny.express import input, render, ui |
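# Shiny Express app: the top-level UI calls below render in the order they appear,
# and the @reactive.calc functions at the bottom of the file supply the filtered
# and grouped data that the outputs consume.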
|
|
|
|
|
ui.include_css(Path(__file__).parent / "www" / "style.css")
|
|
|
|
|
|
|
|
|
|
|
|
|
TITLE = "CRA Window Exploratory Dashboard - GW Regulatory Studies Center" |
|
|
|
|
|
HEADER = "Congressional Review Act (CRA) Window Exploratory Dashboard" |
|
page_header = ui.HTML( |
|
f""" |
|
<div class="header"> |
|
<h1>{HEADER}</h1> |
|
</div> |
|
""" |
|
) |
|
|
|
|
|
sidebar_logo = ui.HTML( |
|
f""" |
|
<div class="header"> |
|
<a href="https://go.gwu.edu/regstudies" target="_blank"> |
|
<img src="logo.png" alt="Regulatory Studies Center logo"/> |
|
</a> |
|
</div> |
|
""" |
|
) |
|
|
|
|
|
FOOTER = """
|
----- |
|
|
|
© 2024 [GW Regulatory Studies Center](https://go.gwu.edu/regstudies). See our page on the [Congressional Review Act](https://regulatorystudies.columbian.gwu.edu/congressional-review-act) for more information. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
ui.tags.title(TITLE) |
|
|
|
page_header |
|
|
|
|
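# Sidebar filters: lookback window start date, rule significance, and agency.
# These inputs drive the filtered_df() and filter_significance() reactive calcs below.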
|
with ui.sidebar(open={"desktop": "open", "mobile": "closed"}, fg="#033C5A"): |
|
sidebar_logo |
|
|
|
with ui.tooltip(placement="right", id="window_tooltip"): |
|
ui.input_date("start_date", "Select start of window", value=WINDOW_OPEN_DATE, min=START_DATE, max=date.today()) |
|
"The CRA lookback window is estimated to open as early as August 1, 2024. Select a different date to explore how different lookback dates would affect the set of rules available for congressional review. See the notes for more information." |
|
|
|
with ui.tooltip(placement="right", id="sig_tooltip"): |
|
ui.input_select("menu_significant", "Select rule significance", choices=["all", "3f1-significant", "other-significant"], selected="all", multiple=True, size=3) |
|
"Rule significance as defined in Executive Order 12866, as amended by Executive Order 14094." |
|
|
|
with ui.tooltip(placement="right", id="agency_tooltip"): |
|
ui.input_select("menu_agency", "Select agencies", choices=["all"] + AGENCIES, selected=["all"], multiple=True, size=6) |
|
"Select one or more parent-level agencies." |
|
|
|
|
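# Summary value boxes: counts of all final rules, 3(f)(1)-significant rules, and
# other significant rules in the currently filtered data.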
|
with ui.layout_column_wrap(): |
|
with ui.value_box(class_="summary-values"): |
|
"All final rules" |
|
with ui.tooltip(placement="bottom", id="all_tooltip"): |
|
@render.text |
|
def count_rules(): |
|
return f"{filtered_df()['document_number'].count()}" |
|
f"Federal Register data last retrieved {date.today()}." |
|
|
|
with ui.value_box(class_="summary-values"): |
|
"Section 3(f)(1) Significant rules" |
|
with ui.tooltip(placement="bottom", id="3f1_tooltip"): |
|
@render.text |
|
def count_3f1_significant(): |
|
output = "Not available" |
|
if GET_SIGNIFICANT: |
|
output = f"{filtered_df()['3f1_significant'].sum()}" |
|
return output |
|
f"Executive Order 12866 significance data last updated {LAST_UPDATED}." |
|
|
|
with ui.value_box(class_="summary-values"): |
|
"Other Significant rules" |
|
with ui.tooltip(placement="bottom", id="other_tooltip"): |
|
@render.text |
|
def count_other_significant(): |
|
output = "Not available" |
|
if GET_SIGNIFICANT: |
|
output = f"{filtered_df()['other_significant'].sum()}" |
|
return output |
|
f"Executive Order 12866 significance data last updated {LAST_UPDATED}." |
|
|
|
|
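# Tabbed panels: rule-level detail table, publication counts over time, and counts by agency.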
|
with ui.navset_card_underline(title=""): |
|
|
|
with ui.nav_panel("Rules in detail"): |
|
with ui.card(full_screen=True): |
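            # Rule detail table: format the publication date, truncate long titles to
            # ten words, and join parent agency slugs for display.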
|
@render.data_frame |
|
def table_rule_detail(): |
|
df = filter_significance().copy() |
|
df.loc[:, "date"] = df.loc[:, "publication_date"].apply(lambda x: f"{x.date()}") |
|
char, limit = " ", 10 |
|
df.loc[:, "title"] = df["title"].apply(lambda x: x if len(x.split(char)) < (limit + 1) else f"{char.join(x.split(char)[:limit])}...") |
|
df.loc[:, "agencies"] = df["parent_slug"].apply(lambda x: "; ".join(x)) |
|
cols = [ |
|
"date", |
|
"title", |
|
"agencies", |
|
"3f1_significant", |
|
"other_significant", |
|
] |
|
return render.DataGrid(df.loc[:, [c for c in cols if c in df.columns]], width="100%") |
|
|
|
with ui.nav_panel("Over time"): |
|
|
|
ui.input_select("frequency", "Select frequency", choices=["daily", "weekly", "monthly"], selected="weekly") |
|
|
|
with ui.layout_columns(): |
|
|
|
with ui.card(full_screen=True): |
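                # Time-series plot; falls back to a placeholder (plot_NA) when the
                # filtered data are too sparse to chart.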
|
|
|
@render.plot |
|
def plot_over_time(value_col: str = "rules"): |
|
grouped = get_grouped_data_over_time() |
|
values = grouped.loc[:, value_col].to_numpy() |
|
                    count_positive = sum(1 for g in values if g > 0)

                    max_val = max(values, default=0)

                    if (max_val < 2) or (count_positive < 2):
|
return plot_NA() |
|
else: |
|
return plot_tf( |
|
grouped, |
|
input.frequency(), |
|
rule_types=input.menu_significant(), |
|
) |
|
|
|
with ui.card(full_screen=True): |
|
@render.data_frame |
|
def table_over_time(): |
|
grouped = get_grouped_data_over_time() |
|
date_cols = ["publication_date", "week_of", ] |
|
if any(d in grouped.columns for d in date_cols): |
|
grouped = grouped.astype({d: "str" for d in date_cols if d in grouped.columns}, errors="ignore") |
|
grouped = grouped.rename(columns={ |
|
"publication_year": "year", |
|
"publication_month": "month", |
|
"publication_date": "date", |
|
}, errors="ignore") |
|
cols = [ |
|
"date", |
|
"year", |
|
"month", |
|
"week_of", |
|
"rules", |
|
"3f1_significant", |
|
"other_significant", |
|
] |
|
return render.DataTable(grouped.loc[:, [c for c in cols if c in grouped.columns]]) |
|
|
|
with ui.nav_panel("By agency"): |
|
|
|
with ui.layout_columns(): |
|
|
|
with ui.card(full_screen=True): |
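                # Rules by agency, plotting only the first ten rows returned by
                # grouped_df_agency(); falls back to a placeholder when fewer than
                # two agencies remain.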
|
@render.plot |
|
def plot_by_agency(): |
|
grouped = grouped_df_agency() |
|
if len(grouped) < 2: |
|
return plot_NA() |
|
else: |
|
plot = plot_agency( |
|
grouped.head(10), |
|
rule_types=input.menu_significant(), |
|
) |
|
return plot |
|
|
|
with ui.card(full_screen=True): |
|
@render.data_frame |
|
def table_by_agency(): |
|
grouped = grouped_df_agency() |
|
cols = [ |
|
"agency", |
|
"acronym", |
|
"rules", |
|
"3f1_significant", |
|
"other_significant", |
|
] |
|
return render.DataTable(grouped.loc[:, [c for c in cols if c in grouped.columns]]) |
|
|
|
|
|
with ui.accordion(open=False): |
|
|
|
with ui.accordion_panel("Download Data"): |
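        # CSV download of the currently filtered rules; list-valued agency columns
        # are joined into semicolon-separated strings before the CSV text is yielded.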
|
|
|
@render.download( |
|
label="Download data as CSV", |
|
filename=f"rules_in_cra_window_accessed_{date.today()}.csv", |
|
) |
|
async def download( |
|
output_cols: tuple | list = ( |
|
"document_number", |
|
"citation", |
|
"publication_date", |
|
"title", |
|
"type", |
|
"action", |
|
"json_url", |
|
"html_url", |
|
"agencies", |
|
"independent_reg_agency", |
|
"parent_agencies", |
|
"subagencies", |
|
"president_id", |
|
"significant", |
|
"3f1_significant", |
|
"other_significant" |
|
) |
|
): |
|
filt_df = filtered_df().copy() |
|
filt_df.loc[:, "agencies"] = filt_df.loc[:, "agency_slugs"].apply(lambda x: "; ".join(x)) |
|
filt_df.loc[:, "parent_agencies"] = filt_df.loc[:, "parent_slug"].apply(lambda x: "; ".join(x)) |
|
filt_df.loc[:, "subagencies"] = filt_df.loc[:, "subagency_slug"].apply(lambda x: "; ".join(x)) |
|
await asyncio.sleep(0.25) |
|
yield filt_df.loc[:, [c for c in output_cols if c in filt_df.columns]].to_csv(index=False) |
|
|
|
|
|
with ui.accordion(open=False): |
|
|
|
with ui.accordion_panel("Notes"): |
|
|
|
ui.markdown( |
|
f""" |
|
The [Congressional Review Act](https://uscode.house.gov/view.xhtml?req=granuleid%3AUSC-prelim-title5-chapter8&saved=%7CKHRpdGxlOjUgc2VjdGlvbjo4MDEgZWRpdGlvbjpwcmVsaW0pIE9SIChncmFudWxlaWQ6VVNDLXByZWxpbS10aXRsZTUtc2VjdGlvbjgwMSk%3D%7CdHJlZXNvcnQ%3D%7C%7C0%7Cfalse%7Cprelim&edition=prelim) (CRA) “lookback window” refers to the period starting [60 working days](https://crsreports.congress.gov/product/pdf/R/R46690#page=8) (either session days in the Senate or legislative days in the House of Representatives) before the current session of Congress adjourns and ending the day the subsequent session of Congress first convenes. |
|
Rules that are published in the Federal Register and submitted to Congress after the lookback day are made available for review in the subsequent session of Congress. |
|
Due to the retrospective calculation of the window, lookback window dates prior to Congress adjourning are inherently estimates. |
|
The lookback date could fall as early as [August 1, 2024](https://crsreports.congress.gov/product/pdf/IN/IN12408). Lookback dates for recent Congresses have fallen in mid-August. |
|
This dashboard allows users to explore how different lookback window dates would affect the set of rules available for congressional review. |
|
|
|
"Section 3(f)(1) significant" rules are regulations that meet the criteria in Section 3(f)(1) of [Executive Order 12866](https://www.archives.gov/files/federal-register/executive-orders/pdf/12866.pdf), as amended by [Executive Order 14094](https://www.govinfo.gov/content/pkg/FR-2023-04-11/pdf/2023-07760.pdf), referring to those with an estimated annual effect on the economy of $200 million or more. |
|
"Other significant" rules are regulations that meet the other criteria in Section 3(f) of Executive Order 12866, as amended by Executive Order 14094, such as those creating inconsistency with other agencies' actions, altering certain budgetary impacts, or raising legal or policy issues pertaining to the president's priorities. |
|
|
|
Rule data are retrieved daily from the [Federal Register API](https://www.federalregister.gov/developers/documentation/api/v1), which publishes new editions of the Federal Register each business day. |
|
""" |
|
) |
|
|
|
|
|
ui.markdown( |
|
FOOTER |
|
) |
|
|
|
|
|
|
|
|
|
|
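# Reactive calculations: filtered_df() applies the start-date and agency filters,
# filter_significance() adds the significance filter, and the grouped_df_* calcs
# aggregate the result by day, week, month, or agency for the outputs above.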
|
@reactive.calc |
|
def filtered_df(agency_column: str = "parent_slug"): |
|
filt_df = DF |
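    # input.start_date() returns a datetime.date; if publication_date holds full
    # datetimes the comparison can raise TypeError, so fall back to comparing
    # against midnight on the selected date.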
|
|
|
|
|
try: |
|
filt_df = filt_df.loc[filt_df["publication_date"] >= input.start_date()] |
|
except TypeError: |
|
filt_df = filt_df.loc[filt_df["publication_date"] >= datetime.combine(input.start_date(), time(0, 0))] |
|
|
|
|
|
if (input.menu_agency() is not None) and ("all" not in input.menu_agency()): |
|
        bool_agency = [any(selected in agency for selected in input.menu_agency()) for agency in filt_df[agency_column]]
|
filt_df = filt_df.loc[bool_agency] |
|
|
|
|
|
return filt_df |
|
|
|
|
|
@reactive.calc |
|
def filter_significance(): |
|
|
|
|
|
filt_df = filtered_df() |
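    # Build one boolean mask per selected significance category and keep rows
    # matching any of them; "all" (or no selection) leaves the data unfiltered.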
|
|
|
|
|
bool_ = [] |
|
if (input.menu_significant() is not None) and ("all" not in input.menu_significant()): |
|
if "3f1-significant" in input.menu_significant(): |
|
bool_.append((filt_df["3f1_significant"] == 1).to_numpy()) |
|
if "other-significant" in input.menu_significant(): |
|
bool_.append((filt_df["other_significant"] == 1).to_numpy()) |
|
filt_df = filt_df.loc[array(bool_).any(axis=0)] |
|
|
|
|
|
return filt_df |
|
|
|
|
|
@reactive.calc |
|
def grouped_df_month(): |
|
filt_df = filter_significance() |
|
grouped = groupby_date(filt_df, significant=GET_SIGNIFICANT) |
|
return grouped |
|
|
|
|
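# Daily rule counts; days with no rules are padded in with zero counts so the
# time series is continuous.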
|
@reactive.calc |
|
def grouped_df_day(): |
|
filt_df = filter_significance() |
|
date_col = "publication_date" |
|
grouped = groupby_date(filt_df, group_col=date_col, significant=GET_SIGNIFICANT) |
|
grouped = pad_missing_dates( |
|
grouped, |
|
date_col, |
|
"days", |
|
fill_padded_values={ |
|
"rules": 0, |
|
"3f1_significant": 0, |
|
"other_significant": 0, |
|
}) |
|
return grouped |
|
|
|
|
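# Weekly rule counts; missing weeks are padded with zeros, and an empty frame
# with the expected columns is returned if the week fields are unavailable.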
|
@reactive.calc |
|
def grouped_df_week(): |
|
filt_df = filter_significance() |
|
filt_df = add_week_info_to_data(filt_df) |
|
try: |
|
grouped = groupby_date(filt_df, group_col=("week_number", "week_of"), significant=GET_SIGNIFICANT) |
|
grouped = pad_missing_dates( |
|
grouped, |
|
"week_of", |
|
how="weeks", |
|
fill_padded_values={ |
|
"rules": 0, |
|
"3f1_significant": 0, |
|
"other_significant": 0, |
|
}) |
|
    except KeyError:
|
grouped = DataFrame(columns=["week_number", "week_of", "rules", "3f1_significant", "other_significant"]) |
|
return grouped |
|
|
|
|
|
@reactive.calc |
|
def grouped_df_agency(): |
|
filt_df = filter_significance() |
|
grouped = groupby_agency(filt_df, metadata=METADATA, significant=GET_SIGNIFICANT) |
|
return grouped |
|
|
|
|
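# Select the daily, weekly, or monthly grouping based on the frequency input.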
|
@reactive.calc |
|
def get_grouped_data_over_time(): |
|
if input.frequency() == "daily": |
|
grouped = grouped_df_day() |
|
elif input.frequency() == "monthly": |
|
grouped = grouped_df_month() |
|
elif input.frequency() == "weekly": |
|
grouped = grouped_df_week() |
|
else: |
|
raise ValueError("Only 'daily', 'monthly', or 'weekly' are valid inputs.") |
|
return grouped |
|
|