|
|
import pandas as pd |
|
|
import os |
|
|
import sys |
|
|
import re |
|
|
import pickle |
|
|
import yaml |
|
|
from datetime import datetime, timedelta, timezone |
|
|
import dateutil.parser |
|
|
import pytz |
|
|
|
|
|
|
|
|
from lib.experiment_specs import study_config |
|
|
from lib.utilities import codebook |
|
|
|
|
|
""" Purpose: cleans the iso datetimes in a dataframe column |
|
|
-Input: |
|
|
- DataFrame: data - raw input data that contains the time column |
|
|
- col_name - the name of the column |
|
|
- keep_nan : keep rows with empty value for df[col_name] |
|
|
- orig_tz: the timezone that the datetime represents once the offset is stripped. If "Local" (capital L,


as the code compares against "Local"), stripping the offset yields the participant's local time; any other value means the naive time is UTC.
|
|
|
|
|
- Output: |
|
|
-dataframe with the following new columns: |
|
|
- {col_name}Datetime - in the phone's local time |
|
|
- {col_name}DatetimeHour |
|
|
- {col_name}Date |
|
|
|
|
|
- {col_name}EasternDatetime - in eastern time |
|
|
- {col_name}EasternDatetimeHour |
|
|
""" |
|
|
|
|
|
def clean_iso_dates(data_raw: pd.DataFrame, col_name: str, keep_nan: bool = False, orig_tz: str = "Local"):
    """Clean ISO-8601 datetime strings in ``data_raw[col_name]``.

    Adds derived columns (and drops the raw column and intermediates):
      - {col_name}Datetime / {col_name}DatetimeHour / {col_name}Date:
        naive datetimes. When orig_tz == "Local" these are the wall-clock
        time carried in the ISO string (the participant's local time);
        otherwise the string's naive time is treated as UTC and the
        embedded offset is applied to recover local time.
      - {col_name}EasternDatetime / {col_name}EasternDatetimeHour: US/Eastern.

    Args:
        data_raw: input DataFrame containing the raw ISO datetime column.
        col_name: name of the column to clean.
        keep_nan: if True, rows with a null value in ``col_name`` are kept
            (appended back with the derived columns left empty).
        orig_tz: "Local" if the naive part of the ISO string is already the
            participant's local time; anything else means it is UTC.

    Returns:
        A new DataFrame with the derived datetime columns.
    """
    # .copy() so the column assignments below write into a real frame, not a
    # view of data_raw (avoids SettingWithCopyWarning / silently lost writes).
    data = data_raw.loc[data_raw[col_name].notnull()].copy()
    # NOTE(review): assumes every non-null value carries a UTC offset;
    # x.utcoffset() below would be None for a naive string — confirm upstream.
    data[col_name + 'DatetimeTZ'] = data[col_name].apply(
        lambda x: dateutil.parser.parse(x).replace(microsecond=0))

    if orig_tz == "Local":
        # The wall-clock in the string is already the participant's local time.
        data[col_name + 'Datetime'] = data[col_name + 'DatetimeTZ'].apply(lambda x: x.replace(tzinfo=None))
        data[col_name + 'DatetimeUTC'] = data[col_name + 'DatetimeTZ'].apply(
            lambda x: x.replace(tzinfo=timezone.utc) - x.utcoffset())
    else:
        # The wall-clock in the string is UTC; shift by the embedded offset
        # to recover the participant's local time.
        data[col_name + 'Datetime'] = data[col_name + 'DatetimeTZ'].apply(
            lambda x: x.replace(tzinfo=timezone.utc) + x.utcoffset())
        data[col_name + 'Datetime'] = data[col_name + 'Datetime'].apply(lambda x: x.replace(tzinfo=None))
        data[col_name + 'DatetimeUTC'] = data[col_name + 'DatetimeTZ'].apply(lambda x: x.replace(tzinfo=timezone.utc))

    data[col_name + 'DatetimeHour'] = data[col_name + 'Datetime'].apply(lambda x: x.replace(minute=0, second=0))
    data[col_name + 'Date'] = data[col_name + 'DatetimeHour'].apply(lambda x: x.date())

    eastern = pytz.timezone('US/Eastern')
    data[col_name + 'EasternDatetime'] = data[col_name + 'DatetimeUTC'].apply(
        lambda x: x.astimezone(eastern).replace(tzinfo=None))
    data[col_name + 'EasternDatetimeHour'] = data[col_name + 'EasternDatetime'].apply(
        lambda x: x.replace(minute=0, second=0))
    data = data.drop(columns=[col_name, col_name + 'DatetimeTZ', col_name + 'DatetimeUTC'])

    if keep_nan:
        missing = data_raw.loc[data_raw[col_name].isnull()]
        # DataFrame.append was removed in pandas 2.0; concat is the supported way.
        data = pd.concat([data, missing])
    return data
|
|
|
|
|
|
|
|
"""remove data files from directory""" |
|
|
def remove_files(directory):
    """Delete every regular file directly inside ``directory``.

    Subdirectories (and their contents) are left untouched. A failure to
    delete an individual file is printed and skipped rather than raised.
    """
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        try:
            if not os.path.isfile(path):
                continue
            os.unlink(path)
        except Exception as err:
            print(err)
|
|
|
|
|
|
|
|
""" This method inputs missing start and enddatetime for survey incompletes. This helps determine what to count as use in phase, |
|
|
for people that have not completed their surveys""" |
|
|
def inpute_missing_survey_datetimes(df, phase):
    """Fill in missing end-survey start datetimes for survey incompletes.

    Participants who completed the phase's start survey but have no start
    time recorded for the end survey get an imputed value ("now", capped at
    the close of the end-survey window), so time-in-phase can be computed
    for people who have not finished their surveys.
    """
    phase_specs = study_config.phases[phase]
    start_code = phase_specs["StartSurvey"]["Code"]
    end_code = phase_specs["EndSurvey"]["Code"]

    # The imputed value: the current moment, but never past the end-survey close.
    imputed = min(datetime.now().replace(microsecond=0), phase_specs["EndSurvey"]["End"])

    target_col = f"{end_code}_SurveyStartDatetime"
    completed_start = df[f"{start_code}_Complete"] == "Complete"

    if datetime.now() < phase_specs["EndSurvey"]["Start"]:
        # End survey has not opened yet: impute for every start-survey completer.
        df.loc[completed_start, target_col] = imputed
    else:
        # End survey is open (or closed): impute only where no start time exists.
        df.loc[completed_start & df[target_col].isnull(), target_col] = imputed

    return df
|
|
|
|
|
""" Adds survey code prefix to each column in the df""" |
|
|
def add_survey_code(df, code):
    """Prefix each column name in ``df`` with ``code + "_"``.

    Columns listed in study_config.main_cols or embedded_main_cols (shared
    identifier columns) keep their original names.
    """
    # Hoisted out of the loop: this set does not change per column, and a
    # single rename avoids copying the frame once per renamed column.
    no_prefix_cols = set(study_config.main_cols + study_config.embedded_main_cols)
    mapping = {col: f"{code}_{col}" for col in df.columns if col not in no_prefix_cols}
    if mapping:
        df = df.rename(columns=mapping)
    return df
|
|
|
|
|
|
|
|
"""A function which takes the clean_master master and outputs all the variables from a phase, without the prefixes""" |
|
|
def keep_relevant_variables(df_raw, phase):
    """Return a phase's variables from the clean master, prefixes stripped.

    Keeps only participants that completed the phase's start survey, then
    keeps the shared identifier columns plus the columns carrying the
    phase's start-survey prefix, and strips that prefix from the names.
    """
    start_code = study_config.phases[phase]["StartSurvey"]["Code"]

    # Keep participants that completed the relevant survey in the phase.
    df = df_raw.loc[df_raw[f"{start_code}_Complete"] == "Complete"].copy()

    prefix = f"{start_code}_"
    shared_cols = study_config.main_cols + study_config.embedded_main_cols
    # startswith (not substring containment, as before) so a code that happens
    # to appear mid-name in another survey's column cannot be picked up.
    keep_cols = [x for x in df.columns if x.startswith(prefix) or x in shared_cols]
    df = df[keep_cols]

    # Strip the prefix only from the front of the name, never mid-name.
    df.columns = [x[len(prefix):] if x.startswith(prefix) else x for x in df.columns]
    return df
|
|
|
|
|
def add_A_to_appcode(df, appcode_col):
    """Normalize app codes in ``df[appcode_col]`` to the "A"-prefixed form.

    Raw codes may arrive as floats (e.g. 12345678.0) or strings; this turns
    them into "A########" (9 chars). Missing values become the string "nan"
    (3 chars, from astype(str)). Any other resulting length is invalid.

    Returns the DataFrame with the normalized column plus a boolean "Check"
    column; raises AssertionError if any value has an unexpected length.
    """
    # astype(str) already renders NaN as the string "nan" (the old
    # fillna("nan") afterwards was a no-op and has been dropped).
    df[appcode_col] = df[appcode_col].astype(str)
    # Strip a trailing ".0" from float-typed codes by round-tripping through int.
    df[appcode_col] = df[appcode_col].apply(
        lambda x: int(float(x)) if (x != "nan") and (x[0] != "A") else x)
    # An 8-digit bare code gets the "A" prefix.
    df[appcode_col] = df[appcode_col].astype(str).apply(lambda x: "A" + x if len(x) == 8 else x)
    # Valid results are either "A########" (9) or the placeholder "nan" (3).
    df["Check"] = df[appcode_col].apply(lambda x: len(x) in (9, 3))
    assert df["Check"].all(), f"unexpected app codes: {df.loc[~df['Check'], appcode_col].tolist()}"
    return df
|
|
|
|
|
"returns the latest main survey that has already ended" |
|
|
def get_last_survey():
    """Return the latest main survey whose end time has already passed.

    Returns "" when no main survey has ended yet (no end time later than
    the 2018-01-01 floor and earlier than now).
    """
    # Snapshot "now" once so every survey is compared against the same
    # instant (the original re-evaluated datetime.now() each iteration).
    now = datetime.now()
    last_complete_time = datetime(2018, 1, 1, 0, 0)
    last_survey = ""
    for survey in study_config.main_surveys:
        end = study_config.surveys[survey]["End"]
        # Strictly-later end wins; ties keep the earlier-listed survey.
        if end < now and end > last_complete_time:
            last_survey = survey
            last_complete_time = end
    return last_survey
|
|
|
|
|
|
|
|
|
|
|
def assert_common_appcode_values(df1, df2, col_list):
    """Verify df1 and df2 agree on ``col_list`` for their shared AppCodes.

    Rows are matched on "AppCode" and compared as strings. On the first
    mismatching column, prints diagnostics (dtypes and the first differing
    rows) and exits the process; otherwise returns None.

    NOTE(review): "AppCode" must itself be in col_list (and in both frames)
    for the merge below to work — confirm callers always pass it.
    """
    common_appcodes = set(df1["AppCode"]).intersection(set(df2["AppCode"]))
    common_columns = list(set(df1.columns).intersection(set(df2.columns)).intersection(col_list))

    compare_list = []
    for df in (df1, df2):
        subset = df.loc[df["AppCode"].isin(common_appcodes)]
        subset = subset[common_columns]
        subset = subset.sort_values(by="AppCode").reset_index(drop=True).astype(str)
        compare_list.append(subset)
    assert len(compare_list[0]) == len(compare_list[1])

    c = compare_list[0].merge(compare_list[1], how='outer', on='AppCode')
    for col in compare_list[0].columns:
        if col == "AppCode":
            continue
        # BUG FIX: the original evaluated `.equals(...) == True` and discarded
        # the result inside a try/except, so mismatches never triggered the
        # diagnostic path. Test the comparison explicitly instead.
        if not c[col + "_x"].equals(c[col + "_y"]):
            print(f"no match on {col}")
            print(c[col + "_x"].dtype)
            print(c[col + "_y"].dtype)
            print("First five rows that don't match:")
            print(c.loc[c[col + "_x"] != c[col + "_y"]].head())
            sys.exit()
|
|
|
|
|
def merge_back_master(df_master, df_phase, phase):
    """Add phase prefixes to a phase-specific df, and merge it into master."""
    codebook_dic = pd.read_csv(codebook.codebook_path, index_col="VariableName").to_dict(orient='index')
    df_phase.columns = [codebook.add_prefix_var(col, phase, codebook_dic) for col in df_phase.columns]
    # Only bring over columns master does not already have, plus the join key.
    new_cols = ["AppCode"] + list(set(df_phase.columns) - set(df_master.columns))
    merged = df_master.merge(df_phase[new_cols], how='outer', left_on="AppCode", right_on="AppCode")
    return merged