import pandas as pd
import os
import shutil
import re
from functools import reduce
from datetime import datetime, timedelta

from lib.experiment_specs import study_config
from lib.data_helpers import data_utils


class BuilderUtils:

    """Loads the phone data config from the provided config path."""
    def get_config(self, config_path):
        if os.path.isfile(config_path):
            pd_config_df = pd.read_csv(config_path, index_col="index")
            pd_config_dict = pd_config_df.to_dict(orient='index')
            return pd_config_dict
        else:
            return {}
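
    # Usage sketch (hypothetical path; assumes a CSV whose first column is
    # named "index", as written by update_config below):
    #
    #     utils = BuilderUtils()
    #     config = utils.get_config(os.path.join("data", "phone_config.csv"))
    #     # -> {row_index: {column: value, ...}, ...}, or {} if the file is missing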

    """
    - Purpose: transports zipped files from PhoneDashboardPort and PCPort to the
      specified directory in the PhoneAddictionDropbox
    - Inputs:
        - port: the path of the source port
        - keyword: the kind of import to pull from the source (e.g. budget, use, etc.).
          The keyword must appear in the file name for the file to be picked up.
        - new_directory: the directory the files are copied to
    """
    def transport_new_zip_files(self, port, keyword, new_directory):
        new_adds = []
        added_files = os.listdir(new_directory)
        empty_files_dir = os.listdir(os.path.join("data", "external", "input", "PhoneDashboard", "BuggyFiles", "Empty"))

        # "UseIndiv" draws from the same zip files as "Use"
        if keyword == "UseIndiv":
            keyword = "Use"

        for zipfile in os.listdir(port):
            if ".zip" not in zipfile:
                continue

            # rename "full" exports so they match the "use" keyword below
            if ("full" in zipfile) and (keyword == "Use"):
                new_zipfile = zipfile.replace("full", "use")
                os.rename(os.path.join(port, zipfile), os.path.join(port, new_zipfile))
                zipfile = new_zipfile

            # strip the "snooze_" prefix from snooze_delays files
            if "snooze_delays" in zipfile:
                new_zipfile = zipfile.replace("snooze_", "")
                os.rename(os.path.join(port, zipfile), os.path.join(port, new_zipfile))
                zipfile = new_zipfile

            if (keyword.lower() not in zipfile) and (keyword.upper() not in zipfile):
                continue

            # skip files that were already transported
            if zipfile in added_files:
                continue

            # quarantine files previously flagged as empty
            if zipfile in empty_files_dir:
                try:
                    old_file = os.path.join(port, zipfile)
                    new_file = os.path.join(port, "Empty", zipfile)
                    os.rename(old_file, new_file)
                except OSError:
                    print(f"{zipfile}: couldn't move zipfile to PDPort/Empty")
                continue

            # only transport files dated strictly inside the study's pull window
            match = re.search(r'\d{4}-\d{2}-\d{2}', zipfile)
            if match is None:
                continue
            zip_date = datetime.strptime(match.group(), '%Y-%m-%d')
            if zip_date <= study_config.first_pull or zip_date >= study_config.last_pull:
                continue

            old_file_path = os.path.join(port, zipfile)
            new_file_path = os.path.join(new_directory, zipfile)
            new_adds.append(zipfile)
            shutil.copy(old_file_path, new_file_path)

        print(new_adds)
        return new_adds
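
    # Usage sketch (hypothetical port path and keyword; assumes file names like
    # "use_2023-05-01.zip" so the date regex in the loop can match):
    #
    #     utils = BuilderUtils()
    #     moved = utils.transport_new_zip_files(
    #         port=os.path.join("ports", "PhoneDashboardPort"),
    #         keyword="Use",
    #         new_directory=os.path.join("data", "external", "input", "PhoneDashboard"),
    #     )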

    """Updates the existing config by adding the new config entries, and saves the updated config."""
    def update_config(self, existing, new, config_path):
        existing.update(new)
        pd_config_df = pd.DataFrame.from_dict(existing, orient='index').reset_index()
        pd_config_df.to_csv(config_path, index=False)
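
    # Usage sketch (hypothetical entry; reset_index() writes the dict keys back
    # out as the "index" column that get_config reads):
    #
    #     config = utils.get_config(config_path)
    #     utils.update_config(config, {"use_2023-05-01.zip": {"Processed": True}}, config_path)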

    """Default raw data processor invoked by event_puller.py."""
    @staticmethod
    def default_puller_process(df: pd.DataFrame, zip_file: str, event_puller):
        for time_col in event_puller.time_cols:
            df = data_utils.clean_iso_dates(df, time_col, keep_nan=False, orig_tz=event_puller.raw_timezone)
            df = df.drop(columns=[time_col + "Date", time_col + "DatetimeHour", time_col + "EasternDatetimeHour"])
            df = df.rename(columns={time_col + "Datetime": time_col})

        if "TimeZone" in df.columns:
            df = df.drop(columns=["TimeZone"])

        # stamp each row with the pull date embedded in the zip file's name
        match = re.search(r'\d{4}-\d{2}-\d{2}', zip_file)
        df["AsOf"] = datetime.strptime(match.group(), '%Y-%m-%d')
        df["AsOf"] = df["AsOf"].apply(lambda x: x.date())
        return df
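
    # Usage sketch (hypothetical event_puller object; assumes it exposes the
    # time_cols and raw_timezone attributes used above):
    #
    #     processed = BuilderUtils.default_puller_process(raw_df, "use_2023-05-01.zip", puller)
    #     processed["AsOf"].iloc[0]  # -> datetime.date(2023, 5, 1)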

    """Labels each row with the study phase its date falls in, using buffered phase start/end dates."""
    @staticmethod
    def add_phase_label(raw_df, raw_df_date, start_buffer=1, end_buffer=-1):
        df = raw_df.copy()
        if "Phase" in df.columns.values:
            df = df.drop(columns="Phase")

        for phase, specs in study_config.phases.items():
            # only label phases that have already started
            if datetime.now() > specs["StartSurvey"]["Start"] + timedelta(1):
                start_date = (specs["StartSurvey"]["Start"] + timedelta(start_buffer)).date()
                end_date = (specs["EndSurvey"]["Start"] + timedelta(end_buffer)).date()
                df.loc[(df[raw_df_date] >= start_date) & (df[raw_df_date] <= end_date), "Phase"] = phase

        df["Phase"] = df["Phase"].astype('category')
        return df
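
    # Usage sketch (hypothetical date column; assumes study_config.phases maps
    # phase labels to StartSurvey/EndSurvey specs):
    #
    #     labeled = BuilderUtils.add_phase_label(events_df, raw_df_date="UseDate")
    #     labeled["Phase"].value_counts()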

    """
    Purpose: Iterates through a subsets dict and creates new avg daily use columns.

    One key-value pair of a subsets dict:

        "PCSC": {
            "Filters": {"SCBool": [True]},
            "DenomCol": "DaysWithUse",
            "NumCols": ["Use"]},
    """
    @staticmethod
    def get_subsets_avg_use(df_p, subsets: dict):
        subset_dfs = []
        for label, specs in subsets.items():
            filters = specs["Filters"]
            denom_col = specs["DenomCol"]
            num_cols = specs["NumCols"]
            subset_df = BuilderUtils.subset_avg_use(df_p, label, filters, denom_col, num_cols)
            subset_dfs.append(subset_df)
        df_merged = reduce(lambda x, y: pd.merge(x, y, on='AppCode', how='outer'), subset_dfs)

        df_merged = df_merged.fillna(0)
        return df_merged
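
    # Usage sketch (hypothetical subset spec, following the docstring's shape):
    #
    #     subsets = {"PCSC": {"Filters": {"SCBool": [True]},
    #                         "DenomCol": "DaysWithUse",
    #                         "NumCols": ["Use"]}}
    #     avg_df = BuilderUtils.get_subsets_avg_use(phase_df, subsets)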

    """
    Input:
        - df: the event-level df in the given phase
        - label: the variable label
        - filters: {variable to subset on: values of that variable to keep}
        - denom_col: the column name of the variable in the df which contains the denominator value
            - if == "NAN", the function will create its own denominator equal to the number of days
              with non-zero use for the given subset
        - num_cols: list of columns to sum over (often it's just [Use], but it can be [Checks, Pickups, Use])
    """
    @staticmethod
    def subset_avg_use(df: pd.DataFrame, label: str, filters: dict, denom_col: str, num_cols: list):
        # work on a copy so the fillna below doesn't mutate the caller's frame
        df = df.copy()
        if filters:
            for var, keep_vals in filters.items():
                df = df.loc[df[var].isin(keep_vals), :]

        for col in [denom_col] + num_cols:
            df[col] = df[col].fillna(0)

        sum_df = df.groupby(by=['AppCode', denom_col], as_index=False)[num_cols].sum()

        sum_dfs = []
        for num_col in num_cols:
            sum_df = sum_df.rename(columns={num_col: f"{label}{num_col}Total"})
            sum_df[f"{label}{num_col}Total"] = sum_df[f"{label}{num_col}Total"].round(0)
            sum_df[f"{label}{num_col}"] = (sum_df[f"{label}{num_col}Total"] / sum_df[denom_col]).round(0)
            sum_dfs.append(sum_df[["AppCode", f"{label}{num_col}", f"{label}{num_col}Total"]])
        final = reduce(lambda df1, df2: pd.merge(df1, df2, on='AppCode', how='outer'), sum_dfs)
        return final
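
    # Usage sketch (hypothetical columns, matching the docstring's example):
    #
    #     pcsc = BuilderUtils.subset_avg_use(phase_df, label="PCSC",
    #                                        filters={"SCBool": [True]},
    #                                        denom_col="DaysWithUse", num_cols=["Use"])
    #     # -> columns: AppCode, PCSCUse, PCSCUseTotal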

    """Labels each row with its phase using each participant's own survey dates from the master file."""
    @staticmethod
    def add_personal_phase_label(raw_df, raw_master, raw_df_date, start_buffer=1, end_buffer=-1, drop_bool=True):
        df = raw_df.copy()
        if "Phase" in df.columns.values:
            df = df.drop(columns="Phase")

        for phase, specs in study_config.phases.items():
            # only label phases that have already started
            if datetime.now() > specs["StartSurvey"]["Start"] + timedelta(1):
                raw_master = data_utils.inpute_missing_survey_datetimes(raw_master, phase)
                start_code = specs["StartSurvey"]["Code"]
                end_code = specs["EndSurvey"]["Code"]
                start_col = f"{start_code}_SurveyEndDatetime"
                end_col = f"{end_code}_SurveyStartDatetime"

                df = df.merge(raw_master[["AppCode", start_col, end_col]], on="AppCode", how="inner")
                for col in [start_col, end_col]:
                    df[col] = pd.to_datetime(df[col]).apply(lambda x: x.date())

                # a row belongs to the phase if its date falls between the participant's
                # buffered start-survey end and end-survey start
                df.loc[(df[raw_df_date] >= df[start_col].apply(lambda x: x + timedelta(start_buffer)))
                       & (df[raw_df_date] <= df[end_col].apply(lambda x: x + timedelta(end_buffer))), "Phase"] = phase

                if drop_bool:
                    df = df.drop(columns=[start_col, end_col])

        df["Phase"] = df["Phase"].astype('category')
        return df
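
    # Usage sketch (hypothetical frames; raw_master must carry AppCode plus the
    # per-phase survey datetime columns built above):
    #
    #     labeled = BuilderUtils.add_personal_phase_label(
    #         events_df, master_df, raw_df_date="UseDate", drop_bool=True)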