# anonymous-submission-acl2025's picture
# add 17
# 8a79f2e
import os
import sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from lib.experiment_specs import study_config
from lib.data_helpers import test
from lib.data_helpers import data_utils
from data.source.clean_master.outcome_variable_cleaners import outcome_cleaner
from data.source.exporters.master_contact_generator import MasterContactGenerator
from data.source.clean_master.management.baseline_prep import BaselinePrep
from data.source.clean_master.management.midline_prep import MidlinePrep
from data.source.clean_master.management.endline1_prep import Endline1Prep
from data.source.clean_master.management.endline2_prep import Endline2Prep
from data.source.clean_master.management.earnings import Earnings
from lib.utilities import serialize
np.random.seed(12423534)
"""
cleans the aggregated raw master user level data file by:
- adding treatment/payment variables
- creates outcome variables and indices
-
"""
class Cleaner:
    """Clean the aggregated raw master user-level data file.

    Responsibilities (as exercised by ``clean_master``):
    - crop the raw master df to the proper analysis sample
    - ingest qualitative-feedback data and build outcome variables
    - prepare embedded data for upcoming survey phases, or ingest
      embedded data from already-used contact lists (CLs)
    - calculate earnings, run sanity checks, and persist the result
    """
    # Input/output locations, relative to the project root.
    used_contact_list_directory = os.path.join("data","external","dropbox_confidential","ContactLists","Used")
    master_file = os.path.join("data","external","intermediate", "MasterCleanUser")
    master_test_file = os.path.join("data","external","intermediate_test", "MasterCleanUser")
    qual_path = os.path.join("data", "external", "dropbox_confidential", "QualitativeFeedback")

    def __init__(self):
        self.treatment_cl = pd.DataFrame()
        self.used_contact_lists = self._import_used_contact_lists()
        self.config_user_dict = serialize.open_yaml("config_user.yaml")
        # Map each survey name to the routine that preps its embedded data.
        self.survey_prep_functions = {"Baseline": BaselinePrep.main,
                                      "Midline": MidlinePrep.main,
                                      "Endline1": Endline1Prep.main,
                                      "Endline2": Endline2Prep.main}
        # Filler ("Phase") surveys all share the generic filler prep routine.
        for survey in study_config.surveys.keys():
            if "Phase" in survey:
                self.survey_prep_functions[survey] = Endline2Prep.filler

    def clean_master(self, raw_master_df):
        """Clean ``raw_master_df`` and persist the cleaned master file.

        Parameters
        ----------
        raw_master_df : pd.DataFrame
            Aggregated raw master user-level data.

        Returns
        -------
        pd.DataFrame
            The cleaned master df (also saved to disk as pickle + csv,
            or to the test location when running in test mode).
        """
        df = self._prepare_proper_sample(raw_master_df)
        # --- Prepare outcome variables ---
        df = self.ingest_qual_data("PDBug", df)
        df = outcome_cleaner.clean_outcome_vars(df)
        # --- Prepare embedded data for upcoming surveys, or ingest
        # embedded data from used CLs for phases that are over ---
        for phase_name, chars in study_config.phases.items():
            start_survey = chars["StartSurvey"]["Name"]
            end_survey = chars["EndSurvey"]["Name"]
            phase_start = study_config.surveys[start_survey]["Start"]
            # Phase less than 3 days in: nothing to randomize yet.
            if datetime.now() < phase_start + timedelta(3):
                print(f"\n No action for {end_survey} Randomization, {phase_name} isn't 3 days in")
                continue
            # Phase underway (3+ days in, end survey not started): prep
            # the end survey's randomization. NOTE: `and` (short-circuit)
            # replaces the original bitwise `&` between booleans.
            if (datetime.now() > phase_start + timedelta(3)) and (datetime.now() < study_config.surveys[end_survey]["Start"]):
                print(f"\n Prepping {end_survey} Randomization")
                df = self.survey_prep_functions[end_survey](df)
            else:
                # Phase is over: embedded data must come from the used CL.
                if end_survey in study_config.filler_surveys:
                    continue
                elif end_survey not in self.used_contact_lists:
                    print(f"{end_survey} CL needs to be in used CL!! Need used treatment assignments")
                    sys.exit()
                else:
                    print(f"\n Adding embedded data on {end_survey} using CL, since {phase_name} is over")
                    df = self._add_cl_data(df, end_survey)
        # --- Calculate earnings ---
        df = Earnings().create_payment_vars(df)
        self.sanity_checks(df)
        if self.config_user_dict['local']['test']:
            test.save_test_df(df, self.master_test_file)
        else:
            test.select_test_appcodes(df)
            serialize.save_pickle(df, self.master_file)
            # Strip newlines/quotes so the csv round-trips cleanly.
            # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1
            # (renamed to DataFrame.map); kept for the pinned pandas version.
            df_str = df.copy().astype(str).applymap(lambda x: x.strip().replace("\n", "").replace('"', ''))
            df_str.to_csv(self.master_file + ".csv", index=False)
        return df

    def _import_used_contact_lists(self):
        """Import every used contact list declared in the study config.

        Returns a dict mapping survey name -> used-CL DataFrame.
        """
        contact_lists = {}
        for survey, cl_name in study_config.used_contact_lists.items():
            contact_lists[survey] = MasterContactGenerator.read_in_used_cl(cl_name, survey)
        return contact_lists

    def _prepare_proper_sample(self, df):
        """Crop the raw master df to people who attempted registration.

        We keep people who never downloaded the app but ATTEMPTED to
        complete registration (saw the consent form and clicked continue)
        so they remain available for attrition analysis. Also asserts
        each row is identified by a unique appcode.
        """
        initial_code = study_config.surveys[study_config.initial_master_survey]["Code"]
        # "nan" string comparison: the raw export stores missing values as
        # the literal string "nan" — presumably from an earlier astype(str);
        # the dropna handles genuine NaNs. TODO(review): confirm upstream.
        df = df.loc[df[f"{initial_code}_Complete"] != "nan"].dropna(
            subset=[f"{initial_code}_Complete"])
        # Reverse order of df so completes appear at the top.
        df = df.iloc[::-1].reset_index(drop=True)
        # Once the initial survey has closed, the sample size is fixed.
        if study_config.surveys[study_config.initial_master_survey]["End"] < datetime.now():
            try:
                assert len(df) == study_config.sample_size
            # Catch only the assertion — the original bare `except:` would
            # have swallowed unrelated errors (even KeyboardInterrupt).
            except AssertionError:
                print(f"length of df ( {len(df)}) not same size as study_config.sample_size: {study_config.sample_size}")
                sys.exit()
        # Every non-null appcode must be unique.
        appcode_series = df.loc[df["AppCode"].notnull(), 'AppCode']
        assert (len(appcode_series) == len(appcode_series.unique()))
        return df

    def ingest_qual_data(self, survey, df):
        """Merge one qualitative-feedback file into the master df.

        Drops PII columns, prefixes survey-specific columns with the
        survey code, de-duplicates by AppCode (keeping the last entry),
        and left-merges only columns not already present in ``df``.
        """
        file = study_config.qualitative_feedback_files[survey]
        code = study_config.surveys[survey]["Code"]
        q = pd.read_csv(os.path.join(self.qual_path, file))
        q = data_utils.add_A_to_appcode(q, "AppCode")
        # Flatten the per-survey PII column lists into one list.
        pii_cols = sum([x for x in study_config.id_cols.values()], [])
        for col in q.columns:
            if col in pii_cols + ["RecipientEmail"]:
                q = q.drop(columns=[col])
            elif col in study_config.main_cols + study_config.embedded_main_cols:
                continue
            else:
                q = q.rename(columns={col: code + "_" + col})
        q = q.loc[(~q.duplicated(subset=["AppCode"], keep='last'))]
        new_cols = ["AppCode"] + list(set(q.columns) - set(df.columns))
        print(new_cols)
        df = df.merge(q[new_cols], how='left', on='AppCode')
        return df

    def _add_cl_data(self, df, survey):
        """Override all the treatment columns created, and insert those created
        in the used contact list. Also add used-CL avg daily use data."""
        old_phase = study_config.surveys[survey]["OldPhase"]
        prev_code = study_config.phases[old_phase]["StartSurvey"]["Code"]
        cl = self.used_contact_lists[survey]
        cl = cl.rename(columns={"PastActual": f"{prev_code}_Cl{study_config.use_var}"})
        cl[f"{prev_code}_Cl{study_config.use_var}"] = pd.to_numeric(cl[f"{prev_code}_Cl{study_config.use_var}"], errors='coerce')
        # Only keep prefixed columns (i.e. containing "_") not already in the
        # main df, plus embedded main cols not already in the df.
        cl_vars_to_merge = ["AppCode"] + [x for x in cl.columns.values if ((x not in df.columns) & ("_" in x)) |
                                          ((x not in df.columns) & (x in study_config.embedded_main_cols))]
        print(f"\t {cl_vars_to_merge}")
        df = df.merge(cl[cl_vars_to_merge], how='left', on="AppCode")
        return df

    def sanity_checks(self, df):
        """Check that the recreated clean master matches expectations."""
        # Assert no obs were dropped in cleaning (once Baseline has ended).
        if study_config.surveys["Baseline"]["End"] < datetime.now():
            if len(df) != study_config.sample_size:
                print(f"CleanMaster (len = {len(df)}) is not same size a hard coded sample size ({study_config.sample_size})")
                sys.exit()
        # Every non-null appcode must be unique.
        appcode_series = df.loc[df["AppCode"].notnull(), 'AppCode']
        assert (len(appcode_series) == len(appcode_series.unique()))