| import pandas as pd |
| from multiprocessing import Pool |
|
|
def get_encounter_dates(df_list, pif_key):
    """Collect all encounter dates recorded for *pif_key* across *df_list*.

    Args:
        df_list: iterable of DataFrames, each with 'pif_key' and
            'encounter_date' columns.
        pif_key: key to match; compared as a string against the column.

    Returns:
        list of encounter-date strings (NaN values replaced by ''); empty
        list when no row matches.
    """
    key = str(pif_key)
    encounter_dates = []
    for df in df_list:
        # Fill NaN on the extracted slice only — the original chained
        # `fillna(..., inplace=True)` is deprecated in pandas 2.x and
        # mutated the caller's DataFrames as a side effect.
        matched = df.loc[df['pif_key'].astype(str) == key, 'encounter_date'].fillna('')
        encounter_dates.extend(matched.tolist())
    return encounter_dates
|
|
def get_latest_date(encounter_dates):
    """Return the greatest date string in *encounter_dates*, or '' when empty.

    Relies on lexicographic ordering of the date strings (correct for
    ISO-style YYYY-MM-DD values).
    """
    return max(encounter_dates) if encounter_dates else ''
|
|
def create_date_insert_dict(ingested_date):
    """Build the attribute record that carries the derived report_date.

    Args:
        ingested_date: the chosen encounter date; stored stringified in
            'attribute_prediction'. All other fields are fixed defaults.
    """
    record = dict.fromkeys(
        ('attribute_normalized_prediction', 'attribute_vocab',
         'attribute_code', 'date_of_service'),
        '',
    )
    record['attribute_name'] = 'report_date'
    record['attribute_method'] = 'cv'
    record['attribute_prediction'] = str(ingested_date)
    record['attribute_version'] = 'v2_090523'
    return record
|
|
def add_logging_entry(logging_df, pif_key, json_report_date_exists, encounter_dates, ingested_date, multiple_date, old_date):
    """Append one audit row to *logging_df* and return the new DataFrame.

    Args:
        logging_df: accumulator DataFrame (not mutated; a new frame is returned).
        pif_key: patient/record key being processed.
        json_report_date_exists: whether a report_date attribute was already present.
        encounter_dates: the dates considered (may be None when nothing was ingested).
        ingested_date: the date actually chosen ('' when none).
        multiple_date: True when more than one candidate date existed.
        old_date: the previous report_date prediction, '' or 'missing'.

    Returns:
        A new DataFrame with the row appended.
    """
    # DataFrame.append was removed in pandas 2.0 — build a one-row frame
    # and concat instead.
    row = pd.DataFrame([{
        'pif_key': pif_key,
        'json_report_date_exists': json_report_date_exists,
        'encounter_dates': encounter_dates,
        'ingested_date': ingested_date,
        'multiple_date': multiple_date,
        'old_date': old_date
    }])
    return pd.concat([logging_df, row], ignore_index=True)
|
|
def date_dict(df_list, pif_key):
    """Derive the report_date insert record for *pif_key*.

    Returns:
        (insert_dict, dates, latest): the attribute record, every matching
        encounter date found, and the latest of those ('' when none).
    """
    dates = get_encounter_dates(df_list, pif_key)
    latest = get_latest_date(dates)
    return create_date_insert_dict(latest), dates, latest
|
|
def _find_report_date(dict_list):
    """Locate the existing report_date attribute in *dict_list*.

    Returns:
        (index, old_prediction) of the first report_date entry, or
        (None, '') when absent.
    """
    for idx, attr in enumerate(dict_list):
        if attr['attribute_name'] == 'report_date':
            return idx, attr['attribute_prediction']
    return None, ''


def report_date_insertion(dict_list, df_list, logging_df):
    """Ensure *dict_list* carries a report_date attribute for its pif_key.

    Inserts (or replaces) a report_date record derived from the encounter
    dates in *df_list*, and records an audit row in *logging_df*.

    Args:
        dict_list: list of attribute dicts (mutated in place).
        df_list: DataFrames searched for encounter dates.
        logging_df: audit log accumulator.

    Returns:
        (dict_list, logging_df) — the possibly-updated list and log.
    """
    col_names = {col['attribute_name'] for col in dict_list}
    pif_key = next((col['attribute_prediction'] for col in dict_list if col['attribute_name'] == 'pif_key'), None)

    # Without a pif_key there is nothing to look up — leave both untouched.
    if pif_key is None:
        return dict_list, logging_df

    date_insert_dict, encounter_dates, ingested_date = date_dict(df_list, pif_key)

    if 'report_date' not in col_names:
        # No existing report_date: always insert the derived one.
        dict_list.insert(1, date_insert_dict)
        logging_df = add_logging_entry(
            logging_df, pif_key, False, encounter_dates, ingested_date,
            len(encounter_dates) > 1, 'missing' if not ingested_date else '')
    else:
        # Helper replaces the duplicated search loops of the original and
        # avoids relying on a leaked loop variable for the pop() index.
        idx, old_date = _find_report_date(dict_list)
        if ingested_date:
            if idx is not None:
                dict_list.pop(idx)
            dict_list.insert(1, date_insert_dict)
            logging_df = add_logging_entry(
                logging_df, pif_key, True, encounter_dates, ingested_date,
                len(encounter_dates) > 1, old_date)
        else:
            # Nothing ingested: keep the existing report_date, log only.
            logging_df = add_logging_entry(
                logging_df, pif_key, True, None, '', False, old_date)

    return dict_list, logging_df
|
|
def process_biomarker_detail(args):
    """Pool worker: run report_date_insertion on every attribute of one detail.

    Args:
        args: (biomarker_detail, df_list, logging_df) tuple — packed for
            Pool.map's single-argument calling convention.

    Returns:
        (biomarker_detail, logging_df) with updated attribute_details and log.
    """
    biomarker_detail, df_list, logging_df = args
    for attribute in biomarker_detail['attribute']:
        updated_details, logging_df = report_date_insertion(
            attribute['attribute_details'], df_list, logging_df)
        attribute['attribute_details'] = updated_details
    return biomarker_detail, logging_df
|
|
def json_report_date_insertion(json_data, df_list, logging_df):
    """Insert/refresh report_date attributes for every biomarker detail.

    Fans the details out across a process pool; each worker receives its
    own copy of *logging_df* and appends rows to it.

    Args:
        json_data: parsed report JSON with patient_level.biomarkers.details.
        df_list: DataFrames searched for encounter dates.
        logging_df: base audit log.

    Returns:
        (json_data, merged_logging_df) — JSON with updated details, and one
        DataFrame combining the base log with every worker's new rows.
    """
    biomarker_details = json_data['patient_level']['biomarkers']['details']
    task_args = [(detail, df_list, logging_df) for detail in biomarker_details]
    with Pool() as pool:
        results = pool.map(process_biomarker_detail, task_args)

    # zip(*[]) would raise on an empty details list — nothing to update.
    if not results:
        return json_data, logging_df

    updated_details, per_worker_logs = zip(*results)
    # Keep details a list (the original assigned a tuple into the JSON).
    json_data['patient_level']['biomarkers']['details'] = list(updated_details)

    # Each worker's log is base rows + its own additions; slice off the
    # shared base so merging does not duplicate it, then concat once.
    # (The original returned the raw tuple of per-worker copies.)
    base_len = len(logging_df)
    new_rows = [log.iloc[base_len:] for log in per_worker_logs]
    merged_logging_df = pd.concat([logging_df, *new_rows], ignore_index=True)
    return json_data, merged_logging_df
|
|