# project1/project1.py
import datasets
import pandas as pd
import numpy as np
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
_HOMEPAGE = ""
_LICENSE = ""
class HealthStatisticsDataset(datasets.GeneratorBasedBuilder):
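    """Builds the health statistics dataset.

    Downloads US cardiovascular disease mortality indicators, encodes the
    categorical columns as integers, joins life-expectancy estimates, and
    yields one example per row.
    """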
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"Year": datasets.Value("int32"),
"LocationAbbr": datasets.Value("string"),
"LocationDesc": datasets.Value("string"),
"Latitude": datasets.Value("float32"),
"Longitude": datasets.Value("float32"),
"Disease_Type": datasets.Value("int32"),
"Data_Value_Type": datasets.Value("int32"),
"Data_Value": datasets.Value("float32"),
"Break_Out_Category": datasets.Value("string"),
"Break_Out_Details": datasets.Value("string"),
"Break_Out_Type": datasets.Value("int32"),
"Life_Expectancy": datasets.Value("float32")
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
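        # Download the raw indicators CSV (hosted on Google Drive) and preprocess
        # it in memory; everything goes into a single train split.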
data = pd.read_csv(dl_manager.download_and_extract("https://docs.google.com/uc?export=download&id=1eChYmZ3RMq1v-ek1u6DD2m_dGIrz3sbi&confirm=t"))
processed_data = self.preprocess_data(data)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data": processed_data},
),
]
def _generate_examples(self, data):
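        # Each preprocessed DataFrame row becomes one example, keyed by its index.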
for key, row in data.iterrows():
            year = int(row['Year']) if 'Year' in row else None
            # Geolocation is a WKT point string, e.g. "POINT (<lon> <lat>)".
            latitude, longitude = None, None
            geolocation = row.get('Geolocation')
            if isinstance(geolocation, str):
                geo_str = geolocation.replace('POINT (', '').replace(')', '')
                longitude, latitude = map(float, geo_str.split())
yield key, {
"Year": year,
"LocationAbbr": row.get('LocationAbbr', None),
"LocationDesc": row.get('LocationDesc', None),
"Latitude": latitude,
"Longitude": longitude,
"Disease_Type": int(row["Disease_Type"]) if "Disease_Type" in row else None,
"Data_Value_Type": int(row["Data_Value_Type"]) if "Data_Value_Type" in row else None,
"Data_Value": float(row["Data_Value"]) if "Data_Value" in row else None,
"Break_Out_Category": row.get("Break_Out_Category", None),
"Break_Out_Details": row.get("Break_Out_Details", None),
"Break_Out_Type": int(row["Break_Out_Type"]) if 'Break_Out_Type' in row else None,
"Life_Expectancy": float(row["Life_Expectancy"]) if row.get("Life_Expectancy") else None
}
@staticmethod
def preprocess_data(data):
        # Keep only the columns used downstream; .copy() avoids chained-assignment
        # warnings without silencing pandas' check globally.
        data = data[['YearStart', 'LocationAbbr', 'LocationDesc', 'Geolocation', 'Topic', 'Question', 'Data_Value_Type', 'Data_Value', 'Data_Value_Alt',
                     'Low_Confidence_Limit', 'High_Confidence_Limit', 'Break_Out_Category', 'Break_Out']].copy()
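        # Encode each mortality-rate question as an integer ID (0-7, in the
        # order below); anything unmapped becomes -1.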
disease_columns = [
'Major cardiovascular disease mortality rate among US adults (18+); NVSS',
'Diseases of the heart (heart disease) mortality rate among US adults (18+); NVSS',
'Acute myocardial infarction (heart attack) mortality rate among US adults (18+); NVSS',
'Coronary heart disease mortality rate among US adults (18+); NVSS',
'Heart failure mortality rate among US adults (18+); NVSS',
'Cerebrovascular disease (stroke) mortality rate among US adults (18+); NVSS',
'Ischemic stroke mortality rate among US adults (18+); NVSS',
'Hemorrhagic stroke mortality rate among US adults (18+); NVSS'
]
disease_column_mapping = {column_name: index for index, column_name in enumerate(disease_columns)}
data['Question'] = data['Question'].apply(lambda x: disease_column_mapping.get(x, -1))
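        # Demographic break-outs are encoded as small positive integers within
        # each category (sex: 1-2, age: 1-4, race: 1-4).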
sex_columns = ['Male', 'Female']
sex_column_mapping = {column_name: index + 1 for index, column_name in enumerate(sex_columns)}
age_columns = ['18-24', '25-44', '45-64', '65+']
age_column_mapping = {column_name: index + 1 for index, column_name in enumerate(age_columns)}
race_columns = ['Non-Hispanic White', 'Non-Hispanic Black', 'Hispanic', 'Other']
race_column_mapping = {column_name: index + 1 for index, column_name in enumerate(race_columns)}
def map_break_out_category(value):
if value in sex_column_mapping:
return sex_column_mapping[value]
elif value in age_column_mapping:
return age_column_mapping[value]
elif value in race_column_mapping:
return race_column_mapping[value]
else:
return value
data['Break_Out_Type'] = data['Break_Out'].apply(map_break_out_category)
        data.drop(columns=['Topic', 'Low_Confidence_Limit', 'High_Confidence_Limit', 'Data_Value_Alt'], inplace=True)
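        # Encode the value type: 1 for age-standardized rates, 0 otherwise.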
data['Data_Value_Type'] = data['Data_Value_Type'].apply(lambda x: 1 if x == 'Age-Standardized' else 0)
data.rename(columns={'Question':'Disease_Type', 'YearStart':'Year', 'Break_Out':'Break_Out_Details'}, inplace=True)
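        # 'Overall' rows (no demographic break-out) get code 0.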
data['Break_Out_Type'] = data['Break_Out_Type'].replace('Overall', 0)
        # Life-expectancy tables, one Google Drive CSV per year (2000, 2005,
        # 2010, 2015). Keep the total-population value at birth ('<1 year').
        lt_urls = [
            "https://docs.google.com/uc?export=download&id=1ktRNl7jg0Z83rkymD9gcsGLdVqVaFtd-&confirm=t",
            "https://docs.google.com/uc?export=download&id=1xZqeOgj32-BkOhDTZVc4k_tp1ddnOEh7&confirm=t",
            "https://docs.google.com/uc?export=download&id=1ItqHBuuUa38PVytfahaAV8NWwbhHMMg8&confirm=t",
            "https://docs.google.com/uc?export=download&id=1rOgQY1RQiry2ionTKM_UWgT8cYD2E0vX&confirm=t",
        ]
        lt_frames = []
        for url in lt_urls:
            lt = pd.read_csv(url)
            lt = lt[(lt['race_name'] == 'Total') & (lt['age_name'] == '<1 year')]
            lt = lt[['location_name', 'val']].rename(columns={'val': 'Life_Expectancy'})
            lt_frames.append(lt)
        lt_data = pd.concat(lt_frames)
        # Keep one life-expectancy value per location (earliest year wins).
        lt_data.drop_duplicates(subset=['location_name'], inplace=True)
        # Join life expectancy onto the health data by location name.
        data2 = pd.merge(data, lt_data, how='inner', left_on='LocationDesc', right_on='location_name')
        data2.drop(columns=['location_name'], inplace=True)
        # Drop age break-outs that have no encoding above.
        data2 = data2[(data2['Break_Out_Details'] != '75+') & (data2['Break_Out_Details'] != '35+')]
        # Life expectancy applies only to the overall population, not to
        # individual demographic break-outs.
        data2['Life_Expectancy'] = np.where(data2['Break_Out_Type'] == 0, data2['Life_Expectancy'], np.nan)
data2 = data2.reset_index(drop=True)
return data2
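

if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original script):
    # builds the dataset by running this loading script directly. It assumes
    # the Google Drive files above are still downloadable; recent versions of
    # `datasets` require trust_remote_code=True to execute a local script.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train", trust_remote_code=True)
    print(ds)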