mastergopote44 committed on
Commit 08f4213
1 Parent(s): 70b2bd0
Files changed (1)
  1. Long-Term-Care-Aggregated-Data.py +0 -196
Long-Term-Care-Aggregated-Data.py DELETED
@@ -1,196 +0,0 @@
- # -*- coding: utf-8 -*-
- """Long-Term-Care-Aggregated-Data.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/drive/14YdgB8b4TtNetbHpTstGH7W4DLa3Yyxq
- """
-
- from datasets import GeneratorBasedBuilder, DownloadManager, DatasetInfo, Array3D, BuilderConfig, SplitGenerator, Version
- from datasets.features import Features, Value, Sequence
- import datasets
- import pandas as pd
- import json
- import zipfile
- from PIL import Image
- import numpy as np
- import io
- import csv
- import json
- import os
- from typing import List
- import datasets
- import logging
-
- _CITATION = """\
- @misc{long_term_care_aggregated_dataset,
-     title = {Long-Term Care Aggregated Dataset},
-     author = {Kao, Hsuan-Chen (Justin)},
-     year = {2024},
-     publisher = {Hugging Face},
-     url = {https://github.com/justinkao44/STA663_Project_1},
- }
- """
-
- _DESCRIPTION = """\
- The Long-Term Care Aggregated Dataset is a collection of insurance data for 'incidence' and 'termination' categories.
- It is compiled from Long Term Care insurance policies data, providing insights into trends and patterns in insurance claims
- and terminations. This dataset can be used for actuarial analysis, risk assessment, and to inform insurance product development.
- """
-
- _HOMEPAGE = "https://github.com/justinkao44/STA663_Project_1"
-
- _LICENSE = "Apache-2.0"
-
- _URLS = {
-     "train_incidence": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/train_filtered_incidence_df.csv",
-     "train_termination": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/train_filtered_termination_df.csv",
-     "validation_incidence": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/validation_filtered_incidence_df.csv",
-     "validation_termination": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/validation_filtered_termination_df.csv",
- }
-
-
- class LongTermCareAggregatedData(datasets.GeneratorBasedBuilder):
-     """Dataset for insurance 'incidence' and 'termination' data."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="incidence", version=datasets.Version("1.0.0"), description="This part of the dataset includes incidence features"),
-         datasets.BuilderConfig(name="termination", version=datasets.Version("1.0.0"), description="This part of the dataset includes termination features"),
-     ]
-
-     def _info(self):
-         if self.config.name == "incidence":
-             features = datasets.Features({
-                 "Group_Indicator": datasets.Value("string"),
-                 "Gender": datasets.Value("string"),
-                 "Issue_Age_Bucket": datasets.Value("string"),
-                 "Incurred_Age_Bucket": datasets.Value("string"),
-                 "Issue_Year_Bucket": datasets.Value("string"),
-                 "Policy_Year": datasets.Value("string"),
-                 "Marital_Status": datasets.Value("string"),
-                 "Premium_Class": datasets.Value("string"),
-                 "Underwriting_Type": datasets.Value("string"),
-                 "Coverage_Type_Bucket": datasets.Value("string"),
-                 "Tax_Qualification_Status": datasets.Value("string"),
-                 "Inflation_Rider": datasets.Value("string"),
-                 "Rate_Increase_Flag": datasets.Value("string"),
-                 "Restoration_of_Benefits": datasets.Value("string"),
-                 "NH_Orig_Daily_Ben_Bucket": datasets.Value("string"),
-                 "ALF_Orig_Daily_Ben_Bucket": datasets.Value("string"),
-                 "HHC_Orig_Daily_Ben_Bucket": datasets.Value("string"),
-                 "NH_Ben_Period_Bucket": datasets.Value("string"),
-                 "ALF_Ben_Period_Bucket": datasets.Value("string"),
-                 "HHC_Ben_Period_Bucket": datasets.Value("string"),
-                 "NH_EP_Bucket": datasets.Value("string"),
-                 "ALF_EP_Bucket": datasets.Value("string"),
-                 "HHC_EP_Bucket": datasets.Value("string"),
-                 "Region": datasets.Value("string"),
-                 "Active_Exposure": datasets.Value("float64"),
-                 "Total_Exposure": datasets.Value("float64"),
-                 "Claim_Count": datasets.Value("int32"),
-                 "Count_NH": datasets.Value("int32"),
-                 "Count_ALF": datasets.Value("int32"),
-                 "Count_HHC": datasets.Value("int32"),
-                 "Count_Unk": datasets.Value("int32"),
-             })
-         elif self.config.name == "termination":
-             features = datasets.Features({
-                 "Gender": datasets.Value("string"),
-                 "Incurred_Age_Bucket": datasets.Value("string"),
-                 "Incurred_Year_Bucket": datasets.Value("string"),
-                 "Claim_Type": datasets.Value("string"),
-                 "Region": datasets.Value("string"),
-                 "Diagnosis_Category": datasets.Value("string"),
-                 "Claim_Duration": datasets.Value("int64"),
-                 "Exposure": datasets.Value("int64"),
-                 "Deaths": datasets.Value("int64"),
-                 "Recovery": datasets.Value("int64"),
-                 "Terminations": datasets.Value("int64"),
-                 "Benefit_Expiry": datasets.Value("int64"),
-                 "Others_Terminations": datasets.Value("int64"),
-             })
-         else:
-             raise ValueError(f"BuilderConfig name not recognized: {self.config.name}")
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage="https://www.soa.org/resources/experience-studies/2020/2000-2016-ltc-aggregate-database/",
-             citation="Please cite this dataset as: Society of Actuaries (SOA). (2020). Long Term Care Insurance Aggregate Experience Data, 2000-2016."
-         )
-
-     def _split_generators(self, dl_manager):
-
-         downloaded_files = dl_manager.download(_URLS)
-
-         return [
-             datasets.SplitGenerator(
-                 name="train_incidence",
-                 gen_kwargs={
-                     "data_file": downloaded_files["train_incidence"],
-                     "split": "incidence",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="validation_incidence",
-                 gen_kwargs={
-                     "data_file": downloaded_files["validation_incidence"],
-                     "split": "incidence",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="train_termination",
-                 gen_kwargs={
-                     "data_file": downloaded_files["train_termination"],
-                     "split": "termination",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="validation_termination",
-                 gen_kwargs={
-                     "data_file": downloaded_files["validation_termination"],
-                     "split": "termination",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, data_file, split):
-         # Read the CSV file for the given split
-         dataframe = pd.read_csv(data_file)
-
-         # Determine the feature columns based on the split type
-         feature_columns = self._get_feature_columns(split)
-
-         # Yield examples
-         for idx, row in dataframe.iterrows():
-             feature_dict = {column: row[column] for column in feature_columns}
-             yield idx, feature_dict
-
-     def _get_feature_columns(self, split):
-         # Define the feature columns for 'incidence'
-         incidence_columns = [
-             "Group_Indicator", "Gender", "Issue_Age_Bucket", "Incurred_Age_Bucket",
-             "Issue_Year_Bucket", "Policy_Year", "Marital_Status", "Premium_Class",
-             "Underwriting_Type", "Coverage_Type_Bucket", "Tax_Qualification_Status",
-             "Inflation_Rider", "Rate_Increase_Flag", "Restoration_of_Benefits",
-             "NH_Orig_Daily_Ben_Bucket", "ALF_Orig_Daily_Ben_Bucket", "HHC_Orig_Daily_Ben_Bucket",
-             "NH_Ben_Period_Bucket", "ALF_Ben_Period_Bucket", "HHC_Ben_Period_Bucket",
-             "NH_EP_Bucket", "ALF_EP_Bucket", "HHC_EP_Bucket", "Region",
-             "Active_Exposure", "Total_Exposure", "Claim_Count", "Count_NH", "Count_ALF", "Count_HHC", "Count_Unk",
-         ]
-
-         # Define the feature columns for 'termination'
-         termination_columns = [
-             "Gender", "Incurred_Age_Bucket", "Incurred_Year_Bucket", "Claim_Type",
-             "Region", "Diagnosis_Category", "Claim_Duration", "Exposure", "Deaths",
-             "Recovery", "Terminations", "Benefit_Expiry", "Others_Terminations",
-         ]
-
-         if split == "incidence":
-             return incidence_columns
-         elif split == "termination":
-             return termination_columns
-         else:
-             raise ValueError(f"Split name not recognized: {split}")
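For reference, the deleted file is a standard datasets loading script, so before this commit it could be consumed roughly as in the sketch below. The repository id is an assumption based on the committer's namespace and the file name, and running such a script requires trust_remote_code=True on recent versions of the datasets library.

from datasets import load_dataset

# Hypothetical repo id, assumed from the committer's namespace and the script name.
ds = load_dataset(
    "mastergopote44/Long-Term-Care-Aggregated-Data",
    "incidence",             # builder config from the script: "incidence" or "termination"
    trust_remote_code=True,  # needed to execute a dataset loading script
)

# The script defines custom split names rather than plain train/validation.
print(ds["train_incidence"][0])
print(ds["validation_incidence"].num_rows)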