bstds committed on
Commit
16dcd38
1 Parent(s): b22fb6a

Upload 53 files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. README.md +0 -0
  2. documentations/api.pdf +0 -0
  3. documentations/manual.pdf +0 -0
  4. documentations/papers/ideal2005probabilistic.pdf +0 -0
  5. documentations/papers/padm2006privacy.pdf +0 -0
  6. documentations/papers/pakdd2009accurate.pdf +0 -0
  7. documentations/papers/sigkddexp2009febrl.pdf +0 -0
  8. examples/generate-data-english.py +330 -0
  9. examples/generate-data-japanese.py +185 -0
  10. geco_data_generator/__init__.py +2 -0
  11. geco_data_generator/attrgenfunct.py +316 -0
  12. geco_data_generator/basefunctions.py +614 -0
  13. geco_data_generator/contdepfunct.py +64 -0
  14. geco_data_generator/corruptor.py +2035 -0
  15. geco_data_generator/data/age-freq.csv +17 -0
  16. geco_data_generator/data/city-gender-bloodpressure.csv +13 -0
  17. geco_data_generator/data/gender-bloodpressure.csv +5 -0
  18. geco_data_generator/data/gender-city-income.csv +13 -0
  19. geco_data_generator/data/gender-city-japanese.csv +10 -0
  20. geco_data_generator/data/gender-city.csv +11 -0
  21. geco_data_generator/data/gender-income-data.csv +6 -0
  22. geco_data_generator/data/gender-income.csv +6 -0
  23. geco_data_generator/data/givenname_f_freq.csv +529 -0
  24. geco_data_generator/data/givenname_freq.csv +888 -0
  25. geco_data_generator/data/givenname_m_freq.csv +373 -0
  26. geco_data_generator/data/ocr-variations-upper-lower.csv +51 -0
  27. geco_data_generator/data/ocr-variations.csv +31 -0
  28. geco_data_generator/data/phonetic-variations.csv +358 -0
  29. geco_data_generator/data/postcode_act_freq.csv +43 -0
  30. geco_data_generator/data/qwerty-keyboard.csv +9 -0
  31. geco_data_generator/data/state-income.csv +11 -0
  32. geco_data_generator/data/surname-freq-japanese.csv +22 -0
  33. geco_data_generator/data/surname-freq.csv +0 -0
  34. geco_data_generator/data/surname-misspell-japanese.csv +14 -0
  35. geco_data_generator/data/surname-misspell.csv +443 -0
  36. geco_data_generator/generator.py +2067 -0
  37. requirements.txt +0 -0
  38. setup.py +36 -0
  39. tests/attrgenfunct_test.py +922 -0
  40. tests/basefunctions_test.py +1475 -0
  41. tests/contdepfunct_test.py +381 -0
  42. tests/corruptor_test.py +0 -0
  43. tests/generator_test.py +0 -0
  44. tests/logs/basefunctionsTest-20230130-1746.csv +140 -0
  45. tests/logs/contdepfunctTest-20230130-1748.csv +5 -0
  46. tests/logs/contdepfunctTest-20230130-1749.csv +15 -0
  47. tests/main_test.py +793 -0
  48. tests/test.csv +4 -0
  49. tests/test.txt +0 -0
  50. tests/test1.csv +4 -0
README.md ADDED
File without changes
documentations/api.pdf ADDED
Binary file (251 kB)
 
documentations/manual.pdf ADDED
Binary file (269 kB)
 
documentations/papers/ideal2005probabilistic.pdf ADDED
Binary file (127 kB)
 
documentations/papers/padm2006privacy.pdf ADDED
Binary file (121 kB)
 
documentations/papers/pakdd2009accurate.pdf ADDED
Binary file (660 kB)
 
documentations/papers/sigkddexp2009febrl.pdf ADDED
Binary file (797 kB)
 
examples/generate-data-english.py ADDED
@@ -0,0 +1,330 @@
1
+ from geco_data_generator import (attrgenfunct, basefunctions, contdepfunct, generator, corruptor)
2
+ import random
3
+ random.seed(42)
4
+
5
+ # Set the Unicode encoding for this data generation project. This needs to be
6
+ # changed to another encoding for different Unicode character sets.
7
+ # Valid encoding strings are listed here:
8
+ # http://docs.python.org/library/codecs.html#standard-encodings
9
+ #
10
+ unicode_encoding_used = 'ascii'
11
+
12
+ # The name of the record identifier attribute (unique value for each record).
13
+ # This name cannot be given as name to any other attribute that is generated.
14
+ #
15
+ rec_id_attr_name = 'rec-id'
16
+
17
+ # Set the file name of the data set to be generated (this will be a comma
18
+ # separated values, CSV, file).
19
+ #
20
+ out_file_name = 'example-data-english.csv'
21
+
22
+ # Set how many original and how many duplicate records are to be generated.
23
+ #
24
+ num_org_rec = 5_000_000
25
+ num_dup_rec = 100_000
26
+
27
+ # Set the maximum number of duplicate records that can be generated per original
28
+ # record.
29
+ #
30
+ max_duplicate_per_record = 3
31
+
32
+ # Set the probability distribution used to create the duplicate records for one
33
+ # original record (possible values are: 'uniform', 'poisson', 'zipf').
34
+ #
35
+ num_duplicates_distribution = 'zipf'
36
+
37
+ # Set the maximum number of modifications that can be applied to a single
38
+ # attribute (field).
39
+ #
40
+ max_modification_per_attr = 1
41
+
42
+ # Set the number of modifications that are to be applied to a record.
43
+ #
44
+ num_modification_per_record = 5
45
+
46
+ # Check that the given Unicode encoding is valid.
47
+ #
48
+ basefunctions.check_unicode_encoding_exists(unicode_encoding_used)
49
+
50
+ # -----------------------------------------------------------------------------
51
+ # Define the attributes to be generated (using methods from the generator.py
52
+ # module).
53
+ #
54
+ gname_attr = generator.GenerateFreqAttribute(
55
+ attribute_name='given-name',
56
+ freq_file_name='givenname_f_freq.csv',
57
+ has_header_line=False,
58
+ unicode_encoding=unicode_encoding_used,
59
+ )
60
+
61
+ sname_attr = generator.GenerateFreqAttribute(
62
+ attribute_name='surname',
63
+ freq_file_name='surname-freq.csv',
64
+ has_header_line=False,
65
+ unicode_encoding=unicode_encoding_used,
66
+ )
67
+
68
+ postcode_attr = generator.GenerateFreqAttribute(
69
+ attribute_name='postcode',
70
+ freq_file_name='postcode_act_freq.csv',
71
+ has_header_line=False,
72
+ unicode_encoding=unicode_encoding_used,
73
+ )
74
+
75
+ phone_num_attr = generator.GenerateFuncAttribute(
76
+ attribute_name='telephone-number',
77
+ function=attrgenfunct.generate_phone_number_australia,
78
+ )
79
+
80
+ credit_card_attr = generator.GenerateFuncAttribute(
81
+ attribute_name='credit-card-number', function=attrgenfunct.generate_credit_card_number
82
+ )
83
+
84
+ age_uniform_attr = generator.GenerateFuncAttribute(
85
+ attribute_name='age-uniform',
86
+ function=attrgenfunct.generate_uniform_age,
87
+ parameters=[0, 120],
88
+ )
89
+
90
+ income_normal_attr = generator.GenerateFuncAttribute(
91
+ attribute_name='income-normal',
92
+ function=attrgenfunct.generate_normal_value,
93
+ parameters=[50000, 20000, 0, 1000000, 'float2'],
94
+ )
95
+
96
+ rating_normal_attr = generator.GenerateFuncAttribute(
97
+ attribute_name='rating-normal',
98
+ function=attrgenfunct.generate_normal_value,
99
+ parameters=[0.0, 1.0, None, None, 'float9'],
100
+ )
101
+
102
+ gender_city_comp_attr = generator.GenerateCateCateCompoundAttribute(
103
+ categorical1_attribute_name='gender',
104
+ categorical2_attribute_name='city',
105
+ lookup_file_name='gender-city.csv',
106
+ has_header_line=True,
107
+ unicode_encoding='ascii',
108
+ )
109
+
110
+ sex_income_comp_attr = generator.GenerateCateContCompoundAttribute(
111
+ categorical_attribute_name='sex',
112
+ continuous_attribute_name='income',
113
+ continuous_value_type='float1',
114
+ lookup_file_name='gender-income.csv',
115
+ has_header_line=False,
116
+ unicode_encoding='ascii',
117
+ )
118
+
119
+ gender_town_salary_comp_attr = generator.GenerateCateCateContCompoundAttribute(
120
+ categorical1_attribute_name='alternative-gender',
121
+ categorical2_attribute_name='town',
122
+ continuous_attribute_name='salary',
123
+ continuous_value_type='float4',
124
+ lookup_file_name='gender-city-income.csv',
125
+ has_header_line=False,
126
+ unicode_encoding='ascii',
127
+ )
128
+
129
+ age_blood_pressure_comp_attr = generator.GenerateContContCompoundAttribute(
130
+ continuous1_attribute_name='age',
131
+ continuous2_attribute_name='blood-pressure',
132
+ continuous1_funct_name='uniform',
133
+ continuous1_funct_param=[10, 110],
134
+ continuous2_function=contdepfunct.blood_pressure_depending_on_age,
135
+ continuous1_value_type='int',
136
+ continuous2_value_type='float3',
137
+ )
138
+
139
+ age_salary_comp_attr = generator.GenerateContContCompoundAttribute(
140
+ continuous1_attribute_name='age2',
141
+ continuous2_attribute_name='salary2',
142
+ continuous1_funct_name='normal',
143
+ continuous1_funct_param=[45, 20, 15, 130],
144
+ continuous2_function=contdepfunct.salary_depending_on_age,
145
+ continuous1_value_type='int',
146
+ continuous2_value_type='float1',
147
+ )
148
+
149
+ # -----------------------------------------------------------------------------
150
+ # Define how the generated records are to be corrupted (using methods from
151
+ # the corruptor.py module).
152
+
153
+ # For a value edit corruptor, the sum of the four probabilities given must
154
+ # be 1.0.
155
+ #
156
+ edit_corruptor = corruptor.CorruptValueEdit(
157
+ position_function=corruptor.position_mod_normal,
158
+ char_set_funct=basefunctions.char_set_ascii,
159
+ insert_prob=0.5,
160
+ delete_prob=0.5,
161
+ substitute_prob=0.0,
162
+ transpose_prob=0.0,
163
+ )
164
+
165
+ edit_corruptor2 = corruptor.CorruptValueEdit(
166
+ position_function=corruptor.position_mod_uniform,
167
+ char_set_funct=basefunctions.char_set_ascii,
168
+ insert_prob=0.25,
169
+ delete_prob=0.25,
170
+ substitute_prob=0.25,
171
+ transpose_prob=0.25,
172
+ )
173
+
174
+ surname_misspell_corruptor = corruptor.CorruptCategoricalValue(
175
+ lookup_file_name='surname-misspell.csv',
176
+ has_header_line=False,
177
+ unicode_encoding=unicode_encoding_used,
178
+ )
179
+
180
+ ocr_corruptor = corruptor.CorruptValueOCR(
181
+ position_function=corruptor.position_mod_normal,
182
+ lookup_file_name='ocr-variations.csv',
183
+ has_header_line=False,
184
+ unicode_encoding=unicode_encoding_used,
185
+ )
186
+
187
+ keyboard_corruptor = corruptor.CorruptValueKeyboard(
188
+ position_function=corruptor.position_mod_normal, row_prob=0.5, col_prob=0.5
189
+ )
190
+
191
+ phonetic_corruptor = corruptor.CorruptValuePhonetic(
192
+ lookup_file_name='phonetic-variations.csv',
193
+ has_header_line=False,
194
+ unicode_encoding=unicode_encoding_used,
195
+ )
196
+
197
+ missing_val_corruptor = corruptor.CorruptMissingValue()
198
+
199
+ postcode_missing_val_corruptor = corruptor.CorruptMissingValue(missing_val='missing')
200
+
201
+ given_name_missing_val_corruptor = corruptor.CorruptMissingValue(missing_value='unknown')
202
+
203
+ # -----------------------------------------------------------------------------
204
+ # Define the attributes to be generated for this data set, and the data set
205
+ # itself.
206
+ #
207
+ attr_name_list = [
208
+ 'gender',
209
+ 'given-name',
210
+ 'surname',
211
+ 'postcode',
212
+ 'city',
213
+ 'telephone-number',
214
+ 'credit-card-number',
215
+ 'income-normal',
216
+ 'age-uniform',
217
+ 'income',
218
+ 'age',
219
+ 'sex',
220
+ 'blood-pressure',
221
+ ]
222
+
223
+ attr_data_list = [
224
+ gname_attr,
225
+ sname_attr,
226
+ postcode_attr,
227
+ phone_num_attr,
228
+ credit_card_attr,
229
+ age_uniform_attr,
230
+ income_normal_attr,
231
+ gender_city_comp_attr,
232
+ sex_income_comp_attr,
233
+ gender_town_salary_comp_attr,
234
+ age_blood_pressure_comp_attr,
235
+ age_salary_comp_attr,
236
+ ]
237
+
238
+ # Nothing to change here - set-up the data set generation object.
239
+ #
240
+ test_data_generator = generator.GenerateDataSet(
241
+ output_file_name=out_file_name,
242
+ write_header_line=True,
243
+ rec_id_attr_name=rec_id_attr_name,
244
+ number_of_records=num_org_rec,
245
+ attribute_name_list=attr_name_list,
246
+ attribute_data_list=attr_data_list,
247
+ unicode_encoding=unicode_encoding_used,
248
+ )
249
+
250
+ # Define the probability distribution of how likely an attribute will be
251
+ # selected for a modification.
252
+ # Each of the given probability values must be between 0 and 1, and the sum of
253
+ # them must be 1.0.
254
+ # If a probability is set to 0 for a certain attribute, then no modification
255
+ # will be applied on this attribute.
256
+ #
257
+ attr_mod_prob_dictionary = {
258
+ 'gender': 0.1,
259
+ 'given-name': 0.2,
260
+ 'surname': 0.2,
261
+ 'postcode': 0.1,
262
+ 'city': 0.1,
263
+ 'telephone-number': 0.15,
264
+ 'credit-card-number': 0.1,
265
+ 'age': 0.05,
266
+ }
267
+
268
+ # Define the actual corruption (modification) methods that will be applied on
269
+ # the different attributes.
270
+ # For each attribute, the sum of probabilities given must sum to 1.0.
271
+ #
272
+ attr_mod_data_dictionary = {
273
+ 'gender': [(1.0, missing_val_corruptor)],
274
+ 'surname': [
275
+ (0.1, surname_misspell_corruptor),
276
+ (0.1, ocr_corruptor),
277
+ (0.1, keyboard_corruptor),
278
+ (0.7, phonetic_corruptor),
279
+ ],
280
+ 'given-name': [
281
+ (0.1, edit_corruptor2),
282
+ (0.1, ocr_corruptor),
283
+ (0.1, keyboard_corruptor),
284
+ (0.7, phonetic_corruptor),
285
+ ],
286
+ 'postcode': [(0.8, keyboard_corruptor), (0.2, postcode_missing_val_corruptor)],
287
+ 'city': [
288
+ (0.1, edit_corruptor),
289
+ (0.1, missing_val_corruptor),
290
+ (0.4, keyboard_corruptor),
291
+ (0.4, phonetic_corruptor),
292
+ ],
293
+ 'age': [(1.0, edit_corruptor2)],
294
+ 'telephone-number': [(1.0, missing_val_corruptor)],
295
+ 'credit-card-number': [(1.0, edit_corruptor)],
296
+ }
297
+
298
+ # Nothing to change here - set-up the data set corruption object
299
+ #
300
+ test_data_corruptor = corruptor.CorruptDataSet(
301
+ number_of_org_records=num_org_rec,
302
+ number_of_mod_records=num_dup_rec,
303
+ attribute_name_list=attr_name_list,
304
+ max_num_dup_per_rec=max_duplicate_per_record,
305
+ num_dup_dist=num_duplicates_distribution,
306
+ max_num_mod_per_attr=max_modification_per_attr,
307
+ num_mod_per_rec=num_modification_per_record,
308
+ attr_mod_prob_dict=attr_mod_prob_dictionary,
309
+ attr_mod_data_dict=attr_mod_data_dictionary,
310
+ )
311
+
312
+ # =============================================================================
313
+ # No need to change anything below here
314
+
315
+ # Start the data generation process
316
+ #
317
+ rec_dict = test_data_generator.generate()
318
+
319
+ assert len(rec_dict) == num_org_rec # Check the number of generated records
320
+
321
+ # Corrupt (modify) the original records into duplicate records
322
+ #
323
+ rec_dict = test_data_corruptor.corrupt_records(rec_dict)
324
+
325
+ assert len(rec_dict) == num_org_rec + num_dup_rec # Check total number of records
326
+
327
+ # Write the generated data into a file
328
+ #
329
+ test_data_generator.write()
330
+
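The script above wires together the full pipeline: define attribute generators, generate num_org_rec original records, corrupt some of them into duplicates, and write a single CSV file. Before committing to the 5,000,000-record run it can be worth exercising the same API on a tiny data set first. The sketch below is not part of this commit; it assumes the package is importable as geco_data_generator with its bundled lookup files available, and the output name tiny-test.csv is made up for illustration.

import random
from geco_data_generator import attrgenfunct, generator

random.seed(42)

# One frequency-table attribute and one function-based attribute are enough
# to exercise the generate/write path end to end.
gname_attr = generator.GenerateFreqAttribute(
    attribute_name='given-name',
    freq_file_name='givenname_f_freq.csv',
    has_header_line=False,
    unicode_encoding='ascii',
)
phone_attr = generator.GenerateFuncAttribute(
    attribute_name='telephone-number',
    function=attrgenfunct.generate_phone_number_australia,
)

gen = generator.GenerateDataSet(
    output_file_name='tiny-test.csv',   # hypothetical output name
    write_header_line=True,
    rec_id_attr_name='rec-id',
    number_of_records=10,
    attribute_name_list=['given-name', 'telephone-number'],
    attribute_data_list=[gname_attr, phone_attr],
    unicode_encoding='ascii',
)

rec_dict = gen.generate()   # dictionary keyed by record identifier
assert len(rec_dict) == 10
gen.write()                 # writes tiny-test.csv

The corruptor.CorruptDataSet step from the script above attaches to this in exactly the same way, so scaling back up is mostly a matter of restoring the record counts.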
examples/generate-data-japanese.py ADDED
@@ -0,0 +1,185 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ from geco_data_generator import basefunctions, attrgenfunct, contdepfunct, generator, corruptor
4
+ import random
5
+ random.seed(42)
6
+
7
+
8
+ # Set the Unicode encoding for this data generation project. This needs to be
9
+ # changed to another encoding for different Unicode character sets.
10
+ # Valid encoding strings are listed here:
11
+ # http://docs.python.org/library/codecs.html#standard-encodings
12
+ #
13
+ unicode_encoding_used = 'cp932'
14
+
15
+ # The name of the record identifier attribute (unique value for each record).
16
+ # This name cannot be given as name to any other attribute that is generated.
17
+ #
18
+ rec_id_attr_name = 'rec-id'
19
+
20
+ # Set the file name of the data set to be generated (this will be a comma
21
+ # separated values, CSV, file).
22
+ #
23
+ out_file_name = 'example-data-japanese.csv'
24
+
25
+ # Set how many original and how many duplicate records are to be generated.
26
+ #
27
+ num_org_rec = 10000
28
+ num_dup_rec = 10000
29
+
30
+ # Set the maximum number of duplicate records that can be generated per original
31
+ # record.
32
+ #
33
+ max_duplicate_per_record = 3
34
+
35
+ # Set the probability distribution used to create the duplicate records for one
36
+ # original record (possible values are: 'uniform', 'poisson', 'zipf').
37
+ #
38
+ num_duplicates_distribution = 'zipf'
39
+
40
+ # Set the maximum number of modifications that can be applied to a single
41
+ # attribute (field).
42
+ #
43
+ max_modification_per_attr = 1
44
+
45
+ # Set the number of modifications that are to be applied to a record.
46
+ #
47
+ num_modification_per_record = 5
48
+
49
+ # Check that the given Unicode encoding is valid.
50
+ #
51
+ basefunctions.check_unicode_encoding_exists(unicode_encoding_used)
52
+
53
+ # -----------------------------------------------------------------------------
54
+ # Define the attributes to be generated (using methods from the generator.py
55
+ # module).
56
+ #
57
+ surname_attr = generator.GenerateFreqAttribute(
58
+ attribute_name='surname',
59
+ freq_file_name='surname-freq-japanese.csv',
60
+ has_header_line=False,
61
+ unicode_encoding=unicode_encoding_used,
62
+ )
63
+
64
+ credit_card_attr = generator.GenerateFuncAttribute(
65
+ attribute_name='credit-card-number', function=attrgenfunct.generate_credit_card_number
66
+ )
67
+
68
+ age_normal_attr = generator.GenerateFuncAttribute(
69
+ attribute_name='age',
70
+ function=attrgenfunct.generate_normal_age,
71
+ parameters=[45, 30, 0, 130],
72
+ )
73
+
74
+ gender_city_comp_attr = generator.GenerateCateCateCompoundAttribute(
75
+ categorical1_attribute_name='gender',
76
+ categorical2_attribute_name='city',
77
+ lookup_file_name='gender-city-japanese.csv',
78
+ has_header_line=False,
79
+ unicode_encoding=unicode_encoding_used,
80
+ )
81
+
82
+ # -----------------------------------------------------------------------------
83
+ # Define how the generated records are to be corrupted (using methods from
84
+ # the corruptor.py module).
85
+
86
+ # For a value edit corruptor, the sum of the four probabilities given must
87
+ # be 1.0.
88
+ #
89
+ surname_misspell_corruptor = corruptor.CorruptCategoricalValue(
90
+ lookup_file_name='surname-misspell-japanese.csv',
91
+ has_header_line=False,
92
+ unicode_encoding=unicode_encoding_used,
93
+ )
94
+
95
+ edit_corruptor = corruptor.CorruptValueEdit(
96
+ position_function=corruptor.position_mod_normal,
97
+ char_set_funct=basefunctions.char_set_ascii,
98
+ insert_prob=0.0,
99
+ delete_prob=0.0,
100
+ substitute_prob=0.6,
101
+ transpose_prob=0.4,
102
+ )
103
+
104
+ missing_val_corruptor = corruptor.CorruptMissingValue()
105
+
106
+ # -----------------------------------------------------------------------------
107
+ # Define the attributes to be generated for this data set, and the data set
108
+ # itself.
109
+ #
110
+ attr_name_list = ['surname', 'age', 'gender', 'city', 'credit-card-number']
111
+
112
+ attr_data_list = [surname_attr, credit_card_attr, age_normal_attr, gender_city_comp_attr]
113
+
114
+ # Nothing to change here - set-up the data set generation object.
115
+ #
116
+ test_data_generator = generator.GenerateDataSet(
117
+ output_file_name=out_file_name,
118
+ write_header_line=True,
119
+ rec_id_attr_name=rec_id_attr_name,
120
+ number_of_records=num_org_rec,
121
+ attribute_name_list=attr_name_list,
122
+ attribute_data_list=attr_data_list,
123
+ unicode_encoding=unicode_encoding_used,
124
+ )
125
+
126
+ # Define the probability distribution of how likely an attribute will be
127
+ # selected for a modification.
128
+ # Each of the given probability values must be between 0 and 1, and the sum of
129
+ # them must be 1.0.
130
+ # If a probability is set to 0 for a certain attribute, then no modification
131
+ # will be applied on this attribute.
132
+ #
133
+ attr_mod_prob_dictionary = {
134
+ 'surname': 0.5,
135
+ 'age': 0.2,
136
+ 'gender': 0.05,
137
+ 'city': 0.05,
138
+ 'credit-card-number': 0.2,
139
+ }
140
+
141
+ # Define the actual corruption (modification) methods that will be applied on
142
+ # the different attributes.
143
+ # For each attribute, the sum of probabilities given must sum to 1.0.
144
+ #
145
+ attr_mod_data_dictionary = {
146
+ 'surname': [(0.9, surname_misspell_corruptor), (0.1, missing_val_corruptor)],
147
+ 'age': [(0.1, missing_val_corruptor), (0.9, edit_corruptor)],
148
+ 'gender': [(1.0, missing_val_corruptor)],
149
+ 'city': [(1.0, missing_val_corruptor)],
150
+ 'credit-card-number': [(0.1, missing_val_corruptor), (0.9, edit_corruptor)],
151
+ }
152
+
153
+ # Nothing to change here - set-up the data set corruption object
154
+ #
155
+ test_data_corruptor = corruptor.CorruptDataSet(
156
+ number_of_org_records=num_org_rec,
157
+ number_of_mod_records=num_dup_rec,
158
+ attribute_name_list=attr_name_list,
159
+ max_num_dup_per_rec=max_duplicate_per_record,
160
+ num_dup_dist=num_duplicates_distribution,
161
+ max_num_mod_per_attr=max_modification_per_attr,
162
+ num_mod_per_rec=num_modification_per_record,
163
+ attr_mod_prob_dict=attr_mod_prob_dictionary,
164
+ attr_mod_data_dict=attr_mod_data_dictionary,
165
+ )
166
+
167
+ # =============================================================================
168
+ # No need to change anything below here
169
+
170
+ # Start the generation process
171
+ #
172
+ rec_dict = test_data_generator.generate()
173
+
174
+ assert len(rec_dict) == num_org_rec # Check the number of generated records
175
+
176
+ # Corrupt (modify) the original records into duplicate records
177
+ #
178
+ rec_dict = test_data_corruptor.corrupt_records(rec_dict)
179
+
180
+ assert len(rec_dict) == num_org_rec + num_dup_rec # Check total number of records
181
+
182
+ # Write the generated data into a file
183
+ #
184
+ test_data_generator.write()
185
+
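This variant differs from the English example mainly in its encoding: the lookup files are read as cp932 and the same encoding is passed to GenerateDataSet, so the output CSV should be cp932-encoded as well. A small read-back sketch, assuming the script above has already produced example-data-japanese.csv; opening the file with the wrong encoding would raise UnicodeDecodeError or mangle the Japanese surnames.

with open('example-data-japanese.csv', encoding='cp932') as in_file:
    header = in_file.readline().rstrip().split(',')
    first_rec = in_file.readline().rstrip().split(',')

print(header)      # header line written by GenerateDataSet
print(first_rec)   # first generated record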
geco_data_generator/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
geco_data_generator/attrgenfunct.py ADDED
@@ -0,0 +1,316 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Functions to generate independent attribute values
4
+ import random
5
+
6
+ from geco_data_generator import basefunctions
7
+
8
+
9
+
10
+
11
+ def generate_phone_number_australia():
12
+ """
13
+ Randomly generate an Australian telephone number made of a two-digit area
14
+ code and an eight-digit number made of two blocks of four digits (with a
15
+ space between). For example: `02 1234 5678'
16
+
17
+ For details see: http://en.wikipedia.org/wiki/ \
18
+ Telephone_numbers_in_Australia#Personal_numbers_.2805.29
19
+ """
20
+
21
+ area_code = random.choice(['02', '03', '04', '07', '08'])
22
+
23
+ number1 = random.randint(1, 9999)
24
+ number2 = random.randint(1, 9999)
25
+
26
+ oz_phone_str = (
27
+ str(area_code) + ' ' + str(number1).zfill(4) + ' ' + str(number2).zfill(4)
28
+ )
29
+ assert len(oz_phone_str) == 12
30
+ assert oz_phone_str[0] == '0'
31
+
32
+ return oz_phone_str
33
+
34
+
35
+ def generate_credit_card_number():
36
+ """Randomly generate a credit card made of four four-digit numbers (with a
37
+ space between each number group). For example: '1234 5678 9012 3456'
38
+
39
+ For details see: http://en.wikipedia.org/wiki/Bank_card_number
40
+ """
41
+
42
+ number1 = random.randint(1, 9999)
43
+ assert number1 > 0
44
+
45
+ number2 = random.randint(1, 9999)
46
+ assert number2 > 0
47
+
48
+ number3 = random.randint(1, 9999)
49
+ assert number3 > 0
50
+
51
+ number4 = random.randint(1, 9999)
52
+ assert number4 > 0
53
+
54
+ cc_str = (
55
+ str(number1).zfill(4)
56
+ + ' '
57
+ + str(number2).zfill(4)
58
+ + ' '
59
+ + str(number3).zfill(4)
60
+ + ' '
61
+ + str(number4).zfill(4)
62
+ )
63
+
64
+ assert len(cc_str) == 19
65
+
66
+ return cc_str
67
+
68
+
69
+ #
70
+ def generate_uniform_value(min_val, max_val, val_type):
71
+ """Randomly generate a numerical value according to a uniform distribution
72
+ between the minimum and maximum values given.
73
+
74
+ The value type can be set as 'int', so a string formatted as an integer
75
+ value is returned; or as 'float1' to 'float9', in which case a string
76
+ formatted as floating-point value with the specified number of digits
77
+ behind the comma is returned.
78
+
79
+ Note that for certain situations and string formats a value outside the
80
+ set range might be returned. For example, if min_val=100.25 and
81
+ val_type='float1' the rounding can mean that a string value '100.2' will
82
+ be returned.
83
+
84
+ Suitable minimum and maximum values need to be selected to prevent such a
85
+ situation.
86
+ """
87
+
88
+ basefunctions.check_is_number('min_val', min_val)
89
+ basefunctions.check_is_number('max_val', max_val)
90
+ assert min_val < max_val
91
+
92
+ r = random.uniform(min_val, max_val)
93
+
94
+ return basefunctions.float_to_str(r, val_type)
95
+
96
+
97
+ #
98
+ def generate_uniform_age(min_val, max_val):
99
+ """Randomly generate an age value (returned as integer) according to a
100
+ uniform distribution between the minimum and maximum values given.
101
+
102
+ This function is simply a shorthand for:
103
+
104
+ generate_uniform_value(min_val, max_val, 'int')
105
+ """
106
+
107
+ assert min_val >= 0
108
+ assert max_val <= 130
109
+
110
+ return generate_uniform_value(min_val, max_val, 'int')
111
+
112
+
113
+ def generate_normal_value(mu, sigma, min_val, max_val, val_type):
114
+ """Randomly generate a numerical value according to a normal distribution
115
+ with the mean (mu) and standard deviation (sigma) given.
116
+
117
+ A minimum and maximum allowed value can given as additional parameters,
118
+ if set to None then no minimum and/or maximum limit is set.
119
+
120
+ The value type can be set as 'int', so a string formatted as an integer
121
+ value is returned; or as 'float1' to 'float9', in which case a string
122
+ formatted as floating-point value with the specified number of digits
123
+ behind the comma is returned.
124
+ """
125
+
126
+ basefunctions.check_is_number('mu', mu)
127
+ basefunctions.check_is_number('sigma', sigma)
128
+ assert sigma > 0.0
129
+
130
+ if min_val != None:
131
+ basefunctions.check_is_number('min_val', min_val)
132
+ assert min_val <= mu
133
+
134
+ if max_val != None:
135
+ basefunctions.check_is_number('max_val', max_val)
136
+ assert max_val >= mu
137
+
138
+ if (min_val != None) and (max_val != None):
139
+ assert min_val < max_val
140
+
141
+ if (min_val != None) or (max_val != None):
142
+ in_range = False # For testing if the random value is with the range
143
+ else:
144
+ in_range = True
145
+
146
+ r = random.normalvariate(mu, sigma)
147
+
148
+ while in_range == False:
149
+ if (min_val == None) or ((min_val != None) and (r >= min_val)):
150
+ in_range = True
151
+
152
+ if (max_val != None) and (r > max_val):
153
+ in_range = False
154
+
155
+ if in_range == True:
156
+ r_str = basefunctions.float_to_str(r, val_type)
157
+ r_test = float(r_str)
158
+ if (min_val != None) and (r_test < min_val):
159
+ in_range = False
160
+ if (max_val != None) and (r_test > max_val):
161
+ in_range = False
162
+
163
+ if in_range == False:
164
+ r = random.normalvariate(mu, sigma)
165
+
166
+ if min_val != None:
167
+ assert r >= min_val
168
+ if max_val != None:
169
+ assert r <= max_val
170
+
171
+ return basefunctions.float_to_str(r, val_type)
172
+
173
+
174
+ #
175
+ def generate_normal_age(mu, sigma, min_val, max_val):
176
+ """Randomly generate an age value (returned as integer) according to a
177
+ normal distribution following the mean and standard deviation values
178
+ given, and limited to age values between (including) the minimum and
179
+ maximum values given.
180
+
181
+ This function is simply a shorthand for:
182
+
183
+ generate_normal_value(mu, sigma, min_val, max_val, 'int')
184
+ """
185
+
186
+ assert min_val >= 0
187
+ assert max_val <= 130
188
+
189
+ age = generate_normal_value(mu, sigma, min_val, max_val, 'int')
190
+
191
+ while (int(age) < min_val) or (int(age) > max_val):
192
+ age = generate_normal_value(mu, sigma, min_val, max_val, 'int')
193
+
194
+ return age
195
+
196
+
197
+ # =============================================================================
198
+
199
+ # If called from command line perform some examples: Generate values
200
+ #
201
+ if __name__ == '__main__':
202
+
203
+ num_test = 20
204
+
205
+ print('Generate %d Australian telephone numbers:' % (num_test))
206
+ for i in range(num_test):
207
+ print(' ', generate_phone_number_australia())
208
+ print()
209
+
210
+ print('Generate %d credit card numbers:' % (num_test))
211
+ for i in range(num_test):
212
+ print(' ', generate_credit_card_number())
213
+ print()
214
+
215
+ print(
216
+ 'Generate %d uniformly distributed integer numbers between -100' % (num_test)
217
+ + ' and -5:'
218
+ )
219
+ for i in range(num_test):
220
+ print(
221
+ ' ',
222
+ generate_uniform_value(-100, -5, 'int'),
223
+ )
224
+ print()
225
+
226
+ print(
227
+ 'Generate %d uniformly distributed floating-point numbers with ' % (num_test)
228
+ + '3 digits between -55 and 55:'
229
+ )
230
+ for i in range(num_test):
231
+ print(' ', generate_uniform_value(-55, 55, 'float3'))
232
+ print()
233
+
234
+ print(
235
+ 'Generate %d uniformly distributed floating-point numbers with ' % (num_test)
236
+ + '7 digits between 147 and 9843:'
237
+ )
238
+ for i in range(num_test):
239
+ print(' ', generate_uniform_value(147, 9843, 'float7'))
240
+ print()
241
+
242
+ print('Generate %d uniformly distributed age values between 0 and 120:' % (num_test))
243
+ for i in range(num_test):
244
+ print(' ', generate_uniform_age(0, 120))
245
+ print()
246
+
247
+ print('Generate %d uniformly distributed age values between 18 and 65:' % (num_test))
248
+ for i in range(num_test):
249
+ print(' ', generate_uniform_age(18, 65))
250
+ print()
251
+
252
+ print(
253
+ 'Generate %d normally distributed integer numbers between -200' % (num_test)
254
+ + ' and -3 with mean -50 and standard deviation 44:'
255
+ )
256
+ for i in range(num_test):
257
+ print(' ', generate_normal_value(-50, 44, -200, -3, 'int'))
258
+ print()
259
+
260
+ print(
261
+ 'Generate %d normally distributed floating-point numbers with ' % (num_test)
262
+ + '5 digits between -100 and 100 and with mean 22 and '
263
+ + 'standard deviation 74:'
264
+ )
265
+ for i in range(num_test):
266
+ print(' ', generate_normal_value(22, 74, -100, 100, 'float5'))
267
+ print()
268
+
269
+ print(
270
+ 'Generate %d normally distributed floating-point numbers with ' % (num_test)
271
+ + '9 digits with mean 22 and standard deviation 74:'
272
+ )
273
+ for i in range(num_test):
274
+ print(
275
+ ' ',
276
+ generate_normal_value(22, 74, min_val=None, max_val=None, val_type='float9'),
277
+ )
278
+ print()
279
+
280
+ print(
281
+ 'Generate %d normally distributed floating-point numbers with ' % (num_test)
282
+ '2 digits with mean 22 and standard deviation 74 that'
283
+ + ' are larger than 10:'
284
+ )
285
+ for i in range(num_test):
286
+ print(
287
+ ' ', generate_normal_value(22, 74, min_val=10, max_val=None, val_type='float2')
288
+ )
289
+ print()
290
+
291
+ print(
292
+ 'Generate %d normally distributed floating-point numbers with ' % (num_test)
293
+ '4 digits with mean 22 and standard deviation 74 that'
294
+ ' are smaller than 40:'
295
+ )
296
+ for i in range(num_test):
297
+ print(
298
+ ' ', generate_normal_value(22, 74, min_val=None, max_val=40, val_type='float4')
299
+ )
300
+ print()
301
+
302
+ print(
303
+ 'Generate %d normally distributed age values between 0 and 120' % (num_test)
304
+ + ' with mean 45 and standard deviation 22:'
305
+ )
306
+ for i in range(num_test):
307
+ print(' ', generate_normal_age(45, 22, 0, 120))
308
+ print()
309
+
310
+ print(
311
+ 'Generate %d normally distributed age values between 18 and 65' % (num_test)
312
+ + ' with mean 30 and standard deviation 10:'
313
+ )
314
+ for i in range(num_test):
315
+ print(' ', generate_normal_age(30, 10, 18, 65))
316
+ print()
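Outside the __main__ demo above, the same functions can be called directly. All of them return strings (the numeric generators format their values via basefunctions.float_to_str), which is what the GenerateFuncAttribute wrapper in the example scripts expects. A minimal sketch, seeding the module-level RNG the way the example scripts do:

import random
from geco_data_generator import attrgenfunct

random.seed(42)

print(attrgenfunct.generate_phone_number_australia())   # e.g. '02 1234 5678' style
print(attrgenfunct.generate_credit_card_number())       # four 4-digit groups
print(attrgenfunct.generate_uniform_value(147, 9843, 'float7'))
print(attrgenfunct.generate_uniform_age(18, 65))
print(attrgenfunct.generate_normal_value(50000, 20000, 0, 1000000, 'float2'))
print(attrgenfunct.generate_normal_age(45, 22, 0, 120))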
geco_data_generator/basefunctions.py ADDED
@@ -0,0 +1,614 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Helper functions
4
+ import codecs # Used to read and write Unicode files
5
+ import os
6
+ import types
7
+
8
+ HERE = os.path.abspath(os.path.dirname(__file__))
9
+ DATA = os.path.join(HERE, 'data')
10
+
11
+ def check_is_not_none(variable, value):
12
+ """Check if the value given is not None.
13
+
14
+ The argument 'variable' needs to be set to the name (as a string) of the
15
+ value which is checked.
16
+ """
17
+
18
+ check_is_non_empty_string('variable', variable)
19
+
20
+ if value == None:
21
+ raise Exception('Value of "%s" is None' % (variable))
22
+
23
+
24
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
25
+
26
+
27
+ def check_is_string(variable, value):
28
+ """Check if the value given is of type string.
29
+
30
+ The argument 'variable' needs to be set to the name (as a string) of the
31
+ value which is checked.
32
+ """
33
+
34
+ check_is_non_empty_string('variable', variable)
35
+
36
+ if not isinstance(value, str):
37
+ raise Exception(
38
+ 'Value of "%s" is not a string: %s (%s)' % (variable, str(value), type(value))
39
+ )
40
+
41
+
42
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
43
+
44
+
45
+ def check_is_unicode_string(variable, value):
46
+ """Check if the value given is of type unicode string.
47
+
48
+ The argument 'variable' needs to be set to the name (as a string) of the
49
+ value which is checked.
50
+ """
51
+
52
+ check_is_non_empty_string('variable', variable)
53
+
54
+
55
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
56
+
57
+
58
+ def check_is_string_or_unicode_string(variable, value):
59
+ """Check if the value given is of type string or unicode string.
60
+
61
+ The argument 'variable' needs to be set to the name (as a string) of the
62
+ value which is checked.
63
+ """
64
+
65
+ check_is_non_empty_string('variable', variable)
66
+
67
+ if not isinstance(value, str):
68
+ raise Exception(
69
+ 'Value of "%s" is neither a string nor a Unicode ' % (variable)
70
+ + 'string: %s (%s)' % (str(value), type(value))
71
+ )
72
+
73
+
74
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
75
+
76
+
77
+ def check_is_non_empty_string(variable, value):
78
+ """Check if the value given is of type string and is not an empty string.
79
+
80
+ The argument 'variable' needs to be set to the name (as a string) of the
81
+ value which is checked.
82
+ """
83
+
84
+ if (not isinstance(variable, str)) or (variable == ''):
85
+ raise Exception(
86
+ 'Value of "variable" is not a non-empty string: %s (%s)'
87
+ % (str(variable), type(variable))
88
+ )
89
+
90
+ if (not isinstance(value, str)) or (value == ''):
91
+ raise Exception(
92
+ 'Value of "%s" is not a non-empty string: %s (%s)'
93
+ % (variable, str(value), type(value))
94
+ )
95
+
96
+
97
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
98
+
99
+
100
+ def check_is_number(variable, value):
101
+ """Check if the value given is a number, i.e. of type integer or float.
102
+
103
+ The argument 'variable' needs to be set to the name (as a string) of the
104
+ value which is checked.
105
+ """
106
+
107
+ check_is_non_empty_string('variable', variable)
108
+
109
+ if (not isinstance(value, int)) and (not isinstance(value, float)):
110
+ raise Exception(
111
+ 'Value of "%s" is not a number: %s (%s)' % (variable, str(value), type(value))
112
+ )
113
+
114
+
115
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
116
+
117
+
118
+ def check_is_positive(variable, value):
119
+ """Check if the value given is a positive number, i.e. of type integer or
120
+ float, and larger than zero.
121
+
122
+ The argument 'variable' needs to be set to the name (as a string) of the
123
+ value which is checked.
124
+ """
125
+
126
+ check_is_non_empty_string('variable', variable)
127
+
128
+ if (not isinstance(value, int)) and (not isinstance(value, float)) or (value <= 0.0):
129
+ raise Exception(
130
+ 'Value of "%s" is not a positive number: ' % (variable)
131
+ + '%s (%s)' % (str(value), type(value))
132
+ )
133
+
134
+
135
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
136
+
137
+
138
+ def check_is_not_negative(variable, value):
139
+ """Check if the value given is a non-negative number, i.e. of type integer or
140
+ float, and larger than or equal to zero.
141
+
142
+ The argument 'variable' needs to be set to the name (as a string) of the
143
+ value which is checked.
144
+ """
145
+
146
+ check_is_non_empty_string('variable', variable)
147
+
148
+ if (not isinstance(value, int)) and (not isinstance(value, float)) or (value < 0.0):
149
+ raise Exception(
150
+ 'Value of "%s" is not a number or it is a ' % (variable)
151
+ + 'negative number: %s (%s)' % (str(value), type(value))
152
+ )
153
+
154
+
155
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
156
+
157
+
158
+ def check_is_normalised(variable, value):
159
+ """Check if the value given is a number, i.e. of type integer or float, and
160
+ between (including) 0.0 and 1.0.
161
+
162
+ The argument 'variable' needs to be set to the name (as a string) of the
163
+ value which is checked.
164
+ """
165
+
166
+ check_is_non_empty_string('variable', variable)
167
+
168
+ if (
169
+ (not isinstance(value, int))
170
+ and (not isinstance(value, float))
171
+ or (value < 0.0)
172
+ or (value > 1.0)
173
+ ):
174
+ raise Exception(
175
+ 'Value of "%s" is not a normalised number ' % (variable)
176
+ + '(between 0.0 and 1.0): %s (%s)' % (str(value), type(value))
177
+ )
178
+
179
+
180
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
181
+
182
+
183
+ def check_is_percentage(variable, value):
184
+ """Check if the value given is a number, i.e. of type integer or float, and
185
+ between (including) 0 and 100.
186
+
187
+ The argument 'variable' needs to be set to the name (as a string) of the
188
+ value which is checked.
189
+ """
190
+
191
+ check_is_non_empty_string('variable', variable)
192
+
193
+ if (
194
+ (not isinstance(value, int))
195
+ and (not isinstance(value, float))
196
+ or (value < 0.0)
197
+ or (value > 100.0)
198
+ ):
199
+ raise Exception(
200
+ 'Value of "%s" is not a percentage number ' % (variable)
201
+ + '(between 0.0 and 100.0): %s (%s)' % (str(value), type(value))
202
+ )
203
+
204
+
205
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
206
+
207
+
208
+ def check_is_integer(variable, value):
209
+ """Check if the value given is an integer number.
210
+
211
+ The argument 'variable' needs to be set to the name (as a string) of the
212
+ value which is checked.
213
+ """
214
+
215
+ check_is_non_empty_string('variable', variable)
216
+
217
+ if not isinstance(value, int):
218
+ raise Exception(
219
+ 'Value of "%s" is not an integer: %s (%s)'
220
+ % (variable, str(value), type(value))
221
+ )
222
+
223
+
224
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
225
+
226
+
227
+ def check_is_float(variable, value):
228
+ """Check if the value given is a floating-point number.
229
+
230
+ The argument 'variable' needs to be set to the name (as a string) of the
231
+ value which is checked.
232
+ """
233
+
234
+ check_is_non_empty_string('variable', variable)
235
+
236
+ if not isinstance(value, float):
237
+ raise Exception(
238
+ 'Value of "%s" is not a floating point ' % (variable)
239
+ + 'number: %s (%s)' % (str(value), type(value))
240
+ )
241
+
242
+
243
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
244
+
245
+
246
+ def check_is_dictionary(variable, value):
247
+ """Check if the value given is of type dictionary.
248
+
249
+ The argument 'variable' needs to be set to the name (as a string) of the
250
+ value which is checked.
251
+ """
252
+
253
+ check_is_non_empty_string('variable', variable)
254
+
255
+ if not isinstance(value, dict):
256
+ raise Exception('Value of "%s" is not a dictionary: %s' % (variable, type(value)))
257
+
258
+
259
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
260
+
261
+
262
+ def check_is_list(variable, value):
263
+ """Check if the value given is of type dictionary.
264
+
265
+ The argument 'variable' needs to be set to the name (as a string) of the
266
+ value which is checked.
267
+ """
268
+
269
+ check_is_non_empty_string('variable', variable)
270
+
271
+ if not isinstance(value, list):
272
+ raise Exception('Value of "%s" is not a list: %s' % (variable, type(value)))
273
+
274
+
275
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
276
+
277
+
278
+ def check_is_set(variable, value):
279
+ """Check if the value given is of type set.
280
+
281
+ The argument 'variable' needs to be set to the name (as a string) of the
282
+ value which is checked.
283
+ """
284
+
285
+ check_is_non_empty_string('variable', variable)
286
+
287
+ if not isinstance(value, set):
288
+ raise Exception('Value of "%s" is not a set: %s' % (variable, type(value)))
289
+
290
+
291
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
292
+
293
+
294
+ def check_is_tuple(variable, value):
295
+ """Check if the value given is of type tuple.
296
+
297
+ The argument 'variable' needs to be set to the name (as a string) of the
298
+ value which is checked.
299
+ """
300
+
301
+ check_is_non_empty_string('variable', variable)
302
+
303
+ if not isinstance(value, tuple):
304
+ raise Exception('Value of "%s" is not a tuple: %s' % (variable, type(value)))
305
+
306
+
307
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
308
+
309
+
310
+ def check_is_flag(variable, value):
311
+ """Check if the value given is either True or False.
312
+
313
+ The argument 'variable' needs to be set to the name (as a string) of the
314
+ value which is checked.
315
+ """
316
+
317
+ check_is_non_empty_string('variable', variable)
318
+
319
+ if value not in [True, False]:
320
+ raise Exception('Value of "%s" is not True or False: %s' % (variable, str(value)))
321
+
322
+
323
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
324
+
325
+
326
+ def check_is_function_or_method(variable, value):
327
+ """Check if the value given is a function or method.
328
+
329
+ The argument 'variable' needs to be set to the name (as a string) of the
330
+ value which is checked.
331
+ """
332
+
333
+ check_is_non_empty_string('variable', variable)
334
+
335
+ if type(value) not in [types.FunctionType, types.MethodType]:
336
+ raise Exception(
337
+ '%s is not a function or method: %s' % (str(variable), type(value))
338
+ )
339
+
340
+
341
+ def check_unicode_encoding_exists(unicode_encoding_str):
342
+ """A function which checks if the given Unicode encoding string is known to
343
+ the Python codec registry.
344
+
345
+ If the string is unknown this functions ends with an exception.
346
+ """
347
+
348
+ check_is_string_or_unicode_string('unicode_encoding_str', unicode_encoding_str)
349
+
350
+ try:
351
+ codecs.lookup(unicode_encoding_str)
352
+ except:
353
+ raise Exception('Unknown Unicode encoding string: "%s"' % (unicode_encoding_str))
354
+
355
+
356
+ def char_set_ascii(s):
357
+ """Determine if the input string contains digits, letters, or both, as well
358
+ as whitespaces or not.
359
+
360
+ Returns a string containing the set of corresponding characters.
361
+ """
362
+
363
+ check_is_string_or_unicode_string('s', s)
364
+
365
+ if len(s) == 0:
366
+ return ''
367
+
368
+ if ' ' in s:
369
+ includes_spaces = True
370
+ else:
371
+ includes_spaces = False
372
+
373
+ # Remove whitespaces
374
+ #
375
+ check_str = s.replace(' ', '')
376
+
377
+ # Check if string contains characters other than alpha-numeric characters
378
+ #
379
+ if check_str.isalnum() == False:
380
+ return ''
381
+
382
+ # Return an empty string rather than stopping program
383
+ #
384
+ # raise Exception, 'The string "%s" contains characters other than ' % \
385
+ # (check_str) + 'alpha numeric and whitespace'
386
+
387
+ # Check if string contains letters only, digits only, or both
388
+ #
389
+ if check_str.isdigit() == True:
390
+ char_set = '0123456789'
391
+ elif check_str.isalpha() == True:
392
+ char_set = 'abcdefghijklmnopqrstuvwxyz'
393
+ else:
394
+ char_set = 'abcdefghijklmnopqrstuvwxyz0123456789'
395
+
396
+ if includes_spaces == True:
397
+ char_set += ' '
398
+
399
+ return char_set
400
+
401
+
402
+ def check_is_valid_format_str(variable, value):
403
+ """Check if the value given is a valid formatting string for numbers.
404
+ Possible formatting values are:
405
+
406
+ int, float1, float2, float3, float4, float5, float6, float7, float8, or
407
+ float9
408
+
409
+ The argument 'variable' needs to be set to the name (as a string) of the
410
+ value which is checked.
411
+ """
412
+
413
+ check_is_non_empty_string('variable', variable)
414
+
415
+ if value not in [
416
+ 'int',
417
+ 'float1',
418
+ 'float2',
419
+ 'float3',
420
+ 'float4',
421
+ 'float5',
422
+ 'float6',
423
+ 'float7',
424
+ 'float8',
425
+ 'float9',
426
+ ]:
427
+ raise Exception(
428
+ '%s is not a validformat string: %s' % (str(variable), type(value))
429
+ )
430
+
431
+
432
+ def float_to_str(f, format_str):
433
+ """Convert the given floating-point (or integer) number into a string
434
+ according to the format string given.
435
+
436
+ The format string can be one of 'int' (return a string that corresponds to
437
+ an integer value), or 'float1', 'float2', ..., 'float9' which returns a
438
+ string of the number with the specified number of digits behind the comma.
439
+ """
440
+
441
+ check_is_number('f', f)
442
+
443
+ check_is_string('format_str', format_str)
444
+ check_is_valid_format_str('format_str', format_str)
445
+
446
+ if format_str == 'int':
447
+ f_str = '%.0f' % (f)
448
+ elif format_str == 'float1':
449
+ f_str = '%.1f' % (f)
450
+ elif format_str == 'float2':
451
+ f_str = '%.2f' % (f)
452
+ elif format_str == 'float3':
453
+ f_str = '%.3f' % (f)
454
+ elif format_str == 'float4':
455
+ f_str = '%.4f' % (f)
456
+ elif format_str == 'float5':
457
+ f_str = '%.5f' % (f)
458
+ elif format_str == 'float6':
459
+ f_str = '%.6f' % (f)
460
+ elif format_str == 'float7':
461
+ f_str = '%.7f' % (f)
462
+ elif format_str == 'float8':
463
+ f_str = '%.8f' % (f)
464
+ elif format_str == 'float9':
465
+ f_str = '%.9f' % (f)
466
+ else:
467
+ raise Exception('Illegal string format given: "%s"' % (format_str))
468
+
469
+ return f_str
470
+
471
+
472
+ def str2comma_separated_list(s):
473
+ """A function which splits the values in a list at commas, and checks all
474
+ values if they are quoted (double or single) at both ends or not. Quotes
475
+ are removed.
476
+
477
+ Note that this function will split values that are quoted but contain one
478
+ or more commas into several values.
479
+ """
480
+
481
+ check_is_unicode_string('s', s)
482
+
483
+ in_list = s.split(',')
484
+ out_list = []
485
+
486
+ for e in in_list:
487
+ e = e.strip()
488
+ if (e.startswith('"') and e.endswith('"')) or (
489
+ e.startswith("'") and e.endswith("'")
490
+ ):
491
+ e = e[1:-1] # Remove quotes
492
+ out_list.append(e)
493
+
494
+ return out_list
495
+
496
+
497
+ def read_csv_file(file_name, encoding, header_line):
498
+ """Read a comma separated values (CSV) file from disk using the given Unicode
499
+ encoding.
500
+
501
+ Arguments:
502
+ file_name Name of the file to read.
503
+
504
+ encoding The name of a Unicode encoding to be used when reading the
505
+ file.
506
+ If set to None then the standard 'ascii' encoding will be
507
+ used.
508
+
509
+ header_line A flag, set to True or False, that has to be set according
510
+ to if the frequency file starts with a header line or not.
511
+
512
+ This function returns two items:
513
+ - If given, a list that contains the values in the header line of the
514
+ file. If no header line was given, this item will be set to None.
515
+
516
+ - A list containing the records in the CSV file, each as a list.
517
+
518
+ Notes:
519
+ - Lines starting with # are assumed to contain comments and will be
520
+ skipped. Lines that are empty will also be skipped.
521
+ - The CSV files must not contain commas in the values, while values
522
+ in quotes (double or single) can be handled.
523
+ """
524
+
525
+ check_is_string('file_name', file_name)
526
+ check_is_flag('header_line', header_line)
527
+
528
+ # if encoding == None: # Use default ASCII encoding
529
+ # encoding = 'ascii'
530
+ # check_is_string('encoding', encoding)
531
+ # check_unicode_encoding_exists(encoding)
532
+ if not os.path.exists(file_name):
533
+ file_name = os.path.join(DATA, file_name)
534
+ try:
535
+ in_file = open(os.path.join(DATA, file_name), encoding="utf-8")
536
+ except:
537
+ raise IOError('Cannot read CSV file "%s"' % (file_name))
538
+
539
+ if header_line == True:
540
+ header_line = in_file.readline()
541
+ # print 'Header line:', header_line
542
+
543
+ header_list = str2comma_separated_list(header_line)
544
+
545
+ else:
546
+ # print 'No header line'
547
+ header_list = None
548
+
549
+ file_data = []
550
+
551
+ for line_str in in_file:
552
+ line_str = line_str.strip()
553
+ if (line_str.startswith('#') == False) and (line_str != ''):
554
+ line_list = str2comma_separated_list(line_str)
555
+
556
+ file_data.append(line_list)
557
+
558
+ in_file.close()
559
+
560
+ return header_list, file_data
561
+
562
+
563
+ def write_csv_file(file_name, encoding, header_list, file_data):
564
+ """Write a comma separated values (CSV) file to disk using the given Unicode
565
+ encoding.
566
+
567
+ Arguments:
568
+ file_name Name of the file to write.
569
+
570
+ encoding The name of a Unicode encoding to be used when reading the
571
+ file.
572
+ If set to None then the standard 'ascii' encoding will be
573
+ used.
574
+
575
+ header_list A list containing the attribute (field) names to be written
576
+ at the beginning of the file.
577
+ If no header line is to be written then this argument needs
578
+ to be set to None.
579
+
580
+ file_data A list containing the records to be written into the CSV
581
+ file. Each record must be a list of values, and these values
582
+ will be concatenated with commas and written into the file.
583
+ It is assumed the values given do not contain comas.
584
+ """
585
+
586
+ check_is_string('file_name', file_name)
587
+ check_is_list('file_data', file_data)
588
+
589
+ if encoding == None: # Use default ASCII encoding
590
+ encoding = 'ascii'
591
+ check_is_string('encoding', encoding)
592
+ check_unicode_encoding_exists(encoding)
593
+
594
+ try:
595
+ out_file = codecs.open(file_name, 'w', encoding=encoding)
596
+ except:
597
+ raise IOError('Cannot write CSV file "%s"' % (file_name))
598
+
599
+ if header_list != None:
600
+ check_is_list('header_list', header_list)
601
+ header_str = ','.join(header_list)
602
+ # print 'Header line:', header_str
603
+ out_file.write(header_str + os.linesep)
604
+
605
+ i = 0
606
+ for rec_list in file_data:
607
+ check_is_list('rec_list %d' % (i), rec_list)
608
+
609
+ line_str = ','.join(rec_list)
610
+ out_file.write(line_str + os.linesep)
611
+
612
+ i += 1
613
+
614
+ out_file.close()
geco_data_generator/contdepfunct.py ADDED
@@ -0,0 +1,64 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Functions to generate dependent continuous attribute
4
+ import random
5
+
6
+
7
+ def blood_pressure_depending_on_age(age):
8
+ """Randomly generate a blood pressure value depending upon the given age
9
+ value.
10
+
11
+ It is assumed that for a given age value the blood pressure is normally
12
+ distributed with an average blood pressure of 75 at birth (age 0) and of
13
+ 90 at age 100, and standard deviation in blood pressure of 4.
14
+ """
15
+
16
+ if (not isinstance(age, int)) and (not isinstance(age, float)):
17
+ raise Exception('Age value given is not a number: %s' % (str(age)))
18
+
19
+ if (age < 0) or (age > 130):
20
+ raise Exception('Age value below 0 or above 130 given')
21
+
22
+ avrg_bp = 75.0 + age / 100.0
23
+
24
+ std_dev_bp = 4.0
25
+
26
+ bp = random.normalvariate(avrg_bp, std_dev_bp)
27
+
28
+ if bp < 0.0:
29
+ bp = 0.0
30
+ print('Warning, blood pressure value of 0.0 returned!')
31
+
32
+ return bp
33
+
34
+
35
+ # -----------------------------------------------------------------------------
36
+
37
+
38
+ def salary_depending_on_age(age):
39
+ """Randomly generate a salary value depending upon the given age value.
40
+
41
+ It is assumed that for a given age value the salary is uniformly
42
+ distributed with an average salary of between 20,000 at age 18 (salary
43
+ will be set to 0 if an age is below 18) and 80,000 at age 60.
44
+
45
+ The minimum salary will stay at 10,000 while the maximum salary will
46
+ increase from 30,000 at age 18 to 150,000 at age 60.
47
+ """
48
+
49
+ if (not isinstance(age, int)) and (not isinstance(age, float)):
50
+ raise Exception('Age value given is not a number: %s' % (str(age)))
51
+
52
+ if (age < 0) or (age > 130):
53
+ raise Exception('Age value below 0 or above 130 given')
54
+
55
+ if age < 18.0:
56
+ sal = 0.0
57
+
58
+ else:
59
+ min_sal = 10000.0
60
+ max_sal = 10000.0 + (age - 18.0) * (140000.0 / 42)
61
+
62
+ sal = random.uniform(min_sal, max_sal)
63
+
64
+ return sal
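Both functions take a single age value and return a float; this is how GenerateContContCompoundAttribute uses them in the English example (as the continuous2_function argument). A quick sketch:

import random
from geco_data_generator import contdepfunct

random.seed(42)

for age in (17, 25, 50, 75):
    bp = contdepfunct.blood_pressure_depending_on_age(age)
    sal = contdepfunct.salary_depending_on_age(age)
    print(age, round(bp, 1), round(sal, 1))   # salary is 0.0 below age 18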
geco_data_generator/corruptor.py ADDED
@@ -0,0 +1,2035 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Main classes to corrupt attribute values and records
4
+ import math
5
+ import random
6
+
7
+ from geco_data_generator import basefunctions
8
+
9
+
10
+ # =============================================================================
11
+ # Helper functions to randomly select a position for where to apply a
12
+ # modification
13
+
14
+
15
+ def position_mod_uniform(in_str):
16
+ """Select any position in the given input string with uniform likelihood.
17
+
18
+ Return 0 if the string is empty.
19
+ """
20
+
21
+ if in_str == '': # Empty input string
22
+ return 0
23
+
24
+ max_pos = len(in_str) - 1
25
+
26
+ pos = random.randint(0, max_pos) # String positions start at 0
27
+
28
+ return pos
29
+
30
+
31
+ # -----------------------------------------------------------------------------
32
+
33
+
34
+ def position_mod_normal(in_str):
35
+ """Select any position in the given input string with normally distributed
36
+ likelihood where the average of the normal distribution is set to one
37
+ character behind the middle of the string, and the standard deviation is
38
+ set to 1/4 of the string length.
39
+
40
+ This is based on studies on the distribution of errors in real text which
41
+ showed that errors such as typographical mistakes are more likely to
42
+ appear towards the middle and end of a string but not at the beginning.
43
+
44
+ Return 0 if the string is empty.
45
+ """
46
+
47
+ if in_str == '': # Empty input string
48
+ return 0
49
+
50
+ str_len = len(in_str)
51
+
52
+ mid_pos = str_len / 2.0 + 1
53
+ std_dev = str_len / 4.0
54
+ max_pos = str_len - 1
55
+
56
+ pos = int(round(random.gauss(mid_pos, std_dev)))
57
+ while (pos < 0) or (pos > max_pos):
58
+ pos = int(round(random.gauss(mid_pos, std_dev)))
59
+
60
+ return pos
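
To see the shape of the normally distributed position selection described above, a small sketch can tally how often each position is chosen (import path assumed from this package layout; the test string and sample size are arbitrary):

import random
from geco_data_generator import corruptor  # assumed package layout

random.seed(0)  # arbitrary seed
test_str = 'christen'
counts = [0] * len(test_str)
for _ in range(10000):
    counts[corruptor.position_mod_normal(test_str)] += 1
print(counts)  # expected: a peak just past the middle, very few hits at position 0
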
61
+
62
+
63
+ # =============================================================================
64
+ # Classes for corrupting a value in a single attribute (field) of the data set
65
+ # =============================================================================
66
+
67
+
68
+ class CorruptValue:
69
+ """Base class for the definition of corruptor that is applied on a single
70
+ attribute (field) in the data set.
71
+
72
+ This class and all of its derived classes provide methods that allow the
73
+ definition of how values in a single attribute are corrupted (modified)
74
+ and the parameters necessary for the corruption process.
75
+
76
+ The following variables need to be set when a CorruptValue instance is
77
+ initialised (with further parameters listed in the derived classes):
78
+
79
+ position_function A function that (somehow) determines the location
80
+ within a string value of where a modification
81
+ (corruption) is to be applied. The input of this
82
+ function is assumed to be a string and its return value
83
+ an integer number in the range of the length of the
84
+ given input string.
85
+ """
86
+
87
+ # ---------------------------------------------------------------------------
88
+
89
+ def __init__(self, base_kwargs):
90
+ """Constructor, set general attributes."""
91
+
92
+ # General attributes for all attribute corruptors.
93
+ #
94
+ self.position_function = None
95
+
96
+ # Process the keyword argument (all keywords specific to a certain data
97
+ # generator type were processed in the derived class constructor)
98
+ #
99
+ for (keyword, value) in base_kwargs.items():
100
+
101
+ if keyword.startswith('position'):
102
+ basefunctions.check_is_function_or_method('position_function', value)
103
+ self.position_function = value
104
+
105
+ else:
106
+ raise Exception(
107
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
108
+ )
109
+
110
+ basefunctions.check_is_function_or_method(
111
+ 'position_function', self.position_function
112
+ )
113
+
114
+ # Check if the position function does return an integer value
115
+ #
116
+ pos = self.position_function('test')
117
+ if (not isinstance(pos, int)) or (pos < 0) or (pos > 3):
118
+ raise Exception(
119
+ 'Position function returns an illegal value (either '
120
+ + 'not an integer or an integer out of range): %s' % (str(pos))
121
+ )
122
+
123
+ # ---------------------------------------------------------------------------
124
+
125
+ def corrupt_value(self, in_str):
126
+ """Method which corrupts the given input string and returns the modified
127
+ string.
128
+ See implementations in derived classes for details.
129
+ """
130
+
131
+ raise Exception('Override abstract method in derived class')
132
+
133
+
134
+ # =============================================================================
135
+
136
+
137
+ class CorruptMissingValue(CorruptValue):
138
+ """A corruptor method which simply sets an attribute value to a missing
139
+ value.
140
+
141
+ The additional argument (besides the base class argument
142
+ 'position_function') that has to be set when this attribute type is
143
+ initialised is:
144
+
145
+ missing_val The string which designates a missing value. Default value
146
+ is the empty string ''.
147
+
148
+ Note that the 'position_function' is not required by this corruptor
149
+ method.
150
+ """
151
+
152
+ # ---------------------------------------------------------------------------
153
+
154
+ def __init__(self, **kwargs):
155
+ """Constructor. Process the derived keywords first, then call the base
156
+ class constructor.
157
+ """
158
+
159
+ self.missing_val = ''
160
+ self.name = 'Missing value'
161
+
162
+ def dummy_position(s): # Define a dummy position function
163
+ return 0
164
+
165
+ # Process all keyword arguments
166
+ #
167
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
168
+
169
+ for (keyword, value) in kwargs.items():
170
+
171
+ if keyword.startswith('miss'):
172
+ basefunctions.check_is_string('missing_val', value)
173
+ self.missing_val = value
174
+
175
+ else:
176
+ base_kwargs[keyword] = value
177
+
178
+ base_kwargs['position_function'] = dummy_position
179
+
180
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
181
+
182
+ # ---------------------------------------------------------------------------
183
+
184
+ def corrupt_value(self, in_str):
185
+ """Simply return the missing value string."""
186
+
187
+ return self.missing_val
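
CorruptMissingValue is the simplest of the corruptors in this module, so a short usage sketch suffices (the replacement token 'n/a' is an arbitrary illustrative choice; import path assumed from this package layout):

from geco_data_generator import corruptor  # assumed package layout

missing_corruptor = corruptor.CorruptMissingValue(missing_val='n/a')  # 'n/a' is illustrative
print(missing_corruptor.corrupt_value('canberra'))  # prints 'n/a'
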
188
+
189
+
190
+ # =============================================================================
191
+
192
+
193
+ class CorruptValueEdit(CorruptValue):
194
+ """A simple corruptor which applies one edit operation on the given value.
195
+
196
+ Depending upon the content of the value (letters, digits or mixed), if the
197
+ edit operation is an insert or substitution a character from the same set
198
+ (letters, digits or both) is selected.
199
+
200
+ The additional arguments (besides the base class argument
201
+ 'position_function') that have to be set when this attribute type is
202
+ initialised are:
203
+
204
+ char_set_funct A function which determines the set of characters that
205
+ can be inserted or used for substitution
206
+ insert_prob These four values set the likelihood of which edit
207
+ delete_prob operation will be selected.
208
+ substitute_prob All four probability values must be between 0 and 1, and
209
+ transpose_prob the sum of these four values must be 1.0
210
+ """
211
+
212
+ # ---------------------------------------------------------------------------
213
+
214
+ def __init__(self, **kwargs):
215
+ """Constructor. Process the derived keywords first, then call the base
216
+ class constructor.
217
+ """
218
+
219
+ self.char_set_funct = None
220
+ self.insert_prob = None
221
+ self.delete_prob = None
222
+ self.substitute_prob = None
223
+ self.transpose_prob = None
224
+ self.name = 'Edit operation'
225
+
226
+ # Process all keyword arguments
227
+ #
228
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
229
+
230
+ for (keyword, value) in kwargs.items():
231
+
232
+ if keyword.startswith('char'):
233
+ basefunctions.check_is_function_or_method('char_set_funct', value)
234
+ self.char_set_funct = value
235
+
236
+ elif keyword.startswith('ins'):
237
+ basefunctions.check_is_normalised('insert_prob', value)
238
+ self.insert_prob = value
239
+
240
+ elif keyword.startswith('del'):
241
+ basefunctions.check_is_normalised('delete_prob', value)
242
+ self.delete_prob = value
243
+
244
+ elif keyword.startswith('sub'):
245
+ basefunctions.check_is_normalised('substitute_prob', value)
246
+ self.substitute_prob = value
247
+
248
+ elif keyword.startswith('tran'):
249
+ basefunctions.check_is_normalised('transpose_prob', value)
250
+ self.transpose_prob = value
251
+
252
+ else:
253
+ base_kwargs[keyword] = value
254
+
255
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
256
+
257
+ # Check if the necessary variables have been set
258
+ #
259
+ basefunctions.check_is_function_or_method('char_set_funct', self.char_set_funct)
260
+ basefunctions.check_is_normalised('insert_prob', self.insert_prob)
261
+ basefunctions.check_is_normalised('delete_prob', self.delete_prob)
262
+ basefunctions.check_is_normalised('substitute_prob', self.substitute_prob)
263
+ basefunctions.check_is_normalised('transpose_prob', self.transpose_prob)
264
+
265
+ # Check if the character set function returns a string
266
+ #
267
+ test_str = self.char_set_funct('test') # This might become a problem
268
+ basefunctions.check_is_string_or_unicode_string('test_str', test_str)
269
+
270
+ if (
271
+ abs(
272
+ (
273
+ self.insert_prob
274
+ + self.delete_prob
275
+ + self.substitute_prob
276
+ + self.transpose_prob
277
+ )
278
+ - 1.0
279
+ )
280
+ > 0.0000001
281
+ ):
282
+ raise Exception('The four edit probabilities do not sum to 1.0')
283
+
284
+ # Calculate the probability ranges for the four edit operations
285
+ #
286
+ self.insert_range = [0.0, self.insert_prob]
287
+ self.delete_range = [self.insert_range[1], self.insert_range[1] + self.delete_prob]
288
+ self.substitute_range = [
289
+ self.delete_range[1],
290
+ self.delete_range[1] + self.substitute_prob,
291
+ ]
292
+ self.transpose_range = [
293
+ self.substitute_range[1],
294
+ self.substitute_range[1] + self.transpose_prob,
295
+ ]
296
+ assert self.transpose_range[1] == 1.0
297
+
298
+ # ---------------------------------------------------------------------------
299
+
300
+ def corrupt_value(self, in_str):
301
+ """Method which corrupts the given input string and returns the modified
302
+ string by randomly selecting an edit operation and position in the
303
+ string where to apply this edit.
304
+ """
305
+
306
+ if len(in_str) == 0: # Empty string, no modification possible
307
+ return in_str
308
+
309
+ # Randomly select an edit operation
310
+ #
311
+ r = random.random()
312
+
313
+ if r < self.insert_range[1]:
314
+ edit_op = 'ins'
315
+ elif (r >= self.delete_range[0]) and (r < self.delete_range[1]):
316
+ edit_op = 'del'
317
+ elif (r >= self.substitute_range[0]) and (r < self.substitute_range[1]):
318
+ edit_op = 'sub'
319
+ else:
320
+ edit_op = 'tra'
321
+
322
+ # Do some checks that only a valid edit operation was selected
323
+ #
324
+ if edit_op == 'ins':
325
+ assert self.insert_prob > 0.0
326
+ elif edit_op == 'del':
327
+ assert self.delete_prob > 0.0
328
+ elif edit_op == 'sub':
329
+ assert self.substitute_prob > 0.0
330
+ else:
331
+ assert self.transpose_prob > 0.0
332
+
333
+ # If the input string is empty only insert is possible
334
+ #
335
+ if (len(in_str) == 0) and (edit_op != 'ins'):
336
+ return in_str # Return input string without modification
337
+
338
+ # If the input string only has one character then transposition is not
339
+ # possible
340
+ #
341
+ if (len(in_str) == 1) and (edit_op == 'tra'):
342
+ return in_str # Return input string without modification
343
+
344
+ # Position in string where to apply the modification
345
+ #
346
+ # For a transposition we cannot select the last position in the string
347
+ # while for an insert we can specify the position after the last
348
+ if edit_op == 'tra':
349
+ len_in_str = in_str[:-1]
350
+ elif edit_op == 'ins':
351
+ len_in_str = in_str + 'x'
352
+ else:
353
+ len_in_str = in_str
354
+ mod_pos = self.position_function(len_in_str)
355
+
356
+ # Get the set of possible characters that can be inserted or substituted
357
+ #
358
+ char_set = self.char_set_funct(in_str)
359
+
360
+ if char_set == '': # No possible value change
361
+ return in_str
362
+
363
+ if edit_op == 'ins': # Insert a character
364
+ ins_char = random.choice(char_set)
365
+ new_str = in_str[:mod_pos] + ins_char + in_str[mod_pos:]
366
+
367
+ elif edit_op == 'del': # Delete a character
368
+ new_str = in_str[:mod_pos] + in_str[mod_pos + 1 :]
369
+
370
+ elif edit_op == 'sub': # Substitute a character
371
+ sub_char = random.choice(char_set)
372
+ new_str = in_str[:mod_pos] + sub_char + in_str[mod_pos + 1 :]
373
+
374
+ else: # Transpose two characters
375
+ char1 = in_str[mod_pos]
376
+ char2 = in_str[mod_pos + 1]
377
+ new_str = in_str[:mod_pos] + char2 + char1 + in_str[mod_pos + 2 :]
378
+
379
+ return new_str
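
A minimal sketch of how this edit corruptor might be set up and applied; the character-set helper and the four equal probabilities below are illustrative choices, not values taken from the package's example scripts:

import random
from geco_data_generator import corruptor  # assumed package layout

def lowercase_char_set(in_str):
    # Illustrative helper: always offer lowercase letters for inserts/substitutions.
    return 'abcdefghijklmnopqrstuvwxyz'

edit_corruptor = corruptor.CorruptValueEdit(
    position_function=corruptor.position_mod_normal,
    char_set_funct=lowercase_char_set,
    insert_prob=0.25,
    delete_prob=0.25,
    substitute_prob=0.25,
    transpose_prob=0.25)

random.seed(7)  # arbitrary seed
print(edit_corruptor.corrupt_value('miller'))  # one random edit, e.g. a transposed character pair
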
380
+
381
+
382
+ # =============================================================================
383
+
384
+
385
+ class CorruptValueKeyboard(CorruptValue):
386
+ """Use a keyboard layout to simulate typing errors. They keyboard is
387
+ hard-coded into this method, but can be changed easily for different
388
+ keyboard layouts.
389
+
390
+ A character from the original input string will be randomly chosen using
391
+ the position function, and then a character from either the same row or
392
+ column in the keyboard will be selected.
393
+
394
+ The additional arguments (besides the base class argument
395
+ 'position_function') that have to be set when this attribute type is
396
+ initialised are:
397
+
398
+ row_prob The probability that a neighbouring character in the same row
399
+ is selected.
400
+
401
+ col_prob The probability that a neighbouring character in the same
402
+ column is selected.
403
+
404
+ The sum of row_prob and col_prob must be 1.0.
405
+ """
406
+
407
+ # ---------------------------------------------------------------------------
408
+
409
+ def __init__(self, **kwargs):
410
+ """Constructor. Process the derived keywords first, then call the base
411
+ class constructor.
412
+ """
413
+
414
+ self.row_prob = None
415
+ self.col_prob = None
416
+ self.name = 'Keyboard value'
417
+
418
+ # Process all keyword arguments
419
+ #
420
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
421
+
422
+ for (keyword, value) in kwargs.items():
423
+
424
+ if keyword.startswith('row'):
425
+ basefunctions.check_is_normalised('row_prob', value)
426
+ self.row_prob = value
427
+
428
+ elif keyword.startswith('col'):
429
+ basefunctions.check_is_normalised('col_prob', value)
430
+ self.col_prob = value
431
+
432
+ else:
433
+ base_kwargs[keyword] = value
434
+
435
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
436
+
437
+ # Check if the necessary variables have been set
438
+ #
439
+ basefunctions.check_is_normalised('row_prob', self.row_prob)
440
+ basefunctions.check_is_normalised('col_prob', self.col_prob)
441
+
442
+ if abs((self.row_prob + self.col_prob) - 1.0) > 0.0000001:
443
+ raise Exception('Row and column probabilities do not sum ' + 'to 1.0')
444
+
445
+ # Keyboard substitutions give two dictionaries with the neighbouring keys
446
+ # for all letters both for rows and columns (based on ideas implemented by
447
+ # Mauricio A. Hernandez in his dbgen).
448
+ # The following data structures assume a QWERTY keyboard layout
449
+ #
450
+ self.rows = {
451
+ 'a': 's',
452
+ 'b': 'vn',
453
+ 'c': 'xv',
454
+ 'd': 'sf',
455
+ 'e': 'wr',
456
+ 'f': 'dg',
457
+ 'g': 'fh',
458
+ 'h': 'gj',
459
+ 'i': 'uo',
460
+ 'j': 'hk',
461
+ 'k': 'jl',
462
+ 'l': 'k',
463
+ 'm': 'n',
464
+ 'n': 'bm',
465
+ 'o': 'ip',
466
+ 'p': 'o',
467
+ 'q': 'w',
468
+ 'r': 'et',
469
+ 's': 'ad',
470
+ 't': 'ry',
471
+ 'u': 'yi',
472
+ 'v': 'cb',
473
+ 'w': 'qe',
474
+ 'x': 'zc',
475
+ 'y': 'tu',
476
+ 'z': 'x',
477
+ '1': '2',
478
+ '2': '13',
479
+ '3': '24',
480
+ '4': '35',
481
+ '5': '46',
482
+ '6': '57',
483
+ '7': '68',
484
+ '8': '79',
485
+ '9': '80',
486
+ '0': '9',
487
+ }
488
+
489
+ self.cols = {
490
+ 'a': 'qzw',
491
+ 'b': 'gh',
492
+ 'c': 'df',
493
+ 'd': 'erc',
494
+ 'e': 'ds34',
495
+ 'f': 'rvc',
496
+ 'g': 'tbv',
497
+ 'h': 'ybn',
498
+ 'i': 'k89',
499
+ 'j': 'umn',
500
+ 'k': 'im',
501
+ 'l': 'o',
502
+ 'm': 'jk',
503
+ 'n': 'hj',
504
+ 'o': 'l90',
505
+ 'p': '0',
506
+ 'q': 'a12',
507
+ 'r': 'f45',
508
+ 's': 'wxz',
509
+ 't': 'g56',
510
+ 'u': 'j78',
511
+ 'v': 'fg',
512
+ 'w': 's23',
513
+ 'x': 'sd',
514
+ 'y': 'h67',
515
+ 'z': 'as',
516
+ '1': 'q',
517
+ '2': 'qw',
518
+ '3': 'we',
519
+ '4': 'er',
520
+ '5': 'rt',
521
+ '6': 'ty',
522
+ '7': 'yu',
523
+ '8': 'ui',
524
+ '9': 'io',
525
+ '0': 'op',
526
+ }
527
+
528
+ # ---------------------------------------------------------------------------
529
+
530
+ def corrupt_value(self, in_str):
531
+ """Method which corrupts the given input string by replacing a single
532
+ character with a neighbouring character given the defined keyboard
533
+ layout at a position randomly selected by the position function.
534
+ """
535
+
536
+ if len(in_str) == 0: # Empty string, no modification possible
537
+ return in_str
538
+
539
+ max_try = 10 # Maximum number of tries to find a keyboard modification at
540
+ # a randomly selected position
541
+
542
+ done_key_mod = False # A flag, set to true once a modification is done
543
+ try_num = 0
544
+
545
+ mod_str = in_str[:] # Make a copy of the string which will be modified
546
+
547
+ while (done_key_mod == False) and (try_num < max_try):
548
+
549
+ mod_pos = self.position_function(mod_str)
550
+ mod_char = mod_str[mod_pos]
551
+
552
+ r = random.random() # Create a random number between 0 and 1
553
+
554
+ if r <= self.row_prob: # See if there is a row modification
555
+ if mod_char in self.rows:
556
+ key_mod_chars = self.rows[mod_char]
557
+ done_key_mod = True
558
+
559
+ else: # See if there is a column modification
560
+ if mod_char in self.cols:
561
+ key_mod_chars = self.cols[mod_char]
562
+ done_key_mod = True
563
+
564
+ if done_key_mod == False:
565
+ try_num += 1
566
+
567
+ # If a modification is possible do it
568
+ #
569
+ if done_key_mod == True:
570
+
571
+ # Randomly select one of the possible characters
572
+ #
573
+ new_char = random.choice(key_mod_chars)
574
+
575
+ mod_str = mod_str[:mod_pos] + new_char + mod_str[mod_pos + 1 :]
576
+
577
+ assert len(mod_str) == len(in_str)
578
+
579
+ return mod_str
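
A short sketch of the keyboard corruptor; making row and column errors equally likely is an illustrative choice (import path assumed from this package layout):

from geco_data_generator import corruptor  # assumed package layout

keyboard_corruptor = corruptor.CorruptValueKeyboard(
    position_function=corruptor.position_mod_uniform,
    row_prob=0.5,  # illustrative probabilities, must sum to 1.0
    col_prob=0.5)

# One character is replaced by a neighbouring key on the QWERTY layout,
# e.g. 'sydney' might become 'sydnwy' or 'sydhey'.
print(keyboard_corruptor.corrupt_value('sydney'))
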
580
+
581
+
582
+ # =============================================================================
583
+
584
+
585
+ class CorruptValueOCR(CorruptValue):
586
+ """Simulate OCR errors using a list of similar pairs of characters or strings
587
+ that will be applied on the original string values.
588
+
589
+ These pairs of characters will be loaded from a look-up file which is a
590
+ CSV file with two columns, the first is a single character or character
591
+ sequence, and the second column is also a single character or character
592
+ sequence. It is assumed that the second value is an OCR modification of
593
+ the first value, and the other way round. For example:
594
+
595
+ 5,S
596
+ 5,s
597
+ 2,Z
598
+ 2,z
599
+ 1,|
600
+ 6,G
601
+
602
+ It is possible for an 'original' string value (first column) to have
603
+ several variations (second column). In such a case one variation will be
604
+ randomly selected during the value corruption (modification) process.
605
+
606
+ The additional arguments (besides the base class argument
607
+ 'position_function') that have to be set when this attribute type is
608
+ initialised are:
609
+
610
+ lookup_file_name Name of the file which contains the OCR character
611
+ variations.
612
+
613
+ has_header_line A flag, set to True or False, that has to be set
614
+ according to if the look-up file starts with a header
615
+ line or not.
616
+
617
+ unicode_encoding The Unicode encoding (a string name) of the file.
618
+ """
619
+
620
+ # ---------------------------------------------------------------------------
621
+
622
+ def __init__(self, **kwargs):
623
+ """Constructor. Process the derived keywords first, then call the base
624
+ class constructor.
625
+ """
626
+
627
+ self.lookup_file_name = None
628
+ self.has_header_line = None
629
+ self.unicode_encoding = None
630
+ self.ocr_val_dict = {} # The dictionary to hold the OCR variations
631
+ self.name = 'OCR value'
632
+
633
+ # Process all keyword arguments
634
+ #
635
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
636
+
637
+ for (keyword, value) in kwargs.items():
638
+
639
+ if keyword.startswith('look'):
640
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
641
+ self.lookup_file_name = value
642
+
643
+ elif keyword.startswith('has'):
644
+ basefunctions.check_is_flag('has_header_line', value)
645
+ self.has_header_line = value
646
+
647
+ elif keyword.startswith('unicode'):
648
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
649
+ self.unicode_encoding = value
650
+
651
+ else:
652
+ base_kwargs[keyword] = value
653
+
654
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
655
+
656
+ # Check if the necessary variables have been set
657
+ #
658
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
659
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
660
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
661
+
662
+ # Load the OCR variations lookup file - - - - - - - - - - - - - - - - - - -
663
+ #
664
+ header_list, lookup_file_data = basefunctions.read_csv_file(
665
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
666
+ )
667
+
668
+ # Process values from file and their frequencies
669
+ #
670
+ for rec_list in lookup_file_data:
671
+ if len(rec_list) != 2:
672
+ raise Exception(
673
+ 'Illegal format in OCR variations lookup file '
674
+ + '%s: %s' % (self.lookup_file_name, str(rec_list))
675
+ )
676
+ org_val = rec_list[0].strip()
677
+ var_val = rec_list[1].strip()
678
+
679
+ if org_val == '':
680
+ raise Exception(
681
+ 'Empty original OCR value in lookup file %s' % (self.lookup_file_name)
682
+ )
683
+ if var_val == '':
684
+ raise Exception(
685
+ 'Empty OCR variation value in lookup file %s' % (self.lookup_file_name)
686
+ )
687
+ if org_val == var_val:
688
+ raise Exception(
689
+ 'OCR variation is the same as original value in '
690
+ + 'lookup file %s' % (self.lookup_file_name)
691
+ )
692
+
693
+ # Now insert the OCR original value and variation twice (with original
694
+ # and variation both as key and value), i.e. swapped
695
+ #
696
+ this_org_val_list = self.ocr_val_dict.get(org_val, [])
697
+ this_org_val_list.append(var_val)
698
+ self.ocr_val_dict[org_val] = this_org_val_list
699
+
700
+ this_org_val_list = self.ocr_val_dict.get(var_val, [])
701
+ this_org_val_list.append(org_val)
702
+ self.ocr_val_dict[var_val] = this_org_val_list
703
+
704
+ # ---------------------------------------------------------------------------
705
+
706
+ def corrupt_value(self, in_str):
707
+ """Method which corrupts the given input string by replacing a single
708
+ character or a sequence of characters with an OCR variation at a
709
+ position randomly selected by the position function.
710
+
711
+ If there are several OCR variations then one will be randomly chosen.
712
+ """
713
+
714
+ if len(in_str) == 0: # Empty string, no modification possible
715
+ return in_str
716
+
717
+ max_try = 10 # Maximum number of tries to find an OCR modification at a
718
+ # randomly selected position
719
+
720
+ done_ocr_mod = False # A flag, set to True once a modification is done
721
+ try_num = 0
722
+
723
+ mod_str = in_str[:] # Make a copy of the string which will be modified
724
+
725
+ while (done_ocr_mod == False) and (try_num < max_try):
726
+
727
+ mod_pos = self.position_function(mod_str)
728
+
729
+ # Try one to three characters at selected position
730
+ #
731
+ ocr_org_char_set = set(
732
+ [
733
+ mod_str[mod_pos],
734
+ mod_str[mod_pos : mod_pos + 2],
735
+ mod_str[mod_pos : mod_pos + 3],
736
+ ]
737
+ )
738
+
739
+ mod_options = [] # List of possible modifications that can be applied
740
+
741
+ for ocr_org_char in ocr_org_char_set:
742
+ if ocr_org_char in self.ocr_val_dict:
743
+ ocr_var_list = self.ocr_val_dict[ocr_org_char]
744
+ for mod_val in ocr_var_list:
745
+ mod_options.append([ocr_org_char, len(ocr_org_char), mod_val])
746
+
747
+ if mod_options != []: # Modifications are possible
748
+
749
+ # Randomly select one of the possible modifications that can be applied
750
+ #
751
+ mod_to_apply = random.choice(mod_options)
752
+ assert mod_to_apply[0] in self.ocr_val_dict.keys()
753
+ assert mod_to_apply[2] in self.ocr_val_dict.keys()
754
+
755
+ mod_str = (
756
+ in_str[:mod_pos]
757
+ + mod_to_apply[2]
758
+ + in_str[mod_pos + mod_to_apply[1] :]
759
+ )
760
+
761
+ done_ocr_mod = True
762
+
763
+ else:
764
+ try_num += 1
765
+
766
+ return mod_str
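
Because this corruptor is driven entirely by its look-up file, a self-contained sketch can write a tiny file in the documented two-column format and use it; the file name and rows below are made up for illustration:

from geco_data_generator import corruptor  # assumed package layout

with open('ocr-example.csv', 'w') as f:  # illustrative look-up file
    f.write('5,S\n')
    f.write('2,Z\n')
    f.write('1,|\n')

ocr_corruptor = corruptor.CorruptValueOCR(
    position_function=corruptor.position_mod_uniform,
    lookup_file_name='ocr-example.csv',
    has_header_line=False,
    unicode_encoding='ascii')

# Likely replaces one character with its OCR look-alike, e.g. '12345' -> '1Z345'.
print(ocr_corruptor.corrupt_value('12345'))
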
767
+
768
+
769
+ # =============================================================================
770
+
771
+
772
+ class CorruptValuePhonetic(CorruptValue):
773
+ """Simulate phonetic errors using a list of phonetic rules which are stored
774
+ in a CSV look-up file.
775
+
776
+ Each line (row) in the CSV file must consist of seven columns that contain
777
+ the following information:
778
+ 1) Where a phonetic modification can be applied. Possible values are:
779
+ 'ALL','START','END','MIDDLE'
780
+ 2) The original character sequence (i.e. the characters to be replaced)
781
+ 3) The new character sequence (which will replace the original sequence)
782
+ 4) Precondition: A condition that must occur before the original string
783
+ character sequence in order for this rule to become applicable.
784
+ 5) Postcondition: Similarly, a condition that must occur after the
785
+ original string character sequence in order for this rule to become
786
+ applicable.
787
+ 6) Pattern existence condition: This condition requires that a certain
788
+ given string character sequence does ('y' flag) or does not ('n' flag)
789
+ occur in the input string.
790
+ 7) Start existence condition: Similarly, this condition requires that the
791
+ input string starts with a certain string pattern ('y' flag) or not
792
+ ('n' flag)
793
+
794
+ A detailed description of this phonetic data generation is available in
795
+
796
+ Accurate Synthetic Generation of Realistic Personal Information
797
+ Peter Christen and Agus Pudjijono
798
+ Proceedings of the Pacific-Asia Conference on Knowledge Discovery and
799
+ Data Mining (PAKDD), Bangkok, Thailand, April 2009.
800
+
801
+ For a given input string, one of the possible phonetic modifications will
802
+ be randomly selected without the use of the position function.
803
+
804
+ The additional arguments (besides the base class argument
805
+ 'position_function') that have to be set when this attribute type is
806
+ initialised are:
807
+
808
+ lookup_file_name Name of the file which contains the phonetic
809
+ modification patterns.
810
+
811
+ has_header_line A flag, set to True or False, that has to be set
812
+ according to if the look-up file starts with a header
813
+ line or not.
814
+
815
+ unicode_encoding The Unicode encoding (a string name) of the file.
816
+
817
+ Note that the 'position_function' is not required by this corruptor
818
+ method.
819
+ """
820
+
821
+ # ---------------------------------------------------------------------------
822
+
823
+ def __init__(self, **kwargs):
824
+ """Constructor. Process the derived keywords first, then call the base
825
+ class constructor.
826
+ """
827
+
828
+ self.lookup_file_name = None
829
+ self.has_header_line = None
830
+ self.unicode_encoding = None
831
+ self.replace_table = []
832
+ self.name = 'Phonetic value'
833
+
834
+ def dummy_position(s): # Define a dummy position function
835
+ return 0
836
+
837
+ # Process all keyword arguments
838
+ #
839
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
840
+
841
+ for (keyword, value) in kwargs.items():
842
+
843
+ if keyword.startswith('look'):
844
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
845
+ self.lookup_file_name = value
846
+
847
+ elif keyword.startswith('has'):
848
+ basefunctions.check_is_flag('has_header_line', value)
849
+ self.has_header_line = value
850
+
851
+ elif keyword.startswith('unicode'):
852
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
853
+ self.unicode_encoding = value
854
+
855
+ else:
856
+ base_kwargs[keyword] = value
857
+
858
+ base_kwargs['position_function'] = dummy_position
859
+
860
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
861
+
862
+ # Check if the necessary variables have been set
863
+ #
864
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
865
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
866
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
867
+
868
+ # Load the misspelling lookup file - - - - - - - - - - - - - - - - - - - - -
869
+ #
870
+ header_list, lookup_file_data = basefunctions.read_csv_file(
871
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
872
+ )
873
+
874
+ # Process values from file and misspellings
875
+ #
876
+ for rec_list in lookup_file_data:
877
+ if len(rec_list) != 7:
878
+ raise Exception(
879
+ 'Illegal format in phonetic lookup file %s: %s'
880
+ % (self.lookup_file_name, str(rec_list))
881
+ )
882
+ val_tuple = ()
883
+ for val in rec_list:
884
+ if val != '':
885
+ val = val.strip()
886
+ val_tuple += (val,)
887
+ else:
888
+ raise Exception(
889
+ 'Empty value in phonetic lookup file %s: %s'
890
+ % (self.lookup_file_name, str(rec_list))
891
+ )
892
+ self.replace_table.append(val_tuple)
893
+
894
+ # ---------------------------------------------------------------------------
895
+
896
+ def __apply_change__(self, in_str, ch):
897
+ """Helper function which will apply the selected change to the input
898
+ string.
899
+
900
+ Developed by Agus Pudjijono, ANU, 2008.
901
+ """
902
+
903
+ work_str = in_str
904
+ list_ch = ch.split('>')
905
+ subs = list_ch[1]
906
+ if list_ch[1] == '@': # @ is blank
907
+ subs = ''
908
+ tmp_str = work_str
909
+ org_pat_length = len(list_ch[0])
910
+ str_length = len(work_str)
911
+
912
+ if list_ch[2] == 'end':
913
+ org_pat_start = work_str.find(list_ch[0], str_length - org_pat_length)
914
+ elif list_ch[2] == 'middle':
915
+ org_pat_start = work_str.find(list_ch[0], 1)
916
+ else: # Start and all
917
+ org_pat_start = work_str.find(list_ch[0], 0)
918
+
919
+ if org_pat_start == 0:
920
+ work_str = subs + work_str[org_pat_length:]
921
+ elif org_pat_start > 0:
922
+ work_str = (
923
+ work_str[:org_pat_start]
924
+ + subs
925
+ + work_str[org_pat_start + org_pat_length :]
926
+ )
927
+
928
+ # if (work_str == tmp_str):
929
+ # work_str = str_to_change
930
+
931
+ return work_str
932
+
933
+ # ---------------------------------------------------------------------------
934
+
935
+ def __slavo_germanic__(self, in_str):
936
+ """Helper function which determines if the inputstring could contain a
937
+ Slavo or Germanic name.
938
+
939
+ Developed by Agus Pudjijono, ANU, 2008.
940
+ """
941
+
942
+ if (
943
+ (in_str.find('w') > -1)
944
+ or (in_str.find('k') > -1)
945
+ or (in_str.find('cz') > -1)
946
+ or (in_str.find('witz') > -1)
947
+ ):
948
+ return 1
949
+ else:
950
+ return 0
951
+
952
+ # ---------------------------------------------------------------------------
953
+
954
+ def __collect_replacement__(
955
+ self, s, where, orgpat, newpat, precond, postcond, existcond, startcond
956
+ ):
957
+ """Helper function which collects all the possible phonetic modification
958
+ patterns that are possible on the given input string, and replaces a
959
+ pattern in a string.
960
+
961
+ The following arguments are needed:
962
+ - where Can be one of: 'ALL','START','END','MIDDLE'
963
+ - precond Pre-condition (default 'None') can be 'V' for vowel or
964
+ 'C' for consonant
965
+ - postcond Post-condition (default 'None') can be 'V' for vowel or
966
+ 'C' for consonant
967
+
968
+ Developed by Agus Pudjijono, ANU, 2008.
969
+ """
970
+
971
+ vowels = 'aeiouy'
972
+ tmpstr = s
973
+ changesstr = ''
974
+
975
+ start_search = 0 # Position from where to start the search
976
+ pat_len = len(orgpat)
977
+ stop = False
978
+
979
+ # As long as pattern is in string
980
+ #
981
+ while (orgpat in tmpstr[start_search:]) and (stop == False):
982
+
983
+ pat_start = tmpstr.find(orgpat, start_search)
984
+ str_len = len(tmpstr)
985
+
986
+ # Check conditions of previous and following character
987
+ #
988
+ OKpre = False # Previous character condition
989
+ OKpre1 = False # Previous character1 condition
990
+ OKpre2 = False # Previous character2 condition
991
+
992
+ OKpost = False # Following character condition
993
+ OKpost1 = False # Following character1 condition
994
+ OKpost2 = False # Following character2 condition
995
+
996
+ OKexist = False # Existing pattern condition
997
+ OKstart = False # Existing start pattern condition
998
+
999
+ index = 0
1000
+
1001
+ if precond == 'None':
1002
+ OKpre = True
1003
+
1004
+ elif pat_start > 0:
1005
+ if ((precond == 'V') and (tmpstr[pat_start - 1] in vowels)) or (
1006
+ (precond == 'C') and (tmpstr[pat_start - 1] not in vowels)
1007
+ ):
1008
+ OKpre = True
1009
+
1010
+ elif (precond.find(';')) > -1:
1011
+ if precond.find('|') > -1:
1012
+ rls = precond.split('|')
1013
+ rl1 = rls[0].split(';')
1014
+
1015
+ if int(rl1[1]) < 0:
1016
+ index = pat_start + int(rl1[1])
1017
+ else:
1018
+ index = pat_start + (len(orgpat) - 1) + int(rl1[1])
1019
+
1020
+ i = 2
1021
+ if rl1[0] == 'n':
1022
+ while i < (len(rl1)):
1023
+ if tmpstr[index : (index + len(rl1[i]))] == rl1[i]:
1024
+ OKpre1 = False
1025
+ break
1026
+ else:
1027
+ OKpre1 = True
1028
+ i += 1
1029
+ else:
1030
+ while i < (len(rl1)):
1031
+ if tmpstr[index : (index + len(rl1[i]))] == rl1[i]:
1032
+ OKpre1 = True
1033
+ break
1034
+ i += 1
1035
+
1036
+ rl2 = rls[1].split(';')
1037
+
1038
+ if int(rl2[1]) < 0:
1039
+ index = pat_start + int(rl2[1])
1040
+ else:
1041
+ index = pat_start + (len(orgpat) - 1) + int(rl2[1])
1042
+
1043
+ i = 2
1044
+ if rl2[0] == 'n':
1045
+ while i < (len(rl2)):
1046
+ if tmpstr[index : (index + len(rl2[i]))] == rl2[i]:
1047
+ OKpre2 = False
1048
+ break
1049
+ else:
1050
+ OKpre2 = True
1051
+ i += 1
1052
+ else:
1053
+ while i < (len(rl2)):
1054
+ if tmpstr[index : (index + len(rl2[i]))] == rl2[i]:
1055
+ OKpre2 = True
1056
+ break
1057
+ i += 1
1058
+
1059
+ OKpre = OKpre1 and OKpre2
1060
+
1061
+ else:
1062
+ rl = precond.split(';')
1063
+ # -
1064
+ if int(rl[1]) < 0:
1065
+ index = pat_start + int(rl[1])
1066
+ else:
1067
+ index = pat_start + (len(orgpat) - 1) + int(rl[1])
1068
+
1069
+ i = 2
1070
+ if rl[0] == 'n':
1071
+ while i < (len(rl)):
1072
+ if tmpstr[index : (index + len(rl[i]))] == rl[i]:
1073
+ OKpre = False
1074
+ break
1075
+ else:
1076
+ OKpre = True
1077
+ i += 1
1078
+ else:
1079
+ while i < (len(rl)):
1080
+ if tmpstr[index : (index + len(rl[i]))] == rl[i]:
1081
+ OKpre = True
1082
+ break
1083
+ i += 1
1084
+
1085
+ if postcond == 'None':
1086
+ OKpost = True
1087
+
1088
+ else:
1089
+ pat_end = pat_start + pat_len
1090
+
1091
+ if pat_end < str_len:
1092
+ if ((postcond == 'V') and (tmpstr[pat_end] in vowels)) or (
1093
+ (postcond == 'C') and (tmpstr[pat_end] not in vowels)
1094
+ ):
1095
+ OKpost = True
1096
+ elif (postcond.find(';')) > -1:
1097
+ if postcond.find('|') > -1:
1098
+ rls = postcond.split('|')
1099
+
1100
+ rl1 = rls[0].split(';')
1101
+
1102
+ if int(rl1[1]) < 0:
1103
+ index = pat_start + int(rl1[1])
1104
+ else:
1105
+ index = pat_start + (len(orgpat) - 1) + int(rl1[1])
1106
+
1107
+ i = 2
1108
+ if rl1[0] == 'n':
1109
+ while i < (len(rl1)):
1110
+ if tmpstr[index : (index + len(rl1[i]))] == rl1[i]:
1111
+ OKpost1 = False
1112
+ break
1113
+ else:
1114
+ OKpost1 = True
1115
+ i += 1
1116
+ else:
1117
+ while i < (len(rl1)):
1118
+ if tmpstr[index : (index + len(rl1[i]))] == rl1[i]:
1119
+ OKpost1 = True
1120
+ break
1121
+ i += 1
1122
+
1123
+ rl2 = rls[1].split(';')
1124
+
1125
+ if int(rl2[1]) < 0:
1126
+ index = pat_start + int(rl2[1])
1127
+ else:
1128
+ index = pat_start + (len(orgpat) - 1) + int(rl2[1])
1129
+
1130
+ i = 2
1131
+ if rl2[0] == 'n':
1132
+ while i < (len(rl2)):
1133
+ if tmpstr[index : (index + len(rl2[i]))] == rl2[i]:
1134
+ OKpost2 = False
1135
+ break
1136
+ else:
1137
+ OKpost2 = True
1138
+ i += 1
1139
+ else:
1140
+ while i < (len(rl2)):
1141
+ if tmpstr[index : (index + len(rl2[i]))] == rl2[i]:
1142
+ OKpost2 = True
1143
+ break
1144
+ i += 1
1145
+
1146
+ OKpost = OKpost1 and OKpost2
1147
+
1148
+ else:
1149
+ rl = postcond.split(';')
1150
+
1151
+ if int(rl[1]) < 0:
1152
+ index = pat_start + int(rl[1])
1153
+ else:
1154
+ index = pat_start + (len(orgpat) - 1) + int(rl[1])
1155
+
1156
+ i = 2
1157
+ if rl[0] == 'n':
1158
+ while i < (len(rl)):
1159
+ if tmpstr[index : (index + len(rl[i]))] == rl[i]:
1160
+ OKpost = False
1161
+ break
1162
+ else:
1163
+ OKpost = True
1164
+ i += 1
1165
+ else:
1166
+ while i < (len(rl)):
1167
+ if tmpstr[index : (index + len(rl[i]))] == rl[i]:
1168
+ OKpost = True
1169
+ break
1170
+ i += 1
1171
+
1172
+ if existcond == 'None':
1173
+ OKexist = True
1174
+
1175
+ else:
1176
+ rl = existcond.split(';')
1177
+ if rl[1] == 'slavo':
1178
+ r = self.__slavo_germanic__(s)
1179
+ if rl[0] == 'n':
1180
+ if r == 0:
1181
+ OKexist = True
1182
+ else:
1183
+ if r == 1:
1184
+ OKexist = True
1185
+ else:
1186
+ i = 1
1187
+ if rl[0] == 'n':
1188
+ while i < (len(rl)):
1189
+ if s.find(rl[i]) > -1:
1190
+ OKexist = False
1191
+ break
1192
+ else:
1193
+ OKexist = True
1194
+ i += i
1195
+ else:
1196
+ while i < (len(rl)):
1197
+ if s.find(rl[i]) > -1:
1198
+ OKexist = True
1199
+ break
1200
+ i += i
1201
+
1202
+ if startcond == 'None':
1203
+ OKstart = True
1204
+
1205
+ else:
1206
+ rl = startcond.split(';')
1207
+ i = 1
1208
+ if rl[0] == 'n':
1209
+ while i < (len(rl)):
1210
+ if s.find(rl[i]) > -1:
1211
+ OKstart = False
1212
+ break
1213
+ else:
1214
+ OKstart = True
1215
+ i += i
1216
+ else:
1217
+ while i < (len(rl)):
1218
+ if s.find(rl[i]) == 0:
1219
+ OKstart = True
1220
+ break
1221
+ i += i
1222
+
1223
+ # Replace pattern if conditions and position OK
1224
+ #
1225
+ if (
1226
+ (OKpre == True)
1227
+ and (OKpost == True)
1228
+ and (OKexist == True)
1229
+ and (OKstart == True)
1230
+ ) and (
1231
+ ((where == 'START') and (pat_start == 0))
1232
+ or (
1233
+ (where == 'MIDDLE')
1234
+ and (pat_start > 0)
1235
+ and (pat_start + pat_len < str_len)
1236
+ )
1237
+ or ((where == 'END') and (pat_start + pat_len == str_len))
1238
+ or (where == 'ALL')
1239
+ ):
1240
+ tmpstr = tmpstr[:pat_start] + newpat + tmpstr[pat_start + pat_len :]
1241
+ changesstr += ',' + orgpat + '>' + newpat + '>' + where.lower()
1242
+ start_search = pat_start + len(newpat)
1243
+
1244
+ else:
1245
+ start_search = pat_start + 1
1246
+
1247
+ if start_search >= (len(tmpstr) - 1):
1248
+ stop = True
1249
+
1250
+ tmpstr += changesstr
1251
+
1252
+ return tmpstr
1253
+
1254
+ # ---------------------------------------------------------------------------
1255
+
1256
+ def __get_transformation__(self, in_str):
1257
+ """Helper function which generates the list of possible phonetic
1258
+ modifications for the given input string.
1259
+
1260
+ Developed by Agus Pudjijono, ANU, 2008.
1261
+ """
1262
+
1263
+ if in_str == '':
1264
+ return in_str
1265
+
1266
+ changesstr2 = ''
1267
+
1268
+ workstr = in_str
1269
+
1270
+ for rtpl in self.replace_table: # Check all transformations in the table
1271
+ if len(rtpl) == 3:
1272
+ rtpl += ('None', 'None', 'None', 'None')
1273
+
1274
+ workstr = self.__collect_replacement__(
1275
+ in_str, rtpl[0], rtpl[1], rtpl[2], rtpl[3], rtpl[4], rtpl[5], rtpl[6]
1276
+ )
1277
+ if workstr.find(',') > -1:
1278
+ tmpstr = workstr.split(',')
1279
+ workstr = tmpstr[0]
1280
+ if changesstr2.find(tmpstr[1]) == -1:
1281
+ changesstr2 += tmpstr[1] + ';'
1282
+ workstr += ',' + changesstr2
1283
+
1284
+ return workstr
1285
+
1286
+ # ---------------------------------------------------------------------------
1287
+
1288
+ def corrupt_value(self, in_str):
1289
+ """Method which corrupts the given input string by applying a phonetic
1290
+ modification.
1291
+
1292
+ If several such modifications are possible then one will be randomly
1293
+ selected.
1294
+ """
1295
+
1296
+ if len(in_str) == 0: # Empty string, no modification possible
1297
+ return in_str
1298
+
1299
+ # Get the possible phonetic modifications for this input string
1300
+ #
1301
+ phonetic_changes = self.__get_transformation__(in_str)
1302
+
1303
+ mod_str = in_str
1304
+
1305
+ if ',' in phonetic_changes: # Several modifications possible
1306
+ tmp_str = phonetic_changes.split(',')
1307
+ pc = tmp_str[1][:-1] # Remove the last ';'
1308
+ list_pc = pc.split(';')
1309
+ change_op = random.choice(list_pc)
1310
+ if change_op != '':
1311
+ mod_str = self.__apply_change__(in_str, change_op)
1312
+ # print in_str, mod_str, change_op
1313
+
1314
+ return mod_str
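
The phonetic corruptor is also driven by a look-up file. The sketch below writes a single seven-column rule (replace 'ph' with 'f' anywhere, with no pre-, post-, existence or start conditions) and applies it; the rule and the file name are illustrative only:

from geco_data_generator import corruptor  # assumed package layout

with open('phonetic-example.csv', 'w') as f:  # illustrative rule file
    f.write('ALL,ph,f,None,None,None,None\n')

phonetic_corruptor = corruptor.CorruptValuePhonetic(
    lookup_file_name='phonetic-example.csv',
    has_header_line=False,
    unicode_encoding='ascii')

print(phonetic_corruptor.corrupt_value('stephen'))  # -> 'stefen'
print(phonetic_corruptor.corrupt_value('smith'))    # no rule applies, unchanged
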
1315
+
1316
+
1317
+ # =============================================================================
1318
+
1319
+
1320
+ class CorruptCategoricalValue(CorruptValue):
1321
+ """Replace a categorical value with another categorical value from the same
1322
+ look-up file.
1323
+
1324
+ This corruptor can be used to modify attribute values with known
1325
+ misspellings.
1326
+
1327
+ The look-up file is a CSV file with two columns, the first is a
1328
+ categorical value that is expected to be in an attribute in an original
1329
+ record, and the second is a variation of this categorical value.
1330
+
1331
+ It is possible for an 'original' categorical value (first column) to have
1332
+ several misspelling variations (second column). In such a case one
1333
+ misspelling will be randomly selected.
1334
+
1335
+ The additional arguments (besides the base class argument
1336
+ 'position_function') that have to be set when this attribute type is
1337
+ initialised are:
1338
+
1339
+ lookup_file_name Name of the file which contains the categorical values
1340
+ and their misspellings.
1341
+
1342
+ has_header_line A flag, set to True or False, that has to be set
1343
+ according to if the look-up file starts with a header
1344
+ line or not.
1345
+
1346
+ unicode_encoding The Unicode encoding (a string name) of the file.
1347
+
1348
+ Note that the 'position_function' is not required by this corruptor
1349
+ method.
1350
+ """
1351
+
1352
+ # ---------------------------------------------------------------------------
1353
+
1354
+ def __init__(self, **kwargs):
1355
+ """Constructor. Process the derived keywords first, then call the base
1356
+ class constructor.
1357
+ """
1358
+
1359
+ self.lookup_file_name = None
1360
+ self.has_header_line = None
1361
+ self.unicode_encoding = None
1362
+ self.misspell_dict = {} # The dictionary to hold the misspellings
1363
+ self.name = 'Categorical value'
1364
+
1365
+ def dummy_position(s): # Define a dummy position function
1366
+ return 0
1367
+
1368
+ # Process all keyword arguments
1369
+ #
1370
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
1371
+
1372
+ for (keyword, value) in kwargs.items():
1373
+
1374
+ if keyword.startswith('look'):
1375
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
1376
+ self.lookup_file_name = value
1377
+
1378
+ elif keyword.startswith('has'):
1379
+ basefunctions.check_is_flag('has_header_line', value)
1380
+ self.has_header_line = value
1381
+
1382
+ elif keyword.startswith('unicode'):
1383
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
1384
+ self.unicode_encoding = value
1385
+
1386
+ else:
1387
+ base_kwargs[keyword] = value
1388
+
1389
+ base_kwargs['position_function'] = dummy_position
1390
+
1391
+ CorruptValue.__init__(self, base_kwargs) # Process base arguments
1392
+
1393
+ # Check if the necessary variables have been set
1394
+ #
1395
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
1396
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
1397
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
1398
+
1399
+ # Load the misspelling lookup file - - - - - - - - - - - - - - - - - - - - -
1400
+ #
1401
+ header_list, lookup_file_data = basefunctions.read_csv_file(
1402
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
1403
+ )
1404
+
1405
+ # Process values from file and misspellings
1406
+ #
1407
+ for rec_list in lookup_file_data:
1408
+ if len(rec_list) != 2:
1409
+ raise Exception(
1410
+ 'Illegal format in misspellings lookup file %s: %s'
1411
+ % (self.lookup_file_name, str(rec_list))
1412
+ )
1413
+
1414
+ org_val = rec_list[0].strip()
1415
+ if org_val == '':
1416
+ raise Exception(
1417
+ 'Empty original attribute value in lookup file %s'
1418
+ % (self.lookup_file_name)
1419
+ )
1420
+ misspell_val = rec_list[1].strip()
1421
+ if misspell_val == '':
1422
+ raise Exception(
1423
+ 'Empty misspelled attribute value in lookup '
1424
+ + 'file %s' % (self.lookup_file_name)
1425
+ )
1426
+ if org_val == misspell_val:
1427
+ raise Exception(
1428
+ 'Misspelled value is the same as original value'
1429
+ + ' in lookup file %s' % (self.lookup_file_name)
1430
+ )
1431
+
1432
+ this_org_val_list = self.misspell_dict.get(org_val, [])
1433
+ this_org_val_list.append(misspell_val)
1434
+ self.misspell_dict[org_val] = this_org_val_list
1435
+
1436
+ # ---------------------------------------------------------------------------
1437
+
1438
+ def corrupt_value(self, in_str):
1439
+ """Method which corrupts the given input string and replaces it with a
1440
+ misspelling, if there is a known misspelling for the given original
1441
+ value.
1442
+
1443
+ If there are several known misspellings for the given original value
1444
+ then one will be randomly selected.
1445
+ """
1446
+
1447
+ if len(in_str) == 0: # Empty string, no modification possible
1448
+ return in_str
1449
+
1450
+ if in_str not in self.misspell_dict: # No misspelling for this value
1451
+ return in_str
1452
+
1453
+ misspell_list = self.misspell_dict[in_str]
1454
+
1455
+ return random.choice(misspell_list)
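
A self-contained sketch for the categorical (misspellings) corruptor, again writing a tiny illustrative look-up file in the two-column format described above:

from geco_data_generator import corruptor  # assumed package layout

with open('misspell-example.csv', 'w') as f:  # illustrative look-up file
    f.write('smith,smyth\n')
    f.write('smith,smithe\n')

misspell_corruptor = corruptor.CorruptCategoricalValue(
    lookup_file_name='misspell-example.csv',
    has_header_line=False,
    unicode_encoding='ascii')

print(misspell_corruptor.corrupt_value('smith'))   # 'smyth' or 'smithe'
print(misspell_corruptor.corrupt_value('miller'))  # no known misspelling, unchanged
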
1456
+
1457
+
1458
+ # =============================================================================
1459
+
1460
+
1461
+ class CorruptDataSet:
1462
+ """Class which provides methods to corrupt the original records generated by
1463
+ one of the classes derived from the GenerateDataSet base class.
1464
+
1465
+ The following arguments need to be set when a CorruptDataSet instance is
1466
+ initialised:
1467
+
1468
+ number_of_mod_records The number of modified (corrupted) records that are
1469
+ to be generated. This will correspond to the number
1470
+ of 'duplicate' records that are generated.
1471
+
1472
+ number_of_org_records The number of original records that were generated
1473
+ by the GenerateDataSet class.
1474
+
1475
+ attribute_name_list The list of attributes (fields) that have been
1476
+ generated for each record.
1477
+
1478
+ max_num_dup_per_rec The maximum number of modified (corrupted) records
1479
+ that can be generated for a single original record.
1480
+
1481
+ num_dup_dist The probability distribution used to create the
1482
+ duplicate records for one original record (possible
1483
+ distributions are: 'uniform', 'poisson', 'zipf')
1484
+
1485
+ max_num_mod_per_attr The maximum number of modifications are to be
1486
+ applied on a single attribute.
1487
+
1488
+ num_mod_per_rec The number of modifications that are to be applied
1489
+ to a record
1490
+
1491
+ attr_mod_prob_dict This dictionary contains probabilities that
1492
+ determine how likely an attribute is selected for
1493
+ random modification (corruption).
1494
+ Keys are attribute names and values are probability
1495
+ values. The given probabilities must sum
1496
+ to 1.0.
1497
+ Not all attributes need to be listed in this
1498
+ dictionary, only the ones onto which modifications
1499
+ are to be applied.
1500
+ An example of such a dictionary is given below.
1501
+
1502
+ attr_mod_data_dict A dictionary which contains for each attribute that
1503
+ is to be modified a list which contains as pairs of
1504
+ probabilities and corruptor objects (i.e. objects
1505
+ based on any of the classes derived from base class
1506
+ CorruptValue).
1507
+ For each attribute listed, the sum of probabilities
1508
+ given in its list must sum to 1.0.
1509
+ An example of such a dictionary is given below.
1510
+
1511
+ Example for 'attr_mod_prob_dict':
1512
+
1513
+ attr_mod_prob_dict = {'surname':0.4, 'address':0.6}
1514
+
1515
+ In this example, the surname attribute will be selected for modification
1516
+ with a 40% likelihood and the address attribute with a 60% likelihood.
1517
+
1518
+ Example for 'attr_mod_data_dict':
1519
+
1520
+ attr_mod_data_dict = {'surname':[(0.25,corrupt_ocr), (0.50,corrupt_edit),
1521
+ (0.25,corrupt_keyboard)],
1522
+ 'address':[(0.50,corrupt_ocr), (0.25,missing_value),
1523
+ (0.25,corrupt_keyboard)]}
1524
+
1525
+ In this example, if the 'surname' is selected for modification, with a
1526
+ 25% likelihood an OCR modification will be applied, with 50% likelihood a
1527
+ character edit modification will be applied, and with 25% likelihood a
1528
+ keyboard typing error modification will be applied.
1529
+ If the 'address' attribute is selected, then with 50% likelihood an OCR
1530
+ modification will be applied, with 25% likelihood a value will be set to
1531
+ a missing value, and with 25% likelihood a keyboard typing error
1532
+ modification will be applied.
1533
+ """
1534
+
1535
+ # ---------------------------------------------------------------------------
1536
+
1537
+ def __init__(self, **kwargs):
1538
+ """Constructor, set attributes."""
1539
+
1540
+ self.number_of_mod_records = None
1541
+ self.number_of_org_records = None
1542
+ self.attribute_name_list = None
1543
+ self.max_num_dup_per_rec = None
1544
+ self.num_dup_dist = None
1545
+ self.num_mod_per_rec = None
1546
+ self.max_num_mod_per_attr = None
1547
+ self.attr_mod_prob_dict = None
1548
+ self.attr_mod_data_dict = None
1549
+
1550
+ # Process the keyword arguments
1551
+ #
1552
+ for (keyword, value) in kwargs.items():
1553
+
1554
+ if keyword.startswith('number_of_m'):
1555
+ basefunctions.check_is_integer('number_of_mod_records', value)
1556
+ basefunctions.check_is_positive('number_of_mod_records', value)
1557
+ self.number_of_mod_records = value
1558
+
1559
+ elif keyword.startswith('number_of_o'):
1560
+ basefunctions.check_is_integer('number_of_org_records', value)
1561
+ basefunctions.check_is_positive('number_of_org_records', value)
1562
+ self.number_of_org_records = value
1563
+
1564
+ elif keyword.startswith('attribute'):
1565
+ basefunctions.check_is_list('attribute_name_list', value)
1566
+ self.attribute_name_list = value
1567
+
1568
+ elif keyword.startswith('max_num_dup'):
1569
+ basefunctions.check_is_integer('max_num_dup_per_rec', value)
1570
+ basefunctions.check_is_positive('max_num_dup_per_rec', value)
1571
+ self.max_num_dup_per_rec = value
1572
+
1573
+ elif keyword.startswith('num_dup_'):
1574
+ if value not in ['uniform', 'poisson', 'zipf']:
1575
+ raise Exception(
1576
+ 'Illegal value given for "num_dup_dist": %s' % (str(value))
1577
+ )
1578
+ self.num_dup_dist = value
1579
+
1580
+ elif keyword.startswith('num_mod_per_r'):
1581
+ basefunctions.check_is_integer('num_mod_per_rec', value)
1582
+ basefunctions.check_is_positive('num_mod_per_rec', value)
1583
+ self.num_mod_per_rec = value
1584
+
1585
+ elif keyword.startswith('max_num_mod_per_a'):
1586
+ basefunctions.check_is_integer('max_num_mod_per_attr', value)
1587
+ basefunctions.check_is_positive('max_num_mod_per_attr', value)
1588
+ self.max_num_mod_per_attr = value
1589
+
1590
+ elif keyword.startswith('attr_mod_p'):
1591
+ basefunctions.check_is_dictionary('attr_mod_prob_dict', value)
1592
+ self.attr_mod_prob_dict = value
1593
+
1594
+ elif keyword.startswith('attr_mod_d'):
1595
+ basefunctions.check_is_dictionary('attr_mod_data_dict', value)
1596
+ self.attr_mod_data_dict = value
1597
+
1598
+ else:
1599
+ raise Exception(
1600
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
1601
+ )
1602
+
1603
+ # Check if the necessary variables have been set
1604
+ #
1605
+ basefunctions.check_is_integer('number_of_mod_records', self.number_of_mod_records)
1606
+ basefunctions.check_is_positive(
1607
+ 'number_of_mod_records', self.number_of_mod_records
1608
+ )
1609
+ basefunctions.check_is_integer('number_of_org_records', self.number_of_org_records)
1610
+ basefunctions.check_is_positive(
1611
+ 'number_of_org_records', self.number_of_org_records
1612
+ )
1613
+ basefunctions.check_is_list('attribute_name_list', self.attribute_name_list)
1614
+ basefunctions.check_is_integer('max_num_dup_per_rec', self.max_num_dup_per_rec)
1615
+ basefunctions.check_is_positive('max_num_dup_per_rec', self.max_num_dup_per_rec)
1616
+ basefunctions.check_is_string('num_dup_dist', self.num_dup_dist)
1617
+ basefunctions.check_is_integer('num_mod_per_rec', self.num_mod_per_rec)
1618
+ basefunctions.check_is_positive('num_mod_per_rec', self.num_mod_per_rec)
1619
+ basefunctions.check_is_integer('max_num_mod_per_attr', self.max_num_mod_per_attr)
1620
+ basefunctions.check_is_positive('max_num_mod_per_attr', self.max_num_mod_per_attr)
1621
+ if self.max_num_mod_per_attr > self.num_mod_per_rec:
1622
+ raise Exception(
1623
+ 'Number of modifications per record must be equal to or larger'
1624
+ + ' than maximum number of modifications per attribute'
1625
+ )
1626
+ basefunctions.check_is_dictionary('attr_mod_prob_dict', self.attr_mod_prob_dict)
1627
+ basefunctions.check_is_dictionary('attr_mod_data_dict', self.attr_mod_data_dict)
1628
+
1629
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1630
+ # Check if it is possible to generate the desired number of modified
1631
+ # (duplicate) corrupted records
1632
+ #
1633
+ if (
1634
+ self.number_of_mod_records
1635
+ > self.number_of_org_records * self.max_num_dup_per_rec
1636
+ ):
1637
+ raise Exception(
1638
+ 'Desired number of duplicates cannot be generated '
1639
+ + 'with given number of original records and maximum'
1640
+ + ' number of duplicates per original record'
1641
+ )
1642
+
1643
+ # Check if there are enough attributes given for modifications - - - - - -
1644
+ #
1645
+ if len(self.attr_mod_prob_dict) < self.num_mod_per_rec:
1646
+ raise Exception(
1647
+ 'Not enough attribute modifications given to obtain'
1648
+ + ' the desired number of modifications per record'
1649
+ )
1650
+
1651
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1652
+ # Create a distribution for the number of duplicates for an original record
1653
+ #
1654
+ num_dup = 1
1655
+ prob_sum = 0.0
1656
+ self.prob_dist_list = [(num_dup, prob_sum)]
1657
+
1658
+ if self.num_dup_dist == 'uniform':
1659
+ uniform_val = 1.0 / float(self.max_num_dup_per_rec)
1660
+
1661
+ for i in range(self.max_num_dup_per_rec - 1):
1662
+ num_dup += 1
1663
+ self.prob_dist_list.append(
1664
+ (num_dup, uniform_val + self.prob_dist_list[-1][1])
1665
+ )
1666
+
1667
+ elif self.num_dup_dist == 'poisson':
1668
+
1669
+ def fac(n): # Factorial of an integer number (recursive calculation)
1670
+ if n > 1.0:
1671
+ return n * fac(n - 1.0)
1672
+ else:
1673
+ return 1.0
1674
+
1675
+ poisson_num = [] # A list of poisson numbers
1676
+ poisson_sum = 0.0 # The sum of all Poisson numbers
1677
+
1678
+ # The mean (lambda) for the poisson numbers
1679
+ #
1680
+ mean = 1.0 + (
1681
+ float(self.number_of_mod_records) / float(self.number_of_org_records)
1682
+ )
1683
+
1684
+ for i in range(self.max_num_dup_per_rec):
1685
+ poisson_num.append((math.exp(-mean) * (mean**i)) / fac(i))
1686
+ poisson_sum += poisson_num[-1]
1687
+
1688
+ for i in range(self.max_num_dup_per_rec): # Scale so they sum up to 1.0
1689
+ poisson_num[i] = poisson_num[i] / poisson_sum
1690
+
1691
+ for i in range(self.max_num_dup_per_rec - 1):
1692
+ num_dup += 1
1693
+ self.prob_dist_list.append(
1694
+ (num_dup, poisson_num[i] + self.prob_dist_list[-1][1])
1695
+ )
1696
+
1697
+ elif self.num_dup_dist == 'zipf':
1698
+ zipf_theta = 0.5
1699
+
1700
+ denom = 0.0
1701
+ for i in range(self.number_of_org_records):
1702
+ denom += 1.0 / (i + 1) ** (1.0 - zipf_theta)
1703
+
1704
+ zipf_c = 1.0 / denom
1705
+ zipf_num = [] # A list of Zipf numbers
1706
+ zipf_sum = 0.0 # The sum of all Zipf numbers
1707
+
1708
+ for i in range(self.max_num_dup_per_rec):
1709
+ zipf_num.append(zipf_c / ((i + 1) ** (1.0 - zipf_theta)))
1710
+ zipf_sum += zipf_num[-1]
1711
+
1712
+ for i in range(self.max_num_dup_per_rec): # Scale so they sum up to 1.0
1713
+ zipf_num[i] = zipf_num[i] / zipf_sum
1714
+
1715
+ for i in range(self.max_num_dup_per_rec - 1):
1716
+ num_dup += 1
1717
+ self.prob_dist_list.append(
1718
+ (num_dup, zipf_num[i] + self.prob_dist_list[-1][1])
1719
+ )
1720
+
1721
+ print('Probability distribution for number of duplicates per record:')
1722
+ print(self.prob_dist_list)
1723
+
1724
+ # Check probability list for attributes and dictionary for attributes - - -
1725
+ # if they sum to 1.0
1726
+ #
1727
+ attr_prob_sum = sum(self.attr_mod_prob_dict.values())
1728
+ if abs(attr_prob_sum - 1.0) > 0.0000001:
1729
+ raise Exception(
1730
+ 'Attribute modification probabilities do not sum '
1731
+ + 'to 1.0: %f' % (attr_prob_sum)
1732
+ )
1733
+ for attr_name in self.attr_mod_prob_dict:
1734
+ assert (
1735
+ self.attr_mod_prob_dict[attr_name] >= 0.0
1736
+ ), 'Negative probability given in "attr_mod_prob_dict"'
1737
+ if attr_name not in self.attribute_name_list:
1738
+ raise Exception(
1739
+ 'Attribute name "%s" in "attr_mod_prob_dict" not ' % (attr_name)
1740
+ + 'listed in "attribute_name_list"'
1741
+ )
1742
+
1743
+ # Check details of attribute modification data dictionary
1744
+ #
1745
+ for (attr_name, attr_mod_data_list) in self.attr_mod_data_dict.items():
1746
+ if attr_name not in self.attribute_name_list:
1747
+ raise Exception(
1748
+ 'Attribute name "%s" in "attr_mod_data_dict" not ' % (attr_name)
1749
+ + 'listed in "attribute_name_list"'
1750
+ )
1751
+ basefunctions.check_is_list('attr_mod_data_dict entry', attr_mod_data_list)
1752
+ prob_sum = 0.0
1753
+ for list_elem in attr_mod_data_list:
1754
+ basefunctions.check_is_tuple('attr_mod_data_dict list element', list_elem)
1755
+ assert len(list_elem) == 2, (
1756
+ 'attr_mod_data_dict list element does ' + 'not consist of two elements'
1757
+ )
1758
+ basefunctions.check_is_normalised(
1759
+ 'attr_mod_data_dict list probability', list_elem[0]
1760
+ )
1761
+ prob_sum += list_elem[0]
1762
+ if abs(prob_sum - 1.0) > 0.0000001:
1763
+ raise Exception(
1764
+ 'Probability sum is not 1.0 for attribute "%s"' % (attr_name)
1765
+ )
1766
+
1767
+ # Generate a list with attribute probabilities summed for easy selection
1768
+ #
1769
+ self.attr_mod_prob_list = []
1770
+ prob_sum = 0
1771
+ for (attr_name, attr_prob) in self.attr_mod_prob_dict.items():
1772
+ prob_sum += attr_prob
1773
+ self.attr_mod_prob_list.append([prob_sum, attr_name])
1774
+ # print(self.attr_mod_prob_list)
1775
+
1776
+ # ---------------------------------------------------------------------------
1777
+
1778
+ def corrupt_records(self, rec_dict):
1779
+ """Method to corrupt modify the records in the given record dictionary
1780
+ according to the settings of the data set corruptor.
1781
+ """
1782
+
1783
+ # Check if number of records given is what is expected
1784
+ #
1785
+ assert self.number_of_org_records == len(
1786
+ rec_dict
1787
+ ), 'Illegal number of records to modify given'
1788
+
1789
+ # First generate for each original record the number of duplicates that are
1790
+ # to be generated for it.
1791
+ #
1792
+ dup_rec_num_dict = {} # Keys are the record identifiers of the original
1793
+ # records, values are their number of duplicates
1794
+ total_num_dups = 0 # Total number of duplicates generated
1795
+
1796
+ org_rec_id_list = list(rec_dict.keys())
1797
+ random.shuffle(org_rec_id_list)
1798
+
1799
+ org_rec_i = 0 # Loop counter over which record to assign duplicates to
1800
+
1801
+ while (org_rec_i < self.number_of_org_records) and (
1802
+ total_num_dups < self.number_of_mod_records
1803
+ ):
1804
+
1805
+ # Randomly choose how many duplicates to create for this original record
1806
+ #
1807
+ r = random.random() # Random number between 0.0 and 1.0
1808
+ ind = -1
1809
+ while self.prob_dist_list[ind][1] > r:
1810
+ ind -= 1
1811
+ num_dups = self.prob_dist_list[ind][0]
1812
+
1813
+ assert (num_dups > 0) and (num_dups <= self.max_num_dup_per_rec)
1814
+
1815
+ # Check if there are still 'enough' duplicates to generate
1816
+ #
1817
+ if num_dups <= (self.number_of_mod_records - total_num_dups):
1818
+
1819
+ # Select next record for which to generate duplicates
1820
+ #
1821
+ org_rec_id = org_rec_id_list[org_rec_i]
1822
+ org_rec_i += 1
1823
+ dup_rec_num_dict[org_rec_id] = num_dups
1824
+ total_num_dups += num_dups
1825
+
1826
+ assert total_num_dups == sum(dup_rec_num_dict.values())
1827
+
1828
+ # Deal with the case where every original record has a number of duplicates
1829
+ # but not enough duplicates are generated in total
1830
+ #
1831
+ org_rec_id_list = list(rec_dict.keys())
1832
+ random.shuffle(org_rec_id_list)
1833
+
1834
+ while total_num_dups < self.number_of_mod_records:
1835
+ org_rec_id = random.choice(org_rec_id_list)
1836
+
1837
+ # If possible, increase number of duplicates for this record by 1
1838
+ #
1839
+ if dup_rec_num_dict[org_rec_id] < self.max_num_dup_per_rec:
1840
+ dup_rec_num_dict[org_rec_id] = dup_rec_num_dict[org_rec_id] + 1
1841
+ total_num_dups += 1
1842
+
1843
+ assert sum(dup_rec_num_dict.values()) == self.number_of_mod_records
1844
+
1845
+ # Generate a histogram of number of duplicates per record
1846
+ #
1847
+ dup_histo = {}
1848
+ for (org_rec_id_to_mod, num_dups) in dup_rec_num_dict.items():
1849
+ dup_count = dup_histo.get(num_dups, 0) + 1
1850
+ dup_histo[num_dups] = dup_count
1851
+ print(
1852
+ 'Distribution of number of original records with certain number '
1853
+ + 'of duplicates:'
1854
+ )
1855
+ dup_histo_keys = list(dup_histo.keys())
1856
+ dup_histo_keys.sort()
1857
+ for num_dups in dup_histo_keys:
1858
+ print(
1859
+ ' Number of records with %d duplicates: %d'
1860
+ % (num_dups, dup_histo[num_dups])
1861
+ )
1862
+ print()
1863
+
1864
+ num_dup_rec_created = 0 # Count how many duplicate records have been
1865
+ # generated
1866
+
1867
+ # Main loop over all original records for which to generate duplicates - -
1868
+ #
1869
+ for (org_rec_id_to_mod, num_dups) in dup_rec_num_dict.items():
1870
+ assert (num_dups > 0) and (num_dups <= self.max_num_dup_per_rec)
1871
+
1872
+ print()
1873
+ print(
1874
+ 'Generating %d modified (duplicate) records for record "%s"'
1875
+ % (num_dups, org_rec_id_to_mod)
1876
+ )
1877
+
1878
+ rec_to_mod_list = rec_dict[org_rec_id_to_mod]
1879
+
1880
+ d = 0 # Loop counter for duplicates for this record
1881
+
1882
+ this_dup_rec_list = [] # A list of all duplicates for this record
1883
+
1884
+ # Loop to create duplicate records - - - - - - - - - - - - - - - - - - - -
1885
+ #
1886
+ while d < num_dups:
1887
+
1888
+ # Create a duplicate of the original record
1889
+ #
1890
+ dup_rec_list = rec_to_mod_list[:] # Make copy of original record
1891
+
1892
+ org_rec_num = org_rec_id_to_mod.split('-')[1]
1893
+ dup_rec_id = 'rec-%s-dup-%d' % (org_rec_num, d)
1894
+ print(
1895
+ ' Generate identifier for duplicate record based on "%s": %s'
1896
+ % (org_rec_id_to_mod, dup_rec_id)
1897
+ )
1898
+
1899
+ # Count the number of modifications in this record (counted as the
1900
+ # number of modified attributes)
1901
+ #
1902
+ num_mod_in_record = 0
1903
+
1904
+ # Set the attribute modification counters to zero for all attributes
1905
+ # that can be modified
1906
+ #
1907
+ attr_mod_count_dict = {}
1908
+ for attr_name in self.attr_mod_prob_dict.keys():
1909
+ attr_mod_count_dict[attr_name] = 0
1910
+
1911
+ # Abort generating modifications after a large number of tries to
1912
+ # prevent an endless loop
1913
+ #
1914
+ max_num_tries = self.num_mod_per_rec * 10
1915
+ num_tries = 0
1916
+
1917
+ # Now apply desired number of modifications to this record
1918
+ #
1919
+ while (num_mod_in_record < self.num_mod_per_rec) and (
1920
+ num_tries < max_num_tries
1921
+ ):
1922
+
1923
+ # Randomly modify an attribute value
1924
+ #
1925
+ r = random.random() # Random value between 0.0 and 1.0
1926
+ i = 0
1927
+ while self.attr_mod_prob_list[i][0] < r:
1928
+ i += 1
1929
+ mod_attr_name = self.attr_mod_prob_list[i][1]
1930
+
1931
+ if attr_mod_count_dict[mod_attr_name] < self.max_num_mod_per_attr:
1932
+ mod_attr_name_index = self.attribute_name_list.index(mod_attr_name)
1933
+ mod_attr_val = dup_rec_list[mod_attr_name_index]
1934
+
1935
+ # Select a corruption method for this attribute according to the
1936
+ # probability distribution of its corruption methods
1937
+ #
1938
+ attr_mod_data_list = self.attr_mod_data_dict[mod_attr_name]
1939
+
1940
+ r = random.random() # Random value between 0.0 and 1.0
1941
+ p_sum = attr_mod_data_list[0][0]
1942
+ i = 0
1943
+ while r >= p_sum:
1944
+ i += 1
1945
+ p_sum += attr_mod_data_list[i][0]
1946
+ corruptor_method = attr_mod_data_list[i][1]
1947
+
1948
+ # Modify the value from the selected attribute
1949
+ #
1950
+ new_attr_val = corruptor_method.corrupt_value(mod_attr_val)
1951
+
1952
+ org_attr_val = rec_to_mod_list[mod_attr_name_index]
1953
+
1954
+ # If the modified value is different insert it back into modified
1955
+ # record
1956
+ #
1957
+ if new_attr_val != org_attr_val:
1958
+ print(' Selected attribute for modification:', mod_attr_name)
1959
+ print(' Selected corruptor:', corruptor_method.name)
1960
+
1961
+ # The following weird string printing construct is to overcome
1962
+ # problems with printing non-ASCII characters
1963
+ #
1964
+ print(
1965
+ ' Original attribute value:',
1966
+ str([org_attr_val])[1:-1],
1967
+ )
1968
+ print(
1969
+ ' Modified attribute value:',
1970
+ str([new_attr_val])[1:-1],
1971
+ )
1972
+
1973
+ dup_rec_list[mod_attr_name_index] = new_attr_val
1974
+
1975
+ # One more modification for this attribute
1976
+ #
1977
+ attr_mod_count_dict[mod_attr_name] += 1
1978
+
1979
+ # The number of modifications in a record corresponds to the
1980
+ # number of modified attributes
1981
+ #
1982
+ num_mod_in_record = 0
1983
+
1984
+ for num_attr_mods in attr_mod_count_dict.values():
1985
+ if num_attr_mods > 0:
1986
+ num_mod_in_record += 1 # One more modification
1987
+ assert num_mod_in_record <= self.num_mod_per_rec
1988
+
1989
+ num_tries += 1 # One more try to modify record
1990
+
1991
+ # Check if this duplicate is different from all others for this original
1992
+ # record
1993
+ #
1994
+ is_diff = True # Flag to check if the latest duplicate is different
1995
+
1996
+ if this_dup_rec_list == []: # No duplicate so far
1997
+ this_dup_rec_list.append(dup_rec_list)
1998
+ else:
1999
+ for check_dup_rec in this_dup_rec_list:
2000
+ if check_dup_rec == dup_rec_list: # Same as a previous duplicate
2001
+ is_diff = False
2002
+ print('Same duplicate:', check_dup_rec)
2003
+ print(' ', dup_rec_list)
2004
+
2005
+ if is_diff == True: # Only keep duplicate records that are different
2006
+
2007
+ # Save the record into the overall record dictionary
2008
+ #
2009
+ rec_dict[dup_rec_id] = dup_rec_list
2010
+
2011
+ d += 1
2012
+ num_dup_rec_created += 1
2013
+
2014
+ print('Original record:')
2015
+ print(' ', rec_to_mod_list)
2016
+ print(
2017
+ 'Record with %d modified attributes' % (num_mod_in_record),
2018
+ )
2019
+ attr_mod_str = '('
2020
+ for a in self.attribute_name_list:
2021
+ if attr_mod_count_dict.get(a, 0) > 0:
2022
+ attr_mod_str += '%d in %s, ' % (attr_mod_count_dict[a], a)
2023
+ attr_mod_str = attr_mod_str[:-1] + '):'
2024
+ print(attr_mod_str)
2025
+ print(' ', dup_rec_list)
2026
+ print(
2027
+ '%d of %d duplicate records generated so far'
2028
+ % (num_dup_rec_created, self.number_of_mod_records)
2029
+ )
2030
+ print()
2031
+
2032
+ return rec_dict
2033
+
2034
+
2035
+ # =============================================================================
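A minimal usage sketch (not part of the uploaded file) of how the keyword arguments validated in the constructor above might be wired together. The class name corruptor.CorruptDataSet and the two stand-in corruptor-method classes below are assumptions for illustration only: corrupt_records() as shown only requires objects that expose a name attribute and a corrupt_value() method, plus record identifiers of the form 'rec-<number>-org'.

    from geco_data_generator import corruptor

    class DropLastCharCorruptor:
        """Hypothetical stand-in corruptor method: any object with a 'name'
        attribute and a corrupt_value() method works with corrupt_records()."""
        name = 'drop last character'
        def corrupt_value(self, in_str):
            return in_str[:-1]

    class MissingValueCorruptor:
        """Hypothetical stand-in that sets a value to missing (empty string)."""
        name = 'set to missing'
        def corrupt_value(self, in_str):
            return ''

    drop_char = DropLastCharCorruptor()
    missing_val = MissingValueCorruptor()

    data_corruptor = corruptor.CorruptDataSet(   # assumed class name
        number_of_org_records=2,
        number_of_mod_records=2,
        attribute_name_list=['given-name', 'surname', 'postcode'],
        max_num_dup_per_rec=2,
        num_dup_dist='uniform',                  # 'uniform', 'poisson' or 'zipf'
        num_mod_per_rec=2,
        max_num_mod_per_attr=1,
        attr_mod_prob_dict={'given-name': 0.4, 'surname': 0.4, 'postcode': 0.2},
        attr_mod_data_dict={
            'given-name': [(0.7, drop_char), (0.3, missing_val)],
            'surname':    [(0.7, drop_char), (0.3, missing_val)],
            'postcode':   [(1.0, missing_val)]})

    # Original records keyed as 'rec-<number>-org'; corrupt_records() adds the
    # generated duplicates ('rec-<number>-dup-<d>') to this dictionary.
    rec_dict = {'rec-0-org': ['sophie', 'smith', '2602'],
                'rec-1-org': ['jack', 'miller', '2000']}
    rec_dict = data_corruptor.corrupt_records(rec_dict)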
geco_data_generator/data/age-freq.csv ADDED
@@ -0,0 +1,17 @@
1
+ # Example age distribution
2
+ #
3
+
4
+ 0,10
5
+ 1,11
6
+ 2,12
7
+ 3,14
8
+ 4,15
9
+ 5,
10
+ 6,
11
+ 7,
12
+
13
+ ..
14
+
15
+ 99,1
16
+ 100,0.5
17
+ 120,1
geco_data_generator/data/city-gender-bloodpressure.csv ADDED
@@ -0,0 +1,13 @@
1
+ #
2
+ canberra,10
3
+ m,50,normal,120,12,None,None
4
+ f,50,normal,110,23,None,None
5
+ sydney,45
6
+ m,45,normal,130,24,None,None
7
+ f,55,normal,105,40,None,None
8
+ melbourne,35
9
+ f,45,normal,105,16,None,None
10
+ m,55,normal,117,19,None,None
11
+ hobart,10
12
+ f,55,normal,100,10,None,None
13
+ m,48,normal,105,24,None,None
geco_data_generator/data/gender-bloodpressure.csv ADDED
@@ -0,0 +1,5 @@
1
+ # Categorical-continuous attribute gender- blood pressure
2
+ #
3
+ m,40,normal,120,12,None,None
4
+ f,35,normal,110,23,None,None
5
+ n/a,25,normal,115,18,None,None
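One plausible reading of a row in this categorical-continuous look-up file is: category, percentage, distribution name, then distribution parameters with optional minimum/maximum bounds (None meaning unbounded). The helper below is only an illustrative sketch of that interpretation, not code from the package.

    import random

    def sample_continuous(row):
        # row e.g. ['m', '40', 'normal', '120', '12', 'None', 'None']
        #          category, percentage, distribution, mean, std-dev, min, max
        dist = row[2]
        if dist == 'uniform':
            return random.uniform(float(row[3]), float(row[4]))
        if dist == 'normal':
            mean, stddev = float(row[3]), float(row[4])
            min_val = None if row[5] == 'None' else float(row[5])
            max_val = None if row[6] == 'None' else float(row[6])
            val = random.normalvariate(mean, stddev)
            # Re-draw until the value falls inside the given bounds (if any)
            while ((min_val is not None and val < min_val) or
                   (max_val is not None and val > max_val)):
                val = random.normalvariate(mean, stddev)
            return val
        raise ValueError('Unknown distribution: %s' % dist)

    print(sample_continuous('m,40,normal,120,12,None,None'.split(',')))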
geco_data_generator/data/gender-city-income.csv ADDED
@@ -0,0 +1,13 @@
1
+ # Look-up file for generating a compound attribute: gender / city / income. For details
2
+ # see module generator.py, class GenerateCateCateContCompoundAttribute
3
+ #
4
+ male,60
5
+ canberra,20,uniform,50000,90000
6
+ sydney,30,normal,75000,50000,20000,None
7
+ melbourne,30,uniform,35000,200000
8
+ perth,20,normal,55000,250000,15000,None
9
+ female,40
10
+ canberra,10,normal,45000,10000,None,150000
11
+ sydney,40,uniform,60000,200000
12
+ melbourne,20,uniform,50000,1750000
13
+ brisbane,30,normal,55000,20000,20000,100000
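The two-level structure of this look-up file (a gender row with a percentage, followed by city rows that each carry their own income distribution) can be grouped as sketched below. This is an illustrative parser for the format as it appears here, not the parser used in generator.py.

    def group_compound_rows(file_name):
        groups = {}      # e.g. {'male': [['canberra', '20', 'uniform', ...], ...]}
        current = None
        for raw_line in open(file_name):
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue                        # skip blanks and comment lines
            fields = line.split(',')
            if len(fields) == 2:                # e.g. 'male,60' starts a new group
                current = fields[0]
                groups[current] = []
            else:                               # e.g. 'sydney,30,normal,75000,...'
                groups[current].append(fields)
        return groups

    print(group_compound_rows('gender-city-income.csv'))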
geco_data_generator/data/gender-city-japanese.csv ADDED
@@ -0,0 +1,10 @@
1
+ # The first line in this look-up file is a header line
2
+ # Look-up file for generating a compound attribute: gender / city. For details
3
+ # see module generator.py, class GenerateCateCateCompoundAttribute
4
+ #
5
+ �j��,60,���l,7, \
6
+ ����,30,���,45, \
7
+ ���,18
8
+ ����,40,���l,10,����,40, \
9
+ ���,20,��t,30,�Q�n,5,\
10
+ ���h,20
geco_data_generator/data/gender-city.csv ADDED
@@ -0,0 +1,11 @@
1
+ cate1,count-cate1,cate2,count-cate2
2
+ # The first line in this look-up file is a header line
3
+ # Look-up file for generating a compound attribute: gender / city. For details
4
+ # see module generator.py, class GenerateCateCateCompoundAttribute
5
+ #
6
+ male,60,canberra,7, \
7
+ sydney,30,melbourne,45, \
8
+ perth,18
9
+ female,40,canberra,10,sydney,40, \
10
+ melbourne,20,brisbane,30,hobart,5,\
11
+ perth,20
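Several of these look-up files continue a logical row over multiple physical lines with a trailing backslash, as in the gender/city rows above. A small reader that joins such continuation lines might look like the following sketch (illustrative only).

    def read_lookup_lines(file_name):
        """Yield logical lines, joining lines that end with a backslash and
        skipping blank lines and '#' comment lines."""
        pending = ''
        for raw_line in open(file_name):
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue
            if line.endswith('\\'):
                pending += line[:-1]            # keep collecting the logical row
            else:
                yield (pending + line)
                pending = ''

    for row in read_lookup_lines('gender-city.csv'):
        print([field.strip() for field in row.split(',')])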
geco_data_generator/data/gender-income-data.csv ADDED
@@ -0,0 +1,6 @@
1
+ #
2
+ male,55,10000-25000,15, 25000-50000,25, 50000-80000,20,\
3
+ 80000-120000,20, 120000-160000,10,160000-1000000,10
4
+ female,45,10000-25000,20, 25000-50000,35, 50000-80000,25,\
5
+ 80000-120000,10, 120000-160000,5,160000-1000000,5
6
+
geco_data_generator/data/gender-income.csv ADDED
@@ -0,0 +1,6 @@
1
+ # Look-up file for generating a compound attribute: gender / income. For details
2
+ # see module generator.py, class GenerateCateContCompoundAttribute
3
+ #
4
+ m,30,uniform,20000,100000
5
+ f,40,normal,35000,100000,10000,None
6
+ na,30,normal,55000,45000,0,150000
geco_data_generator/data/givenname_f_freq.csv ADDED
@@ -0,0 +1,529 @@
1
+ # =============================================================================
2
+ # givenname_f_freq.csv - Frequency table for female given names
3
+ #
4
+ # Sources: - Compiled from various web sites
5
+ #
6
+ # Last update: 11/04/2002, Peter Christen
7
+ # =============================================================================
8
+
9
+ # =============================================================================
10
+ # This table is in a two-column comma separated format, with the first column
11
+ # being the names and the second a corresponding frequency count.
12
+ # =============================================================================
13
+
14
+ aaliyah,1
15
+ abbey,9
16
+ abbie,2
17
+ abby,4
18
+ abigail,1
19
+ abii,1
20
+ adela,1
21
+ adele,1
22
+ adriana,1
23
+ aikaterina,1
24
+ aimee,2
25
+ ainsley,1
26
+ alaiyah,1
27
+ alana,9
28
+ alannah,2
29
+ aleesha,1
30
+ aleisha,2
31
+ alessandra,2
32
+ alessandria,1
33
+ alessia,1
34
+ alex,1
35
+ alexa-rose,1
36
+ alexandra,17
37
+ alexia,1
38
+ alexis,1
39
+ alia,2
40
+ alice,3
41
+ alicia,5
42
+ alisa,3
43
+ alisha,2
44
+ alison,1
45
+ alissa,2
46
+ alivia,1
47
+ aliza,1
48
+ allegra,1
49
+ alysha,2
50
+ alyssa,3
51
+ amalia,1
52
+ amaya,1
53
+ amber,11
54
+ ambrosia,1
55
+ amelia,5
56
+ amelie,1
57
+ amy,15
58
+ anari,1
59
+ anastasia,1
60
+ andie,1
61
+ andrea,1
62
+ aneka,1
63
+ angelica,2
64
+ angelina,1
65
+ angie,1
66
+ anika,1
67
+ anita,1
68
+ anna,1
69
+ annabel,4
70
+ annabella,1
71
+ annabelle,4
72
+ annalise,2
73
+ anneliese,1
74
+ annika,1
75
+ anthea,1
76
+ april,6
77
+ arabella,3
78
+ arki,1
79
+ asha,1
80
+ ashlee,1
81
+ ashleigh,4
82
+ ashley,1
83
+ ashlie,1
84
+ ashton,1
85
+ aurora,1
86
+ ava,2
87
+ ayla,1
88
+ bailee,3
89
+ bailey,3
90
+ belinda,1
91
+ bella,2
92
+ beth,1
93
+ bethanie,1
94
+ bethany,5
95
+ bianca,5
96
+ breana,1
97
+ bree,1
98
+ breeanne,1
99
+ breony,1
100
+ brianna,7
101
+ bridget,4
102
+ brielle,1
103
+ brigette,1
104
+ brigitte,1
105
+ briley,1
106
+ briony,1
107
+ bronte,1
108
+ brooke,11
109
+ brooklyn,3
110
+ brydee,1
111
+ caitlin,16
112
+ caitlyn,1
113
+ callie,1
114
+ cambell,1
115
+ caresse,1
116
+ carla,2
117
+ carly,2
118
+ carmen,1
119
+ casey,4
120
+ cassandra,3
121
+ cassidy,1
122
+ catherine,3
123
+ channing,1
124
+ chantelle,2
125
+ charlee,1
126
+ charli,1
127
+ charlie,3
128
+ charlize,1
129
+ charlotte,17
130
+ chelsea,12
131
+ chelsie,1
132
+ cheree,1
133
+ chevonne,1
134
+ cheyenne,1
135
+ chloe,23
136
+ christina,1
137
+ claire,3
138
+ claudia,3
139
+ clodagh,1
140
+ corie,1
141
+ courtney,5
142
+ crystal,1
143
+ daniella,2
144
+ danielle,5
145
+ danika,1
146
+ darcie,1
147
+ darcy,1
148
+ dayna,1
149
+ delaney,1
150
+ demie,1
151
+ desi,1
152
+ destynii,1
153
+ diamond,1
154
+ domenique,1
155
+ eboni,1
156
+ ebonie,1
157
+ ebony,7
158
+ elana,1
159
+ eleanor,1
160
+ eleni,1
161
+ elise,1
162
+ elisha,1
163
+ eliza,7
164
+ elizabeth,3
165
+ ella,15
166
+ elle,1
167
+ ellen,3
168
+ elli,1
169
+ ellie,4
170
+ ellorah,1
171
+ ellouise,1
172
+ elly,3
173
+ eloise,1
174
+ elysse,1
175
+ emalene,1
176
+ emerson,1
177
+ emiily,48
178
+ emma,16
179
+ emmerson,1
180
+ erin,14
181
+ esme,1
182
+ esther,1
183
+ eva,2
184
+ evangelia,1
185
+ faith,1
186
+ felicity,3
187
+ finn,2
188
+ fiona,1
189
+ freya,2
190
+ fyynlay,1
191
+ gabriella,1
192
+ gabrielle,4
193
+ gemaley,1
194
+ gemma,1
195
+ genevieve,1
196
+ georgia,19
197
+ georgina,1
198
+ giaan,1
199
+ gillian,1
200
+ giorgia,1
201
+ giuliana,1
202
+ grace,7
203
+ gracie,2
204
+ hana,1
205
+ hanna,1
206
+ hannah,17
207
+ harley,1
208
+ harriet,4
209
+ hattie,1
210
+ haylee,1
211
+ hayley,7
212
+ heather,1
213
+ helena,1
214
+ hollie,1
215
+ holly,14
216
+ imogen,9
217
+ india,2
218
+ indiana,2
219
+ indyana,1
220
+ irene,1
221
+ isabel,1
222
+ isabella,28
223
+ isabelle,9
224
+ isobel,1
225
+ jacinta,3
226
+ jacqueline,4
227
+ jacynta,1
228
+ jade,9
229
+ jaime,2
230
+ jaimee,1
231
+ jamilla,1
232
+ jaslyn,1
233
+ jasmin,1
234
+ jasmine,9
235
+ jasmyn,2
236
+ jayde,1
237
+ jayme,1
238
+ jazz,1
239
+ jemima,1
240
+ jemma,3
241
+ jenna,4
242
+ jennifer,1
243
+ jessica,25
244
+ jessie,1
245
+ jinni,1
246
+ joanna,1
247
+ jordan,2
248
+ jordyn,1
249
+ jorja,1
250
+ joselyn,1
251
+ josephine,2
252
+ julia,5
253
+ juliana,3
254
+ kaela,1
255
+ kailey,2
256
+ kaitlin,4
257
+ kaitlyn,2
258
+ kalli,1
259
+ kallie,1
260
+ karissa,1
261
+ karla,1
262
+ karlee,1
263
+ karli,2
264
+ kasey,1
265
+ kate,3
266
+ katelin,2
267
+ katelyn,6
268
+ katharine,1
269
+ katherine,1
270
+ kathleen,1
271
+ katie,2
272
+ kayla,5
273
+ kaysey,1
274
+ keahley,1
275
+ keeley,2
276
+ keelin,1
277
+ keely,2
278
+ keira,1
279
+ kelsea,1
280
+ kelsey,3
281
+ kelsy,1
282
+ kelsye,1
283
+ keziah,1
284
+ kiana,2
285
+ kiandra,1
286
+ kiara,2
287
+ kiarnee,1
288
+ kiera,2
289
+ kierra,1
290
+ kimberley,1
291
+ kimberly,1
292
+ kira,2
293
+ kiria,1
294
+ kirra,5
295
+ kirrah,1
296
+ koula,1
297
+ kristen,2
298
+ kristin,1
299
+ krystin,1
300
+ kyah,1
301
+ kylee,3
302
+ kylie,1
303
+ kyra,1
304
+ lacey,1
305
+ laetitia,1
306
+ laklynn,1
307
+ lani,1
308
+ lara,7
309
+ larissa,2
310
+ laura,5
311
+ lauren,15
312
+ layla,2
313
+ leah,3
314
+ lexie,1
315
+ lia,1
316
+ liana,1
317
+ libby,1
318
+ lilian,1
319
+ lillian,1
320
+ lillianna,1
321
+ lilly,1
322
+ lily,14
323
+ livia,1
324
+ logan,1
325
+ louise,2
326
+ lucinda,1
327
+ lucy,13
328
+ lushia,1
329
+ lydia,1
330
+ lynae,1
331
+ macey,1
332
+ mackenzi,1
333
+ mackenzie,1
334
+ macy,1
335
+ madalyn,1
336
+ maddison,4
337
+ madeleine,6
338
+ madeline,8
339
+ madelyn,1
340
+ madison,16
341
+ maggie,1
342
+ makayla,2
343
+ makenzi,1
344
+ makenzie,1
345
+ maliah,1
346
+ maria,1
347
+ marianne,1
348
+ marlee,1
349
+ marleigh,1
350
+ marley,1
351
+ mary,1
352
+ mathilde,1
353
+ matilda,4
354
+ matisse,2
355
+ maya,3
356
+ meagan,1
357
+ meg,3
358
+ megan,3
359
+ meggie,1
360
+ melanie,1
361
+ melinda,1
362
+ melissa,1
363
+ mhary,1
364
+ mia,12
365
+ micaela,1
366
+ michaela,3
367
+ michelle,1
368
+ mikaela,2
369
+ mikayla,6
370
+ mikhaili,1
371
+ mikhayla,2
372
+ mila,1
373
+ millie,1
374
+ miranda,1
375
+ mollie,1
376
+ molly,5
377
+ monique,4
378
+ montana,4
379
+ montanna,1
380
+ morgan,1
381
+ mya,1
382
+ mystique,1
383
+ nacoya,1
384
+ naomi,4
385
+ nasyah,1
386
+ natalee,1
387
+ natalia,3
388
+ natalie,1
389
+ natasha,2
390
+ natassia,1
391
+ nell,1
392
+ nellie,1
393
+ nemesia,1
394
+ neneh,1
395
+ neve,1
396
+ niamh,1
397
+ nicola,1
398
+ nicole,2
399
+ nikita,2
400
+ nikki,2
401
+ olivia,18
402
+ pace,1
403
+ paige,4
404
+ pakita,1
405
+ paris,3
406
+ pascale,1
407
+ peta,1
408
+ petreece,1
409
+ peyton,1
410
+ phoebe,5
411
+ pia,1
412
+ polly,1
413
+ portia,1
414
+ prudence,1
415
+ rachael,1
416
+ rachel,7
417
+ raquel,1
418
+ rebecca,4
419
+ rebekah,2
420
+ reegan,1
421
+ reganne,1
422
+ renai,1
423
+ renee,2
424
+ rhiannon,1
425
+ riley,1
426
+ roberta,1
427
+ roisin,1
428
+ rosa,1
429
+ rosie,3
430
+ ruby,14
431
+ ryleh,1
432
+ saara,1
433
+ samantha,12
434
+ samara,1
435
+ sana,1
436
+ sara,3
437
+ sara-louise,1
438
+ sarah,17
439
+ sarsha,1
440
+ sascha,2
441
+ saskia,1
442
+ savannah,1
443
+ schkirra,1
444
+ seanna,1
445
+ serena,1
446
+ shae,1
447
+ shai,2
448
+ shakira,1
449
+ shakirah,1
450
+ shana,1
451
+ shanaye,1
452
+ shandril,1
453
+ shannon,1
454
+ shantal,1
455
+ shelbey,1
456
+ shelby,2
457
+ shenae,1
458
+ shona,1
459
+ sian,1
460
+ sidonie,1
461
+ sienna,3
462
+ simone,2
463
+ skye,1
464
+ sonja,2
465
+ sophie,30
466
+ stella,1
467
+ stephanie,10
468
+ summer,1
469
+ sybella,1
470
+ sylvie,1
471
+ taalia,1
472
+ tabitha,1
473
+ tahlia,4
474
+ tahnee,1
475
+ tahni,1
476
+ takara,1
477
+ takeisha,1
478
+ talena,1
479
+ talia,6
480
+ taliah,3
481
+ talissa,1
482
+ talyah,1
483
+ tansy,1
484
+ tanyshah,1
485
+ tara,10
486
+ tarnee,1
487
+ tarnia,1
488
+ tarshya,1
489
+ tayah,2
490
+ tayla,2
491
+ taylah,7
492
+ taylor,3
493
+ taylor-saige,1
494
+ teagan,3
495
+ teaha,1
496
+ teal,1
497
+ teegan,1
498
+ tegan,1
499
+ teileah,1
500
+ teneille,1
501
+ tenille,1
502
+ teresa,1
503
+ tess,2
504
+ tia,1
505
+ tiahana,1
506
+ tiahnee,1
507
+ tiana,3
508
+ tiarna,3
509
+ tiffany,1
510
+ timara,1
511
+ tori,1
512
+ trinity,1
513
+ tuscany,1
514
+ tylah,1
515
+ tyler,1
516
+ vanessa,3
517
+ vendula,1
518
+ vianca,1
519
+ victoria,3
520
+ willow,1
521
+ xani,1
522
+ xanthe,1
523
+ yana,1
524
+ yasmin,3
525
+ ysobel,1
526
+ zali,3
527
+ zara,2
528
+ zarlia,1
529
+ zoe,4
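As the header of this file states, it is a simple two-column name/frequency table. Sampling a name proportionally to its count can be done with a cumulative sum, in the same spirit as the prob_dist_list built in corruptor.py above; the helper below is only an illustrative sketch.

    import random

    def load_freq_table(file_name):
        names, counts = [], []
        for raw_line in open(file_name):
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue                      # skip comments and blank lines
            name, count = line.rsplit(',', 1)
            names.append(name)
            counts.append(int(count))
        return names, counts

    def sample_name(names, counts):
        r = random.uniform(0.0, sum(counts))  # pick a point in the cumulative range
        cum = 0
        for name, count in zip(names, counts):
            cum += count
            if r <= cum:
                return name
        return names[-1]                      # guard against floating-point edge

    names, counts = load_freq_table('givenname_f_freq.csv')
    print(sample_name(names, counts))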
geco_data_generator/data/givenname_freq.csv ADDED
@@ -0,0 +1,888 @@
1
+ # =============================================================================
2
+ # givenname_freq.csv - Frequency table for given names
3
+ #
4
+ # Sources: - Compiled from various web sites
5
+ #
6
+ # Last update: 22/03/2012, Peter Christen
7
+ # =============================================================================
8
+
9
+ # =============================================================================
10
+ # This table is in a two-column comma separated format, with the first column
11
+ # being the names and the second a corresponding frequency count.
12
+ # =============================================================================
13
+
14
+ aaliyah,1
15
+ abbey,9
16
+ abbie,2
17
+ abby,4
18
+ abigail,1
19
+ abii,1
20
+ adela,1
21
+ adele,1
22
+ adriana,1
23
+ aikaterina,1
24
+ aimee,2
25
+ ainsley,1
26
+ alaiyah,1
27
+ alana,9
28
+ alannah,2
29
+ aleesha,1
30
+ aleisha,2
31
+ alessandra,2
32
+ alessandria,1
33
+ alessia,1
34
+ alexa-rose,1
35
+ alexandra,17
36
+ alexia,1
37
+ alexis,1
38
+ alia,2
39
+ alice,3
40
+ alicia,5
41
+ alisa,3
42
+ alisha,2
43
+ alison,1
44
+ alissa,2
45
+ alivia,1
46
+ aliza,1
47
+ allegra,1
48
+ alysha,2
49
+ alyssa,3
50
+ amalia,1
51
+ amaya,1
52
+ amber,11
53
+ ambrosia,1
54
+ amelia,5
55
+ amelie,1
56
+ amy,15
57
+ anari,1
58
+ anastasia,1
59
+ andie,1
60
+ andrea,1
61
+ aneka,1
62
+ angelica,2
63
+ angelina,1
64
+ angie,1
65
+ anika,1
66
+ anita,1
67
+ anna,1
68
+ annabel,4
69
+ annabella,1
70
+ annabelle,4
71
+ annalise,2
72
+ anneliese,1
73
+ annika,1
74
+ anthea,1
75
+ april,6
76
+ arabella,3
77
+ arki,1
78
+ asha,1
79
+ ashlee,1
80
+ ashleigh,4
81
+ #ashley,1
82
+ ashlie,1
83
+ #ashton,1
84
+ aurora,1
85
+ ava,2
86
+ ayla,1
87
+ bailee,3
88
+ #bailey,3
89
+ belinda,1
90
+ bella,2
91
+ beth,1
92
+ bethanie,1
93
+ bethany,5
94
+ bianca,5
95
+ breana,1
96
+ bree,1
97
+ breeanne,1
98
+ breony,1
99
+ brianna,7
100
+ bridget,4
101
+ brielle,1
102
+ brigette,1
103
+ brigitte,1
104
+ briley,1
105
+ briony,1
106
+ bronte,1
107
+ brooke,11
108
+ brooklyn,3
109
+ brydee,1
110
+ caitlin,16
111
+ caitlyn,1
112
+ callie,1
113
+ cambell,1
114
+ caresse,1
115
+ carla,2
116
+ carly,2
117
+ carmen,1
118
+ #casey,4
119
+ cassandra,3
120
+ cassidy,1
121
+ catherine,3
122
+ channing,1
123
+ chantelle,2
124
+ charlee,1
125
+ charli,1
126
+ #charlie,3
127
+ charlize,1
128
+ charlotte,17
129
+ chelsea,12
130
+ chelsie,1
131
+ cheree,1
132
+ chevonne,1
133
+ cheyenne,1
134
+ chloe,23
135
+ christina,1
136
+ claire,3
137
+ claudia,3
138
+ clodagh,1
139
+ corie,1
140
+ courtney,5
141
+ crystal,1
142
+ daniella,2
143
+ danielle,5
144
+ danika,1
145
+ darcie,1
146
+ #darcy,1
147
+ dayna,1
148
+ delaney,1
149
+ demie,1
150
+ desi,1
151
+ destynii,1
152
+ diamond,1
153
+ domenique,1
154
+ eboni,1
155
+ ebonie,1
156
+ ebony,7
157
+ elana,1
158
+ eleanor,1
159
+ eleni,1
160
+ elise,1
161
+ elisha,1
162
+ eliza,7
163
+ elizabeth,3
164
+ ella,15
165
+ elle,1
166
+ ellen,3
167
+ elli,1
168
+ ellie,4
169
+ ellorah,1
170
+ ellouise,1
171
+ elly,3
172
+ eloise,1
173
+ elysse,1
174
+ emalene,1
175
+ emerson,1
176
+ emiily,48
177
+ emma,16
178
+ emmerson,1
179
+ erin,14
180
+ esme,1
181
+ esther,1
182
+ eva,2
183
+ evangelia,1
184
+ faith,1
185
+ felicity,3
186
+ #finn,2
187
+ fiona,1
188
+ freya,2
189
+ fyynlay,1
190
+ gabriella,1
191
+ gabrielle,4
192
+ gemaley,1
193
+ gemma,1
194
+ genevieve,1
195
+ georgia,19
196
+ georgina,1
197
+ giaan,1
198
+ gillian,1
199
+ giorgia,1
200
+ giuliana,1
201
+ grace,7
202
+ gracie,2
203
+ hana,1
204
+ hanna,1
205
+ hannah,17
206
+ #harley,1
207
+ harriet,4
208
+ hattie,1
209
+ haylee,1
210
+ hayley,7
211
+ heather,1
212
+ helena,1
213
+ hollie,1
214
+ holly,14
215
+ imogen,9
216
+ india,2
217
+ indiana,2
218
+ indyana,1
219
+ irene,1
220
+ isabel,1
221
+ isabella,28
222
+ isabelle,9
223
+ isobel,1
224
+ jacinta,3
225
+ jacqueline,4
226
+ jacynta,1
227
+ jade,9
228
+ jaime,2
229
+ jaimee,1
230
+ jamilla,1
231
+ jaslyn,1
232
+ jasmin,1
233
+ jasmine,9
234
+ jasmyn,2
235
+ #jayde,1
236
+ jayme,1
237
+ jazz,1
238
+ jemima,1
239
+ jemma,3
240
+ jenna,4
241
+ jennifer,1
242
+ jessica,25
243
+ jessie,1
244
+ jinni,1
245
+ joanna,1
246
+ #jordan,2
247
+ jordyn,1
248
+ jorja,1
249
+ joselyn,1
250
+ josephine,2
251
+ julia,5
252
+ juliana,3
253
+ kaela,1
254
+ kailey,2
255
+ kaitlin,4
256
+ kaitlyn,2
257
+ kalli,1
258
+ kallie,1
259
+ karissa,1
260
+ karla,1
261
+ karlee,1
262
+ karli,2
263
+ kasey,1
264
+ kate,3
265
+ katelin,2
266
+ katelyn,6
267
+ katharine,1
268
+ katherine,1
269
+ kathleen,1
270
+ katie,2
271
+ kayla,5
272
+ kaysey,1
273
+ keahley,1
274
+ keeley,2
275
+ keelin,1
276
+ keely,2
277
+ keira,1
278
+ kelsea,1
279
+ kelsey,3
280
+ kelsy,1
281
+ kelsye,1
282
+ keziah,1
283
+ kiana,2
284
+ kiandra,1
285
+ kiara,2
286
+ kiarnee,1
287
+ kiera,2
288
+ kierra,1
289
+ kimberley,1
290
+ kimberly,1
291
+ kira,2
292
+ kiria,1
293
+ kirra,5
294
+ kirrah,1
295
+ koula,1
296
+ kristen,2
297
+ kristin,1
298
+ krystin,1
299
+ kyah,1
300
+ kylee,3
301
+ kylie,1
302
+ kyra,1
303
+ lacey,1
304
+ laetitia,1
305
+ laklynn,1
306
+ lani,1
307
+ lara,7
308
+ larissa,2
309
+ laura,5
310
+ lauren,15
311
+ layla,2
312
+ leah,3
313
+ lexie,1
314
+ lia,1
315
+ liana,1
316
+ libby,1
317
+ lilian,1
318
+ lillian,1
319
+ lillianna,1
320
+ lilly,1
321
+ lily,14
322
+ livia,1
323
+ #logan,1
324
+ louise,2
325
+ lucinda,1
326
+ lucy,13
327
+ lushia,1
328
+ lydia,1
329
+ lynae,1
330
+ macey,1
331
+ mackenzi,1
332
+ #mackenzie,1
333
+ macy,1
334
+ madalyn,1
335
+ maddison,4
336
+ madeleine,6
337
+ madeline,8
338
+ madelyn,1
339
+ madison,16
340
+ maggie,1
341
+ makayla,2
342
+ makenzi,1
343
+ makenzie,1
344
+ maliah,1
345
+ maria,1
346
+ marianne,1
347
+ marlee,1
348
+ marleigh,1
349
+ marley,1
350
+ mary,1
351
+ mathilde,1
352
+ matilda,4
353
+ matisse,2
354
+ maya,3
355
+ meagan,1
356
+ meg,3
357
+ megan,3
358
+ meggie,1
359
+ melanie,1
360
+ melinda,1
361
+ melissa,1
362
+ mhary,1
363
+ mia,12
364
+ micaela,1
365
+ michaela,3
366
+ michelle,1
367
+ mikaela,2
368
+ mikayla,6
369
+ mikhaili,1
370
+ mikhayla,2
371
+ mila,1
372
+ millie,1
373
+ miranda,1
374
+ mollie,1
375
+ molly,5
376
+ monique,4
377
+ montana,4
378
+ montanna,1
379
+ morgan,1
380
+ mya,1
381
+ mystique,1
382
+ nacoya,1
383
+ naomi,4
384
+ nasyah,1
385
+ natalee,1
386
+ natalia,3
387
+ natalie,1
388
+ natasha,2
389
+ natassia,1
390
+ nell,1
391
+ nellie,1
392
+ nemesia,1
393
+ neneh,1
394
+ neve,1
395
+ niamh,1
396
+ nicola,1
397
+ nicole,2
398
+ nikita,2
399
+ nikki,2
400
+ olivia,18
401
+ pace,1
402
+ paige,4
403
+ pakita,1
404
+ paris,3
405
+ pascale,1
406
+ peta,1
407
+ petreece,1
408
+ peyton,1
409
+ phoebe,5
410
+ pia,1
411
+ polly,1
412
+ portia,1
413
+ prudence,1
414
+ rachael,1
415
+ rachel,7
416
+ raquel,1
417
+ rebecca,4
418
+ rebekah,2
419
+ reegan,1
420
+ reganne,1
421
+ renai,1
422
+ renee,2
423
+ rhiannon,1
424
+ #riley,1
425
+ roberta,1
426
+ roisin,1
427
+ rosa,1
428
+ rosie,3
429
+ ruby,14
430
+ ryleh,1
431
+ saara,1
432
+ samantha,12
433
+ samara,1
434
+ sana,1
435
+ sara,3
436
+ sara-louise,1
437
+ sarah,17
438
+ sarsha,1
439
+ sascha,2
440
+ saskia,1
441
+ savannah,1
442
+ schkirra,1
443
+ seanna,1
444
+ serena,1
445
+ shae,1
446
+ shai,2
447
+ shakira,1
448
+ shakirah,1
449
+ shana,1
450
+ shanaye,1
451
+ shandril,1
452
+ #shannon,1
453
+ shantal,1
454
+ shelbey,1
455
+ shelby,2
456
+ shenae,1
457
+ shona,1
458
+ sian,1
459
+ sidonie,1
460
+ sienna,3
461
+ simone,2
462
+ skye,1
463
+ sonja,2
464
+ sophie,30
465
+ stella,1
466
+ stephanie,10
467
+ summer,1
468
+ sybella,1
469
+ sylvie,1
470
+ taalia,1
471
+ tabitha,1
472
+ tahlia,4
473
+ tahnee,1
474
+ tahni,1
475
+ takara,1
476
+ takeisha,1
477
+ talena,1
478
+ talia,6
479
+ taliah,3
480
+ talissa,1
481
+ talyah,1
482
+ tansy,1
483
+ tanyshah,1
484
+ tara,10
485
+ tarnee,1
486
+ tarnia,1
487
+ tarshya,1
488
+ tayah,2
489
+ tayla,2
490
+ taylah,7
491
+ taylor,3
492
+ taylor-saige,1
493
+ teagan,3
494
+ teaha,1
495
+ teal,1
496
+ teegan,1
497
+ tegan,1
498
+ teileah,1
499
+ teneille,1
500
+ tenille,1
501
+ teresa,1
502
+ tess,2
503
+ tia,1
504
+ tiahana,1
505
+ tiahnee,1
506
+ tiana,3
507
+ tiarna,3
508
+ tiffany,1
509
+ timara,1
510
+ tori,1
511
+ trinity,1
512
+ tuscany,1
513
+ tylah,1
514
+ #tyler,1
515
+ vanessa,3
516
+ vendula,1
517
+ vianca,1
518
+ victoria,3
519
+ willow,1
520
+ xani,1
521
+ xanthe,1
522
+ yana,1
523
+ yasmin,3
524
+ ysobel,1
525
+ zali,3
526
+ zara,2
527
+ zarlia,1
528
+ zoe,4
529
+ aaron,2
530
+ adam,10
531
+ adrian,1
532
+ aidan,6
533
+ aiden,3
534
+ aidyn,1
535
+ ajay,1
536
+ alec,1
537
+ alex,4
538
+ alexander,15
539
+ aloysius,1
540
+ andrew,10
541
+ angus,3
542
+ anthony,3
543
+ anton,1
544
+ antonio,1
545
+ archer,2
546
+ archie,2
547
+ arren,1
548
+ ash,1
549
+ ashley,1
550
+ ashton,1
551
+ ayden,1
552
+ bailey,11
553
+ bailley,1
554
+ barkly,1
555
+ barnaby,2
556
+ baxter,1
557
+ bayden,1
558
+ bayley,2
559
+ beau,2
560
+ ben,2
561
+ benedict,1
562
+ benjamin,31
563
+ bertie,1
564
+ billy,1
565
+ blade,1
566
+ blaize,1
567
+ blake,11
568
+ blakeston,1
569
+ blayke,1
570
+ bodhi,1
571
+ bradley,3
572
+ braedon,3
573
+ braiden,1
574
+ brandon,2
575
+ brayden,2
576
+ brendan,1
577
+ brett,1
578
+ brinley,1
579
+ brock,1
580
+ brodee,2
581
+ brodie,2
582
+ brody,3
583
+ bryce,2
584
+ brydon,1
585
+ byron,1
586
+ cade,1
587
+ cain,2
588
+ caleb,10
589
+ callum,7
590
+ calvin,1
591
+ cameron,9
592
+ campbell,4
593
+ carlin,2
594
+ casey,1
595
+ charles,4
596
+ charlie,6
597
+ chase,1
598
+ christian,6
599
+ christopher,5
600
+ ciaran,1
601
+ clain,1
602
+ clement,1
603
+ coby,2
604
+ connor,18
605
+ cooper,13
606
+ corey,1
607
+ d'arcy,1
608
+ daen,1
609
+ dakota,1
610
+ dale,2
611
+ damien,2
612
+ daniel,21
613
+ daniele,1
614
+ danjel,1
615
+ danny,1
616
+ dante,4
617
+ darcy,3
618
+ david,5
619
+ deakin,4
620
+ deakyn,1
621
+ dean,2
622
+ declan,1
623
+ declen,1
624
+ devan,1
625
+ dillon,2
626
+ dimitri,1
627
+ dominic,2
628
+ douglas,1
629
+ drew,1
630
+ dylan,13
631
+ eden,1
632
+ edward,3
633
+ elijah,1
634
+ elki,1
635
+ elton,1
636
+ emmet,1
637
+ ethan,19
638
+ evan,4
639
+ ewan,1
640
+ fergus,2
641
+ finlay,3
642
+ finley,1
643
+ finn,3
644
+ finnbar,1
645
+ flynn,7
646
+ francesco,1
647
+ fraser,2
648
+ fynn,1
649
+ gabriel,3
650
+ garth,1
651
+ george,4
652
+ gianni,3
653
+ grayson,1
654
+ gregory,1
655
+ griffin,1
656
+ gus,1
657
+ hamish,4
658
+ hari,1
659
+ harley,2
660
+ harrison,18
661
+ harry,14
662
+ harvey,1
663
+ hayden,9
664
+ heath,2
665
+ henry,5
666
+ hudson,1
667
+ hugh,2
668
+ hugo,3
669
+ hunter,1
670
+ iain,1
671
+ isaac,4
672
+ isaiah,1
673
+ izaac,1
674
+ jack,35
675
+ jackson,12
676
+ jacob,20
677
+ jacobie,1
678
+ jaden,2
679
+ jaggah,1
680
+ jai,3
681
+ jaiden,2
682
+ jairus,1
683
+ jake,14
684
+ jakob,1
685
+ james,28
686
+ jamie,5
687
+ jared,4
688
+ jarod,1
689
+ jarred,1
690
+ jarrod,2
691
+ jarryd,1
692
+ jarvis,1
693
+ jason,1
694
+ jasper,5
695
+ jassim,1
696
+ jaxin,1
697
+ jaxson,1
698
+ jay,1
699
+ jayde,1
700
+ jayden,10
701
+ jaykob,1
702
+ jean-claude,1
703
+ jed,2
704
+ jeremiah,1
705
+ jeremy,1
706
+ jesse,5
707
+ jett,2
708
+ jock,1
709
+ joe,2
710
+ joel,20
711
+ john,3
712
+ john-paul,1
713
+ jonah,1
714
+ jonathon,1
715
+ jordan,11
716
+ jory,1
717
+ joseph,1
718
+ joshua,50
719
+ judah,1
720
+ justin,1
721
+ jye,4
722
+ ka,1
723
+ kade,1
724
+ kadin,1
725
+ kai,3
726
+ kale,1
727
+ kaleb,1
728
+ kane,6
729
+ kayden,1
730
+ kayne,1
731
+ kazuki,1
732
+ keaton,1
733
+ keegan,1
734
+ kenneth,1
735
+ kieran,1
736
+ kieren,2
737
+ kobe,3
738
+ koben,1
739
+ kody,1
740
+ konstantinos,1
741
+ kristo,1
742
+ ky,1
743
+ kydan,1
744
+ kye,2
745
+ kyle,16
746
+ kynan,2
747
+ lachlan,32
748
+ lachlan-john,1
749
+ latham,1
750
+ lawson,1
751
+ lee,1
752
+ leo,2
753
+ leon,2
754
+ levi,2
755
+ lewis,5
756
+ liam,24
757
+ lochlan,2
758
+ logan,3
759
+ louis,2
760
+ luca,1
761
+ lucas,10
762
+ luka,1
763
+ lukas,2
764
+ luke,18
765
+ lynton,1
766
+ mackenzie,1
767
+ mackinley,1
768
+ maconal,1
769
+ macormack,1
770
+ magnus,1
771
+ malakai,1
772
+ marco,1
773
+ marcus,4
774
+ mark,1
775
+ marko,1
776
+ mason,2
777
+ matteus,1
778
+ mattheo,1
779
+ matthew,22
780
+ max,7
781
+ maxin,1
782
+ micah,1
783
+ michael,20
784
+ millane,1
785
+ miller,1
786
+ mitchell,22
787
+ nathan,14
788
+ ned,3
789
+ nicholas,25
790
+ nicolas,1
791
+ noah,13
792
+ oakleigh,1
793
+ oliver,14
794
+ oscar,6
795
+ owen,1
796
+ patrick,6
797
+ paul,1
798
+ pearson,1
799
+ peter,3
800
+ philip,1
801
+ phillip,1
802
+ phoenix,1
803
+ pino,1
804
+ quinn,2
805
+ reece,1
806
+ reeve,1
807
+ reuben,2
808
+ rhett,1
809
+ rhys,3
810
+ richard,2
811
+ ridley,1
812
+ riley,17
813
+ robbie,2
814
+ robert,4
815
+ robin,1
816
+ rohan,1
817
+ ronan,1
818
+ rory,1
819
+ rourke,1
820
+ roy,1
821
+ ruben,1
822
+ rupert,1
823
+ ryan,10
824
+ ryley,1
825
+ sachin,2
826
+ sam,8
827
+ samir,1
828
+ samuel,19
829
+ scott,2
830
+ seamus,1
831
+ sean,4
832
+ sebastian,4
833
+ seth,3
834
+ shane,3
835
+ shannon,1
836
+ shaun,2
837
+ shawn,1
838
+ silas,2
839
+ simon,1
840
+ solomon,1
841
+ spencer,1
842
+ spyke,1
843
+ stelio,1
844
+ stephen,1
845
+ steven,1
846
+ stirling,1
847
+ tai,1
848
+ talan,1
849
+ tanar,1
850
+ tasman,1
851
+ tate,1
852
+ thomas,33
853
+ timothy,11
854
+ toby,5
855
+ todd,1
856
+ tom,1
857
+ tommi-lee,1
858
+ tommy,2
859
+ tony,1
860
+ travis,3
861
+ trent,1
862
+ trevor,1
863
+ trey,2
864
+ tristan,3
865
+ troy,2
866
+ ty,1
867
+ tyler,5
868
+ tynan,2
869
+ tyron,1
870
+ tyrone,1
871
+ vincent,1
872
+ wade,1
873
+ warrick,1
874
+ wil,1
875
+ will,1
876
+ william,26
877
+ wilson,4
878
+ xavier,3
879
+ xepheren,1
880
+ zac,7
881
+ zach,1
882
+ zachariah,2
883
+ zachary,13
884
+ zack,1
885
+ zakariah,1
886
+ zane,4
887
+ zarran,1
888
+ zebediah,1
geco_data_generator/data/givenname_m_freq.csv ADDED
@@ -0,0 +1,373 @@
1
+ # =============================================================================
2
+ # givenname_m_freq.csv - Frequency table for male given names
3
+ #
4
+ # Sources: - Compiled from various web sites
5
+ #
6
+ # Last update: 11/04/2002, Peter Christen
7
+ # =============================================================================
8
+
9
+ # =============================================================================
10
+ # This table is in a two-column comma separated format, with the first column
11
+ # being the names and the second a corresponding frequency count.
12
+ # =============================================================================
13
+
14
+ aaron,2
15
+ adam,10
16
+ adrian,1
17
+ aidan,6
18
+ aiden,3
19
+ aidyn,1
20
+ ajay,1
21
+ alec,1
22
+ alex,4
23
+ alexander,15
24
+ aloysius,1
25
+ andrew,10
26
+ angus,3
27
+ anthony,3
28
+ anton,1
29
+ antonio,1
30
+ archer,2
31
+ archie,2
32
+ arren,1
33
+ ash,1
34
+ ashley,1
35
+ ashton,1
36
+ ayden,1
37
+ bailey,11
38
+ bailley,1
39
+ barkly,1
40
+ barnaby,2
41
+ baxter,1
42
+ bayden,1
43
+ bayley,2
44
+ beau,2
45
+ ben,2
46
+ benedict,1
47
+ benjamin,31
48
+ bertie,1
49
+ billy,1
50
+ blade,1
51
+ blaize,1
52
+ blake,11
53
+ blakeston,1
54
+ blayke,1
55
+ bodhi,1
56
+ bradley,3
57
+ braedon,3
58
+ braiden,1
59
+ brandon,2
60
+ brayden,2
61
+ brendan,1
62
+ brett,1
63
+ brinley,1
64
+ brock,1
65
+ brodee,2
66
+ brodie,2
67
+ brody,3
68
+ bryce,2
69
+ brydon,1
70
+ byron,1
71
+ cade,1
72
+ cain,2
73
+ caleb,10
74
+ callum,7
75
+ calvin,1
76
+ cameron,9
77
+ campbell,4
78
+ carlin,2
79
+ casey,1
80
+ charles,4
81
+ charlie,6
82
+ chase,1
83
+ christian,6
84
+ christopher,5
85
+ ciaran,1
86
+ clain,1
87
+ clement,1
88
+ coby,2
89
+ connor,18
90
+ cooper,13
91
+ corey,1
92
+ d'arcy,1
93
+ daen,1
94
+ dakota,1
95
+ dale,2
96
+ damien,2
97
+ daniel,21
98
+ daniele,1
99
+ danjel,1
100
+ danny,1
101
+ dante,4
102
+ darcy,3
103
+ david,5
104
+ deakin,4
105
+ deakyn,1
106
+ dean,2
107
+ declan,1
108
+ declen,1
109
+ devan,1
110
+ dillon,2
111
+ dimitri,1
112
+ dominic,2
113
+ douglas,1
114
+ drew,1
115
+ dylan,13
116
+ eden,1
117
+ edward,3
118
+ elijah,1
119
+ elki,1
120
+ elton,1
121
+ emmet,1
122
+ ethan,19
123
+ evan,4
124
+ ewan,1
125
+ fergus,2
126
+ finlay,3
127
+ finley,1
128
+ finn,3
129
+ finnbar,1
130
+ flynn,7
131
+ francesco,1
132
+ fraser,2
133
+ fynn,1
134
+ gabriel,3
135
+ garth,1
136
+ george,4
137
+ gianni,3
138
+ grayson,1
139
+ gregory,1
140
+ griffin,1
141
+ gus,1
142
+ hamish,4
143
+ hari,1
144
+ harley,2
145
+ harrison,18
146
+ harry,14
147
+ harvey,1
148
+ hayden,9
149
+ heath,2
150
+ henry,5
151
+ hudson,1
152
+ hugh,2
153
+ hugo,3
154
+ hunter,1
155
+ iain,1
156
+ isaac,4
157
+ isaiah,1
158
+ izaac,1
159
+ jack,35
160
+ jackson,12
161
+ jacob,20
162
+ jacobie,1
163
+ jaden,2
164
+ jaggah,1
165
+ jai,3
166
+ jaiden,2
167
+ jairus,1
168
+ jake,14
169
+ jakob,1
170
+ james,28
171
+ jamie,5
172
+ jared,4
173
+ jarod,1
174
+ jarred,1
175
+ jarrod,2
176
+ jarryd,1
177
+ jarvis,1
178
+ jason,1
179
+ jasper,5
180
+ jassim,1
181
+ jaxin,1
182
+ jaxson,1
183
+ jay,1
184
+ jayde,1
185
+ jayden,10
186
+ jaykob,1
187
+ jean-claude,1
188
+ jed,2
189
+ jeremiah,1
190
+ jeremy,1
191
+ jesse,5
192
+ jett,2
193
+ jock,1
194
+ joe,2
195
+ joel,20
196
+ john,3
197
+ john-paul,1
198
+ jonah,1
199
+ jonathon,1
200
+ jordan,11
201
+ jory,1
202
+ joseph,1
203
+ joshua,50
204
+ judah,1
205
+ justin,1
206
+ jye,4
207
+ ka,1
208
+ kade,1
209
+ kadin,1
210
+ kai,3
211
+ kale,1
212
+ kaleb,1
213
+ kane,6
214
+ kayden,1
215
+ kayne,1
216
+ kazuki,1
217
+ keaton,1
218
+ keegan,1
219
+ kenneth,1
220
+ kieran,1
221
+ kieren,2
222
+ kobe,3
223
+ koben,1
224
+ kody,1
225
+ konstantinos,1
226
+ kristo,1
227
+ ky,1
228
+ kydan,1
229
+ kye,2
230
+ kyle,16
231
+ kynan,2
232
+ lachlan,32
233
+ lachlan-john,1
234
+ latham,1
235
+ lawson,1
236
+ lee,1
237
+ leo,2
238
+ leon,2
239
+ levi,2
240
+ lewis,5
241
+ liam,24
242
+ lochlan,2
243
+ logan,3
244
+ louis,2
245
+ luca,1
246
+ lucas,10
247
+ luka,1
248
+ lukas,2
249
+ luke,18
250
+ lynton,1
251
+ mackenzie,1
252
+ mackinley,1
253
+ maconal,1
254
+ macormack,1
255
+ magnus,1
256
+ malakai,1
257
+ marco,1
258
+ marcus,4
259
+ mark,1
260
+ marko,1
261
+ mason,2
262
+ matteus,1
263
+ mattheo,1
264
+ matthew,22
265
+ max,7
266
+ maxin,1
267
+ micah,1
268
+ michael,20
269
+ millane,1
270
+ miller,1
271
+ mitchell,22
272
+ nathan,14
273
+ ned,3
274
+ nicholas,25
275
+ nicolas,1
276
+ noah,13
277
+ oakleigh,1
278
+ oliver,14
279
+ oscar,6
280
+ owen,1
281
+ patrick,6
282
+ paul,1
283
+ pearson,1
284
+ peter,3
285
+ philip,1
286
+ phillip,1
287
+ phoenix,1
288
+ pino,1
289
+ quinn,2
290
+ reece,1
291
+ reeve,1
292
+ reuben,2
293
+ rhett,1
294
+ rhys,3
295
+ richard,2
296
+ ridley,1
297
+ riley,17
298
+ robbie,2
299
+ robert,4
300
+ robin,1
301
+ rohan,1
302
+ ronan,1
303
+ rory,1
304
+ rourke,1
305
+ roy,1
306
+ ruben,1
307
+ rupert,1
308
+ ryan,10
309
+ ryley,1
310
+ sachin,2
311
+ sam,8
312
+ samir,1
313
+ samuel,19
314
+ scott,2
315
+ seamus,1
316
+ sean,4
317
+ sebastian,4
318
+ seth,3
319
+ shane,3
320
+ shannon,1
321
+ shaun,2
322
+ shawn,1
323
+ silas,2
324
+ simon,1
325
+ solomon,1
326
+ spencer,1
327
+ spyke,1
328
+ stelio,1
329
+ stephen,1
330
+ steven,1
331
+ stirling,1
332
+ tai,1
333
+ talan,1
334
+ tanar,1
335
+ tasman,1
336
+ tate,1
337
+ thomas,33
338
+ timothy,11
339
+ toby,5
340
+ todd,1
341
+ tom,1
342
+ tommi-lee,1
343
+ tommy,2
344
+ tony,1
345
+ travis,3
346
+ trent,1
347
+ trevor,1
348
+ trey,2
349
+ tristan,3
350
+ troy,2
351
+ ty,1
352
+ tyler,5
353
+ tynan,2
354
+ tyron,1
355
+ tyrone,1
356
+ vincent,1
357
+ wade,1
358
+ warrick,1
359
+ wil,1
360
+ will,1
361
+ william,26
362
+ wilson,4
363
+ xavier,3
364
+ xepheren,1
365
+ zac,7
366
+ zach,1
367
+ zachariah,2
368
+ zachary,13
369
+ zack,1
370
+ zakariah,1
371
+ zane,4
372
+ zarran,1
373
+ zebediah,1
geco_data_generator/data/ocr-variations-upper-lower.csv ADDED
@@ -0,0 +1,51 @@
1
+ # OCR character variations
2
+ #
3
+ 5,S
4
+ 5,s
5
+ 2,Z
6
+ 2,z
7
+ 1,|
8
+ 6,G
9
+ g,9
10
+ q,9
11
+ q,4
12
+ B,8
13
+ A,4
14
+ 0,o
15
+ 0,O
16
+ m,n
17
+ u,v
18
+ U,V
19
+ Y,V
20
+ y,v
21
+ D,O
22
+ Q,O
23
+ F,P
24
+ E,F
25
+ l,J
26
+ j,i
27
+ l,1
28
+ g,q
29
+ h,b
30
+ l,I
31
+ i,'l
32
+ 13,B
33
+ 12,R
34
+ 17,n
35
+ iii,m
36
+ cl,d
37
+ w,vv
38
+ ri,n
39
+ k,lc
40
+ lo,b
41
+ IJ,U
42
+ lJ,U
43
+ LI,U
44
+ I-I,H
45
+ l>,b
46
+ 1>,b
47
+ l<,k
48
+ 1<,k
49
+ m,rn
50
+ l,|
51
+ i,:
geco_data_generator/data/ocr-variations.csv ADDED
@@ -0,0 +1,31 @@
1
+ # OCR character variations
2
+ #
3
+ 5,s
4
+ 2,z
5
+ 1,|
6
+ g,9
7
+ q,9
8
+ q,4
9
+ 0,o
10
+ m,n
11
+ u,v
12
+ y,v
13
+ j,l
14
+ j,i
15
+ l,1
16
+ h,b
17
+ l,i
18
+ i,'l
19
+ iii,m
20
+ cl,d
21
+ w,vv
22
+ ri,n
23
+ k,lc
24
+ lo,b
25
+ l>,b
26
+ 1>,b
27
+ l<,k
28
+ 1<,k
29
+ m,rn
30
+ l,|
31
+ i,:
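Each row of this table pairs a character sequence with an OCR look-alike. Applying one such substitution to a value could look like the sketch below (illustrative only, not the OCR corruptor shipped in the package).

    import csv, random

    def load_ocr_pairs(file_name):
        pairs = []
        for row in csv.reader(open(file_name)):
            if not row or row[0].startswith('#'):
                continue                       # skip comment lines
            pairs.append((row[0], row[1]))
        return pairs

    def ocr_corrupt(value, pairs):
        # Try the substitution pairs in random order and apply the first one
        # whose left-hand pattern occurs in the value.
        for orig, sub in random.sample(pairs, len(pairs)):
            pos = value.find(orig)
            if pos >= 0:
                return value[:pos] + sub + value[pos + len(orig):]
        return value                           # no pattern matched

    pairs = load_ocr_pairs('ocr-variations.csv')
    print(ocr_corrupt('william', pairs))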
geco_data_generator/data/phonetic-variations.csv ADDED
@@ -0,0 +1,358 @@
1
+ # Phonetic variation patterns, developed by Agus Pudjijono, ANU, 2008
2
+ #
3
+ ALL,h,@,None,None,None,None
4
+ END,e,@,None,None,None,None
5
+ ALL,t,d,None,None,None,None
6
+ ALL,d,t,None,None,None,None
7
+ ALL,c,k,None,None,None,None
8
+ ALL,w,@,None,None,None,None
9
+ ALL,nn,n,None,None,None,None
10
+ ALL,ll,l,None,None,None,None
11
+ ALL,ee,i,None,None,None,None
12
+ ALL,z,s,None,None,None,None
13
+ ALL,s,z,None,None,None,None
14
+ ALL,ie,i,None,None,None,None
15
+ END,y,i,None,None,None,None
16
+ ALL,g,k,None,None,None,None
17
+ ALL,th,t,None,None,None,None
18
+ ALL,rr,r,None,None,None,None
19
+ ALL,b,p,None,None,None,None
20
+ ALL,j,s,None,None,None,None
21
+ ALL,ey,y,None,None,None,None
22
+ ALL,s,j,None,None,None,None
23
+ ALL,tt,t,None,None,None,None
24
+ ALL,ei,i,None,None,None,None
25
+ ALL,ou,o,None,None,None,None
26
+ ALL,au,o,None,None,None,None
27
+ ALL,ck,k,None,None,None,None
28
+ ALL,ia,ya,None,None,None,None
29
+ ALL,j,z,None,None,None,None
30
+ ALL,z,j,None,None,None,None
31
+ ALL,k,ck,None,None,None,None
32
+ ALL,ya,ia,None,None,None,None
33
+ ALL,v,f,None,None,None,None
34
+ ALL,f,v,None,None,None,None
35
+ ALL,s,st,None,None,None,None
36
+ ALL,oo,u,None,None,None,None
37
+ ALL,ph,f,None,None,None,None
38
+ ALL,mm,m,None,None,None,None
39
+ ALL,ce,se,None,None,None,None
40
+ ALL,ry,rie,None,None,None,None
41
+ ALL,rie,ry,None,None,None,None
42
+ ALL,ff,f,None,None,None,None
43
+ ALL,x,@,None,None,None,None
44
+ ALL,q,k,None,None,None,None
45
+ END,ss,s,None,None,None,None
46
+ MIDDLE,j,y,V,V,None,None
47
+ ALL,sh,x,None,None,None,None
48
+ START,ha,h,None,None,None,None
49
+ ALL,ng,nk,None,None,None,None
50
+ END,es,s,None,None,None,None
51
+ ALL,pp,p,None,None,None,None
52
+ START,ch,x,None,None,None,None
53
+ ALL,dd,t,None,None,None,None
54
+ ALL,nk,ng,None,None,None,None
55
+ ALL,sch,sh,None,None,None,None
56
+ ALL,ci,si,None,None,None,None
57
+ ALL,aa,ar,None,None,None,None
58
+ ALL,kk,k,None,None,None,None
59
+ START,ho,h,None,None,None,None
60
+ END,ee,ea,None,None,None,None
61
+ END,sz,s,None,None,None,None
62
+ START,ts,t,None,V,None,None
63
+ ALL,sz,s,None,None,None,None
64
+ ALL,cc,k,None,None,None,None
65
+ ALL,gg,k,None,None,None,None
66
+ START,he,h,None,None,None,None
67
+ MIDDLE,gh,@,y;-2;b;h;d,None,None,None
68
+ END,r,ah,V,None,None,None
69
+ ALL,tch,ch,None,None,None,None
70
+ START,hu,h,None,None,None,None
71
+ MIDDLE,ch,x,None,None,None,None
72
+ MIDDLE,z,s,None,V,None,None
73
+ ALL,zz,s,None,None,None,None
74
+ END,re,ar,None,None,None,None
75
+ START,sl,s,None,None,None,None
76
+ ALL,cia,sia,None,None,None,None
77
+ START,oh,h,None,None,None,None
78
+ ALL,bb,p,None,None,None,None
79
+ ALL,sc,sk,None,None,None,None
80
+ ALL,cy,si,None,None,None,None
81
+ START,ah,h,None,None,None,None
82
+ ALL,zs,s,None,None,None,None
83
+ ALL,ca,ka,None,None,None,None
84
+ ALL,dg,g,None,None,None,None
85
+ ALL,yth,ith,None,None,None,None
86
+ ALL,cy,sy,None,None,None,None
87
+ START,kn,n,None,None,None,None
88
+ START,sn,s,None,None,None,None
89
+ START,hi,h,None,None,None,None
90
+ ALL,wha,wa,None,None,None,None
91
+ START,sm,s,None,None,None,None
92
+ ALL,isl,il,None,None,None,None
93
+ END,gh,e,V,None,None,None
94
+ ALL,co,ko,None,None,None,None
95
+ MIDDLE,gi,ji,None,None,None,None
96
+ ALL,cio,sio,None,None,None,None
97
+ END,ss,as,V,None,None,None
98
+ END,gn,n,None,None,None,None
99
+ END,gne,n,None,None,None,None
100
+ ALL,mps,ms,None,None,None,None
101
+ END,le,ile,C,None,None,None
102
+ ALL,whi,wi,None,None,None,None
103
+ ALL,tia,xia,None,None,None,None
104
+ MIDDLE,stl,sl,V,None,None,None
105
+ END,sch,x,None,None,None,None
106
+ ALL,cia,xia,None,None,None,None
107
+ ALL,jj,j,None,None,None,None
108
+ START,cy,s,None,None,None,None
109
+ MIDDLE,sch,x,None,None,None,None
110
+ ALL,cie,sie,None,None,None,None
111
+ START,cz,c,None,None,None,None
112
+ START,eh,h,None,None,None,None
113
+ ALL,tch,x,None,None,None,None
114
+ ALL,mpt,mt,None,None,None,None
115
+ ALL,cg,k,None,None,None,None
116
+ ALL,umb,um,None,None,None,None
117
+ ALL,gh,k,n;-1;i,None,None,None
118
+ START,hy,h,None,None,None,None
119
+ START,gn,n,None,None,None,None
120
+ ALL,sce,se,None,None,None,None
121
+ ALL,sci,si,None,None,None,None
122
+ END,hr,ah,V,None,None,None
123
+ END,mb,m,V,None,None,None
124
+ ALL,lough,low,None,None,None,None
125
+ ALL,why,wy,None,None,None,None
126
+ ALL,ght,t,None,None,None,None
127
+ ALL,whe,we,None,None,None,None
128
+ ALL,rz,rsh,None,None,None,None
129
+ START,chr,kr,None,V,None,None
130
+ ALL,cq,k,None,None,None,None
131
+ ALL,ghn,n,None,None,None,None
132
+ START,x,s,None,None,None,None
133
+ END,dl,dil,None,None,None,None
134
+ START,mn,n,None,V,None,None
135
+ START,pt,t,None,None,None,None
136
+ ALL,lle,le,None,None,None,None
137
+ ALL,qq,k,None,None,None,None
138
+ START,chh,kh,None,None,None,None
139
+ START,ih,h,None,None,None,None
140
+ MIDDLE,lj,ld,V,V,None,None
141
+ ALL,zz,ts,None,None,None,None
142
+ MIDDLE,ach,k,None,n;1;i;e,None,None
143
+ END,dt,t,None,None,None,None
144
+ ALL,td,t,None,None,None,None
145
+ END,ned,nd,None,None,None,None
146
+ ALL,lz,lsh,None,None,None,None
147
+ ALL,ghne,ne,None,None,None,None
148
+ MIDDLE,z,ts,C,None,None,None
149
+ START,cl,kl,None,V,None,None
150
+ START,pf,f,None,None,None,None
151
+ START,uh,h,None,None,None,None
152
+ START,tj,ch,None,V,None,None
153
+ START,gh,g,None,None,None,None
154
+ MIDDLE,gy,ky,n;-1;e;i,None,n;rgy;ogy,None
155
+ MIDDLE,r,ah,V,C,None,None
156
+ MIDDLE,cz,ch,None,None,None,None
157
+ ALL,cci,xi,None,None,None,None
158
+ END,tl,til,None,None,None,None
159
+ ALL,ough,of,None,None,None,None
160
+ ALL,ff,v,None,None,None,None
161
+ START,cr,kr,None,V,None,None
162
+ START,gh,@,None,None,None,None
163
+ ALL,cu,ku,None,None,None,None
164
+ MIDDLE,aggi,aji,None,None,None,None
165
+ END,ew,e,None,None,None,None
166
+ ALL,nc,nk,None,None,None,None
167
+ START,ps,s,None,None,None,None
168
+ ALL,xx,@,None,None,None,None
169
+ ALL,who,wo,None,None,None,None
170
+ ALL,wr,r,None,None,None,None
171
+ END,ow,o,None,None,None,None
172
+ ALL,sholm,solm,None,None,None,None
173
+ ALL,vv,f,None,None,None,None
174
+ ALL,gh,f,y;-1;u|y;-3;c;g;l;r;t,None,None,None
175
+ ALL,wh,h,None,None,None,None
176
+ ALL,wicz,wis,None,None,None,None
177
+ MIDDLE,q,kw,V,V,None,None
178
+ ALL,ysl,yl,None,None,None,None
179
+ ALL,vv,ff,None,None,None,None
180
+ ALL,ff,vv,None,None,None,None
181
+ START,pn,n,None,V,None,None
182
+ START,ghi,j,None,None,None,None
183
+ START,gin,kin,None,None,None,None
184
+ ALL,dge,je,None,None,None,None
185
+ ALL,whu,wu,None,None,None,None
186
+ ALL,tion,xion,None,None,None,None
187
+ END,gnes,ns,None,None,None,None
188
+ START,gy,ky,None,None,None,None
189
+ START,gie,kie,None,None,None,None
190
+ MIDDLE,mch,mk,None,None,None,None
191
+ ALL,jr,dr,None,None,None,None
192
+ ALL,xc,@,None,None,None,None
193
+ START,gey,key,None,None,None,None
194
+ START,vanch,vank,None,None,None,None
195
+ END,gc,k,None,None,None,None
196
+ END,jc,k,None,None,None,None
197
+ START,wr,r,None,None,None,None
198
+ ALL,ct,kt,None,None,None,None
199
+ ALL,btl,tl,None,None,None,None
200
+ ALL,augh,arf,None,None,None,None
201
+ START,q,kw,None,None,None,None
202
+ MIDDLE,gn,n,None,C,None,None
203
+ MIDDLE,wz,z,V,None,None,None
204
+ ALL,hroug,rew,None,None,None,None
205
+ START,yj,y,None,V,None,None
206
+ ALL,nx,nks,None,None,None,None
207
+ START,tsj,ch,None,V,None,None
208
+ MIDDLE,wsk,vskie,V,None,None,None
209
+ END,wsk,vskie,V,None,None,None
210
+ END,stl,sl,V,None,None,None
211
+ END,tnt,ent,None,None,None,None
212
+ END,eaux,oh,None,None,None,None
213
+ ALL,exci,ecs,None,None,None,None
214
+ ALL,x,ecs,None,None,None,None
215
+ MIDDLE,hr,ah,V,C,None,None
216
+ END,les,iles,C,None,None,None
217
+ ALL,mpts,mps,None,None,None,None
218
+ MIDDLE,bacher,baker,None,None,None,None
219
+ MIDDLE,macher,maker,None,None,None,None
220
+ START,caesar,sesar,None,None,None,None
221
+ ALL,chia,kia,None,None,None,None
222
+ MIDDLE,chae,kae,None,None,None,None
223
+ START,charac,karak,None,None,None,None
224
+ START,charis,karis,None,None,None,None
225
+ START,chor,kor,None,None,None,None
226
+ START,chym,kym,None,None,None,None
227
+ START,chia,kia,None,None,None,None
228
+ START,chem,kem,None,None,None,None
229
+ START,chl,kl,None,None,None,None
230
+ START,chn,kn,None,None,None,None
231
+ START,chm,km,None,None,None,None
232
+ START,chb,kb,None,None,None,None
233
+ START,chf,kf,None,None,None,None
234
+ START,chv,kv,None,None,None,None
235
+ START,chw,kw,None,None,None,None
236
+ ALL,achl,akl,None,None,None,None
237
+ ALL,ochl,okl,None,None,None,None
238
+ ALL,uchl,ukl,None,None,None,None
239
+ ALL,echl,ekl,None,None,None,None
240
+ ALL,achr,akr,None,None,None,None
241
+ ALL,ochr,okr,None,None,None,None
242
+ ALL,uchr,ukr,None,None,None,None
243
+ ALL,echr,ekr,None,None,None,None
244
+ ALL,achn,akn,None,None,None,None
245
+ ALL,ochn,okn,None,None,None,None
246
+ ALL,uchn,ukn,None,None,None,None
247
+ ALL,echn,ekn,None,None,None,None
248
+ ALL,achm,akm,None,None,None,None
249
+ ALL,ochm,okm,None,None,None,None
250
+ ALL,uchm,ukm,None,None,None,None
251
+ ALL,echm,ekm,None,None,None,None
252
+ ALL,achb,akb,None,None,None,None
253
+ ALL,ochb,okb,None,None,None,None
254
+ ALL,uchb,ukb,None,None,None,None
255
+ ALL,echb,ekb,None,None,None,None
256
+ ALL,achh,akh,None,None,None,None
257
+ ALL,ochh,okh,None,None,None,None
258
+ ALL,uchh,ukh,None,None,None,None
259
+ ALL,echh,ekh,None,None,None,None
260
+ ALL,achf,akf,None,None,None,None
261
+ ALL,ochf,okf,None,None,None,None
262
+ ALL,uchf,ukf,None,None,None,None
263
+ ALL,echf,ekf,None,None,None,None
264
+ ALL,achv,akv,None,None,None,None
265
+ ALL,ochv,okv,None,None,None,None
266
+ ALL,uchv,ukv,None,None,None,None
267
+ ALL,echv,ekv,None,None,None,None
268
+ ALL,achw,akw,None,None,None,None
269
+ ALL,ochw,okw,None,None,None,None
270
+ ALL,uchw,ukw,None,None,None,None
271
+ ALL,echw,ekw,None,None,None,None
272
+ ALL,cht,kt,None,None,None,None
273
+ ALL,chs,ks,None,None,None,None
274
+ ALL,orches,orkes,None,None,None,None
275
+ ALL,archit,arkit,None,None,None,None
276
+ ALL,orchid,orkid,None,None,None,None
277
+ START,sch,sk,None,None,None,None
278
+ START,vonch,vonk,None,None,None,None
279
+ ALL,acci,aksi,None,n;1;hu,None,None
280
+ ALL,acce,akse,None,n;1;hu,None,None
281
+ ALL,acch,aksh,None,n;1;hu,None,None
282
+ ALL,uccee,uksee,None,None,None,None
283
+ ALL,ucces,ukses,None,None,None,None
284
+ ALL,cce,xi,None,None,None,None
285
+ ALL,cch,xh,None,None,None,None
286
+ ALL,cc,k,None,None,None,None
287
+ ALL,cq,k,None,None,None,None
288
+ ALL,cg,k,None,None,None,None
289
+ ALL,dgi,ji,None,None,None,None
290
+ ALL,dgy,jy,None,None,None,None
291
+ ALL,dg,tk,None,None,None,None
292
+ ALL,dt,t,None,None,None,None
293
+ MIDDLE,gh,k,n;-1;a;i;u;e;o,None,None,None
294
+ START,gh,k,None,None,None,None
295
+ MIDDLE,gn,kn,y;-1;a;i;u;e;o,None,n;slavo,y;a;i;u;e;o
296
+ MIDDLE,gney,ney,None,None,n;slavo,None
297
+ MIDDLE,gli,kli,None,None,n;slavo,None
298
+ START,ges,kes,None,None,None,None
299
+ START,gep,kep,None,None,None,None
300
+ START,geb,keb,None,None,None,None
301
+ START,gel,kel,None,None,None,None
302
+ START,gib,kib,None,None,None,None
303
+ START,gil,kil,None,None,None,None
304
+ START,gei,kei,None,None,None,None
305
+ START,ger,ker,None,None,None,None
306
+ MIDDLE,ger,ker,n;-1;e;i,None,n;danger;ranger;manger,None
307
+ MIDDLE,ge,ke,None,None,None,y;van;von;sch
308
+ MIDDLE,gi,ki,None,None,None,y;van;von;sch
309
+ MIDDLE,aggi,aki,None,None,None,y;van;von;sch
310
+ MIDDLE,oggi,oki,None,None,None,y;van;von;sch
311
+ MIDDLE,oggi,oji,None,None,None,None
312
+ MIDDLE,ge,jy,None,None,None,None
313
+ MIDDLE,gy,jy,None,None,None,None
314
+ MIDDLE,gier,jier,None,None,None,None
315
+ MIDDLE,get,ket,None,None,None,None
316
+ START,yh,h,None,None,None,None
317
+ MIDDLE,sanjose,sanhose,None,None,None,None
318
+ END,illo,ilo,None,None,None,None
319
+ END,illa,ila,None,None,None,None
320
+ END,alle,ale,None,None,None,None
321
+ ALL,pb,p,None,None,None,None
322
+ START,sugar,xugar,None,None,None,None
323
+ ALL,sheim,seim,None,None,None,None
324
+ ALL,shoek,soek,None,None,None,None
325
+ ALL,sholz,solz,None,None,None,None
326
+ START,sw,s,None,None,None,None
327
+ ALL,scher,xer,None,None,None,None
328
+ ALL,schen,xen,None,None,None,None
329
+ ALL,schoo,skoo,None,None,None,None
330
+ ALL,schuy,skuy,None,None,None,None
331
+ ALL,sched,sked,None,None,None,None
332
+ ALL,schem,skem,None,None,None,None
333
+ START,sch,x,None,n;1;a;i;u;e;o;w,None,None
334
+ ALL,scy,sy,None,None,None,None
335
+ END,aiss,ai,None,None,None,None
336
+ END,aisz,ai,None,None,None,None
337
+ END,oiss,oi,None,None,None,None
338
+ END,oisz,oi,None,None,None,None
339
+ ALL,tth,t,None,None,None,None
340
+ END,aw,a,None,None,None,None
341
+ END,iw,i,None,None,None,None
342
+ END,uw,u,None,None,None,None
343
+ END,yw,y,None,None,None,None
344
+ ALL,witz,ts,None,None,None,None
345
+ ALL,wicz,ts,None,None,None,None
346
+ END,iaux,iauks,None,None,None,None
347
+ END,eaux,eauks,None,None,None,None
348
+ END,aux,auks,None,None,None,None
349
+ END,oux,ouks,None,None,None,None
350
+ ALL,zh,jh,None,None,None,None
351
+ END,i,y,None,None,None,None
352
+ ALL,zzo,so,None,None,None,None
353
+ ALL,zzi,si,None,None,None,None
354
+ ALL,zza,sa,None,None,None,None
355
+ MIDDLE,z,s,n;-1;t,None,y;slavo,None
356
+ MIDDLE,ks,x,None,None,None,None
357
+ MIDDLE,cks,x,y;-1;a;i;u;e;o,None,None,None
358
+ END,l,le,y;-1;ai,None,None,None
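Each row above encodes a phonetic rule: where it may apply (ALL, START, MIDDLE or END), the pattern to look for, its replacement, and four optional condition columns. Purely as an illustration of how an unconditional rule of this form could be applied to a name string (this is not the corruptor code added in this commit, which also honours the condition columns), a minimal sketch might look like:

def apply_phonetic_rule(name, where, pat, repl):
    # Apply one unconditional (position, pattern, replacement) rule to a name.
    if where == 'ALL':
        return name.replace(pat, repl)
    if where == 'START' and name.startswith(pat):
        return repl + name[len(pat):]
    if where == 'END' and name.endswith(pat):
        return name[:-len(pat)] + repl
    if where == 'MIDDLE' and pat in name[1:-1]:
        return name[0] + name[1:].replace(pat, repl, 1)
    return name

print(apply_phonetic_rule('stephen', 'ALL', 'ph', 'f'))   # stefen
print(apply_phonetic_rule('knight', 'START', 'kn', 'n'))  # night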
geco_data_generator/data/postcode_act_freq.csv ADDED
@@ -0,0 +1,43 @@
1
+ # =============================================================================
2
+ # postcode_act_freq.csv - Frequency table for postcodes in the ACT
3
+ #
4
+ # Based on 'Australia on Disk' data files (2002)
5
+ #
6
+ # Last update: 31/03/2005, Peter Christen
7
+ # =============================================================================
8
+
9
+ # =============================================================================
10
+ # This table is in a two-column comma separated format, with the first column
11
+ # being the values and the second a corresponding frequency count.
12
+ # =============================================================================
13
+
14
+ 2582,4
15
+ 2586,24
16
+ 2600,2912
17
+ 2601,546
18
+ 2602,10842
19
+ 2603,3455
20
+ 2604,3359
21
+ 2605,4581
22
+ 2606,3416
23
+ 2607,5504
24
+ 2608,4
25
+ 2609,287
26
+ 2611,9273
27
+ 2612,4157
28
+ 2614,7762
29
+ 2615,13417
30
+ 2617,8737
31
+ 2618,457
32
+ 2620,273
33
+ 2621,98
34
+ 2900,424
35
+ 2902,5781
36
+ 2903,3472
37
+ 2904,4525
38
+ 2905,9313
39
+ 2906,4848
40
+ 2911,34
41
+ 2912,65
42
+ 2913,6823
43
+ 2914,1164
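This two-column value,count format is what the GenerateFreqAttribute class in generator.py (added later in this commit) consumes. A minimal usage sketch, assuming the package is importable, that the leading comment lines are skipped by the CSV reader as the generator docstrings indicate, and that the relative path below is valid:

from geco_data_generator.generator import GenerateFreqAttribute

postcode_attr = GenerateFreqAttribute(
    attribute_name='postcode',
    freq_file_name='geco_data_generator/data/postcode_act_freq.csv',  # assumed path
    has_header_line=False,
    unicode_encoding='ascii',
)
print(postcode_attr.create_attribute_value())  # e.g. '2615', drawn according to the counts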
geco_data_generator/data/qwerty-keyboard.csv ADDED
@@ -0,0 +1,9 @@
1
+ # QWERTY keyboard layout
2
+ #
3
+ q,w,e,r,t,y,u,i,o,p
4
+ a,s,d,f,g,h,j,k,l
5
+ z,x,c,v,b,n,m
6
+ #
7
+ 7,8,9
8
+ 4,5,6
9
+ 1,2,3
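The rows above give the letter and digit layout used for modelling keyboard typos. As a rough illustration only (not the corruptor.py implementation shipped in this commit), such a layout can be turned into a map from each key to its horizontal neighbours:

rows = [list('qwertyuiop'), list('asdfghjkl'), list('zxcvbnm'),
        list('789'), list('456'), list('123')]

neighbours = {}
for row in rows:
    for i, key in enumerate(row):
        # Keys directly left and right of this key in the same row
        neighbours[key] = row[max(i - 1, 0):i] + row[i + 1:i + 2]

print(neighbours['s'])  # ['a', 'd']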
geco_data_generator/data/state-income.csv ADDED
@@ -0,0 +1,11 @@
1
+ # Look-up file for generating a compound attribute: state / income. For details
2
+ # see module generator.py, class GenerateCateContCompoundAttribute
3
+ #
4
+ act,7,uniform,20000,200000
5
+ nsw,25,uniform,25000,500000
6
+ vic,21,normal,45000,10000,15000,None
7
+ qld,14,uniform,20000,100000
8
+ nt,3,uniform,20000,100000
9
+ tas,8,normal,25000,20000,10000,None
10
+ sa,12,uniform,25000,250000
11
+ wa,10,normal,65000,60000,20000,None
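As the header comment says, this look-up drives the GenerateCateContCompoundAttribute class in generator.py (added later in this commit): each row gives a state, its count, and a uniform or normal model for the income values. A minimal usage sketch, with the path and value type below chosen as assumptions:

from geco_data_generator.generator import GenerateCateContCompoundAttribute

state_income = GenerateCateContCompoundAttribute(
    categorical_attribute_name='state',
    continuous_attribute_name='income',
    continuous_value_type='float2',   # 'int' or 'float1' .. 'float9'
    lookup_file_name='geco_data_generator/data/state-income.csv',  # assumed path
    has_header_line=False,
    unicode_encoding='ascii',
)
state_val, income_val = state_income.create_attribute_values()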
geco_data_generator/data/surname-freq-japanese.csv ADDED
@@ -0,0 +1,22 @@
1
+ ����,5
2
+ �–�,30
3
+ ��o,110
4
+ �ɏW�@,5
5
+ �ɓ�,2000
6
+ ���c,281
7
+ ����,872
8
+ ����,390
9
+ �ē�,901
10
+ ����,5016
11
+ ���R,90
12
+ ���,2098
13
+ �c��,2592
14
+ ���L,421
15
+ �y�c,120
16
+ �L�c,208
17
+ ���{,872
18
+ �X�c,398
19
+ �R�{,1104
20
+ �{�c,383
21
+ �g�c,981
22
+ �n��,123
geco_data_generator/data/surname-freq.csv ADDED
The diff for this file is too large to render. See raw diff
 
geco_data_generator/data/surname-misspell-japanese.csv ADDED
@@ -0,0 +1,14 @@
1
+ ����,����
2
+ ��o,���
3
+ �ɓ�,�ɓ�
4
+ �ē�,�V��
5
+ �ē�,�֓�
6
+ ���c,��c
7
+ ����,�Z��
8
+ ���R,��R
9
+ ���L,���A
10
+ ���L,���L
11
+ ���L,���A
12
+ �L�c,�A�c
13
+ �n��,�n�
14
+ �n��,�n�
geco_data_generator/data/surname-misspell.csv ADDED
@@ -0,0 +1,443 @@
1
+ # =============================================================================
2
+ # surname-misspell.csv - Lookup-table for surname misspellings used for the
3
+ # database generator
4
+ #
5
+ # - The word left of the comma is the correct spelling, right of the comma is
6
+ # the corresponding misspelling
7
+ # - Several misspellings can be given for the same correct spelling
8
+ #
9
+ # Last update: 18/03/2012, Peter Christen
10
+ # =============================================================================
11
+
12
+ aitchison,acheson
13
+ aitken,aiken
14
+ aldis,aldous
15
+ almond,allmond
16
+ althea,thea
17
+ arden,ardie
18
+ arland,arley
19
+ arland,arlie
20
+ arland,arly
21
+ armen,armand
22
+ arnoud,arnout
23
+ avice,avis
24
+ avice,awis
25
+
26
+ barclay,berkeley
27
+ baxter,bax
28
+ blythe,blithe
29
+ boleslaw,bolestlaw
30
+ bowden,bowen
31
+ boyle,oboyle
32
+ bree,obree
33
+ brice,bryce
34
+ brian,brien
35
+ brian,briant
36
+ brian,bryan
37
+ brian,bryant
38
+ brian,obrian
39
+ brian,obrien
40
+ brian,obryan
41
+ brinley,brindley
42
+ bruckner,brukner
43
+ burl,burlie
44
+ burrows,burroughs
45
+ bush,busch
46
+ byers,byas
47
+ byers,byass
48
+ byrne,byrnes
49
+
50
+ cain,caine
51
+ cain,cane
52
+ cain,conn
53
+ cain,kain
54
+ cain,kahn
55
+ cain,kon
56
+ cain,okane
57
+ cain,okeane
58
+ callaghan,callahan
59
+ callaghan,ocallaghan
60
+ calvert,calvat
61
+ campbell,cambell
62
+ car,kaare
63
+ carlisle,carlile
64
+ carlisle,carlyle
65
+ castle,cassel
66
+ castle,cassell
67
+ castle,kassel
68
+ chapman,chap
69
+ cletus,clete
70
+ cleveland,cleave
71
+ cleveland,cleve
72
+ clyne,klein
73
+ coldbeck,colbeck
74
+ coleman,colman
75
+ concepcion,conchita
76
+ connell,oconnell
77
+ connor,conner
78
+ connor,oconner
79
+ connor,oconnor
80
+ corbett,corby
81
+ cosh,koch
82
+ cramer,kramer
83
+ cramer,kraymer
84
+ cramer,kremer
85
+ creighton,crichton
86
+ crowe,krogh
87
+ cruise,crews
88
+ cruise,kruse
89
+ cummins,cummings
90
+ cyrus,cy
91
+
92
+ davidson,davison
93
+ davidson,davissen
94
+ davina,davida
95
+ dea,odea
96
+ dell,odell
97
+ denbeigh,denby
98
+ denholm,denham
99
+ dennis,dennys
100
+ desmond,desmon
101
+ dickson,dixon
102
+ dinh,din
103
+ dinh,dingh
104
+ dinning,dinnin
105
+ doherty,docherty
106
+ doherty,dougherty
107
+ doherty,odoherty
108
+ dolman,dollman
109
+ donaghue,odonaghue
110
+ donaghue,odonahoo
111
+ donaghue,odonahue
112
+ donnell,odonnell
113
+ donohoe,donoghue
114
+ donovan,odonovan
115
+ dowd,odowd
116
+ driscoll,odriscill
117
+ driscoll,odriscoll
118
+ dudley,dud
119
+ dudley,dudleigh
120
+ duff,duffy
121
+ duke,dukes
122
+ dwyer,odwyer
123
+
124
+ eames,ames
125
+ eames,amies
126
+ elder,elde
127
+ elms,elemes
128
+ engle,engel
129
+ engle,ingle
130
+ english,inglish
131
+ english,ingliss
132
+ eyres,ayers
133
+ eyres,ayres
134
+ ezekiel,zech
135
+ ezekiel,zeke
136
+
137
+ farrell,ofarrell
138
+ faulkner,falc
139
+ faulkner,falcon
140
+ faulkner,falconer
141
+ faulkner,falk
142
+ faulkner,falkiner
143
+ faulkner,fawkner
144
+ finlay,findlay
145
+ finlay,findley
146
+ fitzner,pfitzner
147
+ flaherty,oflaherty
148
+ flanagan,oflanagan
149
+ flynn,oflynn
150
+ ford,forde
151
+ forster,foster
152
+ freeman,freedman
153
+ french,ffrench
154
+ frost,ffrost
155
+
156
+ gallagher,ogallagher
157
+ gara,ogara
158
+ gaven,gavin
159
+ geraghty,garretty
160
+ geraghty,garrety
161
+ geraghty,garrity
162
+ gillman,gilman
163
+ gordon,gordie
164
+ gordon,gordy
165
+ gorman,ogorman
166
+ gough,goff
167
+ gower,ogower
168
+ grady,ogrady
169
+
170
+ hadleigh,hadley
171
+ hagan,ohagan
172
+ hallor,ohallor
173
+ halloran,ohalloran
174
+ han,ohan
175
+ hanaffian,ohanaffian
176
+ hancock,handcock
177
+ hanes,ohanes
178
+ hanessian,ohanessian
179
+ hanian,ohanian
180
+ hanley,hambley
181
+ hanley,hamley
182
+ hanley,handley
183
+ hanlon,ohanlon
184
+ hannes,ohannes
185
+ hannessian,ohannessian
186
+ har,ohar
187
+ hara,ohara
188
+ hara,oharae
189
+ hare,ohaiher
190
+ hare,ohaire
191
+ hare,ohare
192
+ hare,ohegir
193
+ hare,ohehar
194
+ hare,ohehier
195
+ hare,ohehir
196
+ hare,oheir
197
+ harford,hartford
198
+ harris,oharris
199
+ hart,ohart
200
+ hawkyard,halkyard
201
+ hayon,ohayon
202
+ hayward,haywood
203
+ hayward,heyward
204
+ hayward,heywood
205
+ hayward,howard
206
+ hazy,ohazy
207
+ hea,ohea
208
+ hearn,ohearn
209
+ hodgson,hodson
210
+ horton,hawtin
211
+ horton,houghton
212
+ hough,hoff
213
+ hudson,housten
214
+ hudson,houston
215
+ hudson,huston
216
+ hughes,hewes
217
+ humphrey,humfrey
218
+ humphrey,onofredo
219
+
220
+ irving,ervy
221
+ irving,irvin
222
+ irving,irvine
223
+ islay,isles
224
+
225
+ johnson,johnston
226
+ johnson,johnstone
227
+ junior,jnr
228
+ juris,yuris
229
+
230
+ kantor,cantor
231
+ karlsen,carlson
232
+ kavanagh,cavanagh
233
+ kavanagh,cavanough
234
+ kavanagh,cavenagh
235
+ kealley,keighley
236
+ kearney,carney
237
+ kearney,carnie
238
+ kearney,okearney
239
+ keefe,okeefe
240
+ keese,okeese
241
+ keil,okeil
242
+ keith,okeith
243
+ kel,okel
244
+ kennely,kennerley
245
+ kenneth,ken
246
+ kenneth,keneth
247
+ kenneth,kenethe
248
+ kenneth,kenith
249
+ kenneth,kennith
250
+ kenneth,kenny
251
+ kirby,kirkby
252
+ klap,clap
253
+ klap,clapp
254
+ kohen,coen
255
+ kohen,cohen
256
+ kohen,cohn
257
+ koster,costa
258
+ koster,coster
259
+
260
+ laco,olaco
261
+ langford,langsford
262
+ laslo,laszlo
263
+ laughton,lawton
264
+ leach,leitch
265
+ leary,oleary
266
+ lennox,leannox
267
+ lisle,lyal
268
+ lisle,lydall
269
+ lisle,lyle
270
+ lloyd,floyd
271
+ loan,oloan
272
+ lachlan,loughlin
273
+ lachlan,olachlan
274
+ lachlan,olaughlan
275
+ lachlan,ologhlin
276
+ lachlan,oloughlan
277
+ lynam,olynam
278
+ lyndon,lindon
279
+
280
+ mace,maze
281
+ maddock,maddocks
282
+ maher,maier
283
+ maher,mayar
284
+ maher,meagher
285
+ maher,omeagher
286
+ mahon,mann
287
+ mahony,mahoney
288
+ mahony,omahoney
289
+ mahony,omahony
290
+ mai,may
291
+ malley,omalley
292
+ malley,omally
293
+ malley,omeley
294
+ mara,omara
295
+ mara,omeara
296
+ mara,omera
297
+ marsh,march
298
+ massey,massie
299
+ matheson,matherson
300
+ mavis,mab
301
+ mervyn,merv
302
+ mervyn,mervin
303
+ meyer,myer
304
+ miles,myles
305
+ millicent,mildred
306
+ millie,milli
307
+ millie,milly
308
+ mitchell,michaal
309
+ mitchell,michaele
310
+ mitchell,michaell
311
+ mitchell,micheal
312
+ mitchell,michell
313
+ mitchell,mick
314
+ mitchell,micky
315
+ mitchell,mikcos
316
+ mitchell,mike
317
+ mitchell,mitch
318
+ montague,monty
319
+ mosley,morsley
320
+ mullane,omullane
321
+
322
+ nada,nadine
323
+ nains,onains
324
+ nevern,nevin
325
+ newbury,newby
326
+ norton,naughton
327
+ norton,naunton
328
+ nosworthy,norsworthy
329
+
330
+ ogden,oddie
331
+ ogden,oddy
332
+
333
+ panagiotis,panayotis
334
+ patterson,pattison
335
+ pearce,pearse
336
+ powers,powys
337
+ prendergast,pendergast
338
+ price,preiss
339
+ pritchard,prichard
340
+
341
+ quinton,quentin
342
+
343
+ raleigh,rawleigh
344
+ raleigh,rawley
345
+ raleigh,rowley
346
+ redman,redmond
347
+ reece,rees
348
+ regan,oregan
349
+ reilly,oreilly
350
+ reilly,orielly
351
+ reuben,rube
352
+ reuben,ruben
353
+ reynard,reyner
354
+ reynolds,reynol
355
+ reynolds,reynold
356
+ roach,roache
357
+ roach,roatch
358
+ roach,roche
359
+ robertson,roberson
360
+ robertson,robieson
361
+ robertson,robinson
362
+ roer,ruggiero
363
+ rogers,rodgers
364
+ rourke,orourke
365
+ ruthen,ruthven
366
+
367
+ sampson,samson
368
+ sanderson,sandison
369
+ sandford,sanford
370
+ schultz,scholz
371
+ schultz,schulz
372
+ schultz,schulze
373
+ schultz,shultz
374
+ schultz,shultze
375
+ schwartz,schwarz
376
+ seaton,seton
377
+ shana,oshana
378
+ shana,oshanna
379
+ shannessy,oshanassy
380
+ shannessy,oshanesy
381
+ shannessy,oshannessy
382
+ shannessy,oshaughnessy
383
+ shea,oshae
384
+ shea,oshea
385
+ sheehy,osheehy
386
+ sheils,shields
387
+ sheils,shiels
388
+ shell,schell
389
+ simmons,simmins
390
+ simmons,simmonds
391
+ simmons,simons
392
+ simmons,symanns
393
+ simmons,symonds
394
+ slavomir,slawomir
395
+ solomon,sol
396
+ solomon,saul
397
+ spiro,spiros
398
+ staughton,staunton
399
+ stbaker,saintbaker
400
+ stclair,saintclair
401
+ stclair,saintclaire
402
+ stclair,saintclare
403
+ stclair,stclaire
404
+ stclair,stclare
405
+ stcloud,saintcloud
406
+ stdenis,saintdenis
407
+ stdenis,saintdennis
408
+ stdenis,stdennis
409
+ stephens,stevens
410
+ stgeorge,saintgeorge
411
+ stjack,saintjack
412
+ stjohn,saintjohn
413
+ stjohnwood,saintjohnwood
414
+ stjulian,saintjulian
415
+ stlaurence,saintlaurence
416
+ stleon,saintleon
417
+ sullivan,osullivan
418
+
419
+ thompson,thomsen
420
+ thompson,thomson
421
+ toole,otoole
422
+
423
+ vaughn,vaughan
424
+
425
+ warner,warnie
426
+ weiner,wiener
427
+ weiner,wierner
428
+ weston,western
429
+ white,wight
430
+ whiteman,whitmann
431
+ whiteman,wightman
432
+ whitman,whit
433
+ wilmot,willmott
434
+ wiltshire,willshire
435
+ windsor,winsor
436
+
437
+ young,oyoung
438
+
439
+ zadie,zaidie
440
+ zbigneif,zbignev
441
+ zbigneif,zbignief
442
+ zbigneif,zbigniew
443
+ zdislaw,zdzislaw
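Each row above pairs a correct spelling with one misspelling, and a name may appear in several rows. Illustrative only (the generator and corruptor modules have their own loading code), the table can be collected into a variant dictionary like this:

import csv

variants = {}  # correct spelling -> list of misspellings
with open('surname-misspell.csv', encoding='ascii') as csv_file:  # assumed local path
    for row in csv.reader(csv_file):
        if not row or row[0].startswith('#') or len(row) < 2:
            continue  # skip blank lines and comments
        variants.setdefault(row[0].strip(), []).append(row[1].strip())

print(variants['cain'])  # e.g. ['caine', 'cane', 'conn', 'kain', 'kahn', 'kon', 'okane', 'okeane']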
geco_data_generator/generator.py ADDED
@@ -0,0 +1,2067 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Main classes to generate records and the data set
4
+ import random
5
+
6
+ from geco_data_generator import basefunctions
7
+
8
+
9
+ # =============================================================================
10
+ # Classes for generating a single attribute (field) of the data set
11
+ # =============================================================================
12
+
13
+
14
+ class GenerateAttribute:
15
+ """Base class for the definition of a single attribute (field) to be
16
+ generated.
17
+
18
+ This class and all of its derived classes provide methods that allow the
19
+ definition of a single attribute and the parameters necessary for its
20
+ generation.
21
+
22
+ The following variables need to be set when a GenerateAttribute instance
23
+ is initialised (with further parameters listed in the derived classes):
24
+
25
+ attribute_name The name of this attribute, which will be used in the
26
+ header line to be written into the output file.
27
+
28
+ Ideally, this attribute name should be short, not contain
29
+ spaces and it must not contain any quote or punctuation
30
+ characters.
31
+ """
32
+
33
+ # ---------------------------------------------------------------------------
34
+
35
+ def __init__(self, base_kwargs):
36
+ """Constructor, set general attributes."""
37
+
38
+ # General attributes for all attribute generators
39
+ #
40
+ self.attribute_name = None
41
+
42
+ # Process the keyword argument (all keywords specific to a certain data
43
+ # generator type were processed in the derived class constructor)
44
+ #
45
+ for (keyword, value) in base_kwargs.items():
46
+
47
+ if keyword.startswith('attribute'):
48
+ basefunctions.check_is_non_empty_string('attribute_name', value)
49
+ self.attribute_name = value
50
+
51
+ else:
52
+ raise Exception(
53
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
54
+ )
55
+
56
+ basefunctions.check_is_non_empty_string('attribute_name', self.attribute_name)
57
+
58
+ # Check the content of the attribute name string for certain characters
59
+ # that would pose problems when generating comma separated values (CSV)
60
+ # files.
61
+ #
62
+ if (
63
+ ("'" in self.attribute_name)
64
+ or ('"' in self.attribute_name)
65
+ or ("`" in self.attribute_name)
66
+ or (',' in self.attribute_name)
67
+ or (";" in self.attribute_name)
68
+ or ('\t' in self.attribute_name)
69
+ ):
70
+ raise Exception(
71
+ 'Illegal character (such as comma, semi-colon or '
72
+ + 'quote) in attribute name'
73
+ )
74
+
75
+ # ---------------------------------------------------------------------------
76
+
77
+ def create_attribute_value(self):
78
+ """Method which creates and returns one attribute value.
79
+ See implementations in derived classes for details.
80
+ """
81
+
82
+ raise Exception('Override abstract method in derived class')
83
+
84
+
85
+ # =============================================================================
86
+
87
+
88
+ class GenerateFreqAttribute(GenerateAttribute):
89
+ """Generate an attribute where values are retrieved from a lookup table that
90
+ contains categorical attribute values and their frequencies.
91
+
92
+ The additional arguments (besides the base class argument 'attribute_name')
93
+ that have to be set when this attribute type is initialised are:
94
+
95
+ freq_file_name The name of the file which contains the attribute values
96
+ and their frequencies.
97
+
98
+ This file must be in comma separated values (CSV) format
99
+ with the first column being the attribute values and the
100
+ second column their counts (positive integer numbers).
101
+
102
+ Each attribute value must only occur once in the
103
+ frequency file.
104
+
105
+ has_header_line A flag, set to True or False, that has to be set
106
+ according to whether the frequency file starts with a header
107
+ line or not.
108
+
109
+ unicode_encoding The Unicode encoding (a string name) of the file.
110
+ """
111
+
112
+ # ---------------------------------------------------------------------------
113
+
114
+ def __init__(self, **kwargs):
115
+ """Constructor. Process the derived keywords first, then call the base
116
+ class constructor.
117
+ """
118
+
119
+ self.attribute_type = 'Frequency'
120
+ self.freq_file_name = None
121
+ self.has_header_line = None
122
+ self.unicode_encoding = None
123
+ self.attr_value_list = [] # The list of attribute values to be loaded
124
+
125
+ # Process all keyword arguments
126
+ #
127
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
128
+
129
+ for (keyword, value) in kwargs.items():
130
+
131
+ if keyword.startswith('freq'):
132
+ basefunctions.check_is_non_empty_string('freq_file_name', value)
133
+ self.freq_file_name = value
134
+
135
+ elif keyword.startswith('has'):
136
+ basefunctions.check_is_flag('has_header_line', value)
137
+ self.has_header_line = value
138
+
139
+ elif keyword.startswith('unicode'):
140
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
141
+ self.unicode_encoding = value
142
+
143
+ else:
144
+ base_kwargs[keyword] = value
145
+
146
+ GenerateAttribute.__init__(self, base_kwargs) # Process base arguments
147
+
148
+ # Check if the necessary variables have been set
149
+ #
150
+ basefunctions.check_is_non_empty_string('freq_file_name', self.freq_file_name)
151
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
152
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
153
+
154
+ # Load the frequency file - - - - - - - - - - - - - - - - - - - - - - - -
155
+ #
156
+ header_list, freq_file_data = basefunctions.read_csv_file(
157
+ self.freq_file_name, self.unicode_encoding, self.has_header_line
158
+ )
159
+
160
+ val_dict = {} # The attribute values to be loaded from file and their
161
+ # counts or frequencies
162
+
163
+ # Process values from file and their frequencies
164
+ #
165
+ for line, rec_list in enumerate(freq_file_data):
166
+ if len(rec_list) != 2:
167
+ raise Exception(
168
+ 'Illegal format in frequency file %s: %s' % (self.freq_file_name, line)
169
+ )
170
+ line_val = rec_list[0].strip()
171
+ try:
172
+ line_count = int(rec_list[1])
173
+ except:
174
+ raise Exception(
175
+ 'Value count given is not an integer number: %s' % (rec_list[1])
176
+ )
177
+
178
+ if line_val == '':
179
+ raise Exception(
180
+ 'Empty attribute value in frequency file %s' % (self.freq_file_name)
181
+ )
182
+ basefunctions.check_is_positive('line_count', line_count)
183
+
184
+ if line_val in val_dict:
185
+ raise Exception(
186
+ 'Attribute value "%s" occurs twice in ' % (line_val)
187
+ + 'frequency file %s' % (self.freq_file_name)
188
+ )
189
+
190
+ val_dict[line_val] = line_count
191
+
192
+ val_list = [] # The list of attribute values, with values repeated
193
+ # according to their frequencies
194
+
195
+ # Generate a list of values according to their counts
196
+ #
197
+ for (attr_val, val_count) in val_dict.items():
198
+
199
+ # Append value as many times as given in their counts
200
+ #
201
+ new_list = [attr_val] * val_count
202
+ val_list += new_list
203
+
204
+ random.shuffle(val_list) # Randomly shuffle the list of values
205
+
206
+ self.attr_value_list = val_list
207
+
208
+ # ---------------------------------------------------------------------------
209
+
210
+ def create_attribute_value(self):
211
+ """Method which creates and returns one attribute value randomly selected
212
+ from the attribute value lookup table.
213
+ """
214
+
215
+ assert self.attr_value_list != []
216
+
217
+ return random.choice(self.attr_value_list)
218
+
219
+
220
+ # =============================================================================
221
+
222
+
223
+ class GenerateFuncAttribute(GenerateAttribute):
224
+ """Generate an attribute where values are retrieved from a function that
225
+ creates values according to some specification.
226
+
227
+ Such functions include creating telephone numbers or social security
228
+ numbers with a certain structure, or numerical values normally or
229
+ uniformly distributed according to some parameter setting.
230
+
231
+ The additional arguments (besides the base class argument 'attribute_name')
232
+ that have to be set when this attribute type is initialised are:
233
+
234
+ function A Python function that, when called, has to return a string
235
+ value that is created according to some specification.
236
+
237
+ parameters A list of one or more parameters (maximum 5) passed to the
238
+ function when it is called.
239
+ """
240
+
241
+ # ---------------------------------------------------------------------------
242
+
243
+ def __init__(self, **kwargs):
244
+ """Constructor. Process the derived keywords first, then call the base
245
+ class constructor.
246
+ """
247
+
248
+ self.attribute_type = 'Function'
249
+ self.function = None
250
+ self.parameters = None
251
+
252
+ # Process all keyword arguments
253
+ #
254
+ base_kwargs = {} # Dictionary, will contain unprocessed arguments
255
+
256
+ for (keyword, value) in kwargs.items():
257
+
258
+ if keyword.startswith('funct'):
259
+ basefunctions.check_is_function_or_method('function', value)
260
+ self.function = value
261
+
262
+ elif keyword.startswith('para'):
263
+ basefunctions.check_is_list('parameters', value)
264
+ if len(value) > 5:
265
+ raise Exception('Maximum five parameters allowed for function call')
266
+ self.parameters = value
267
+
268
+ else:
269
+ base_kwargs[keyword] = value
270
+
271
+ GenerateAttribute.__init__(self, base_kwargs) # Process base arguments
272
+
273
+ # Check if the necessary variables have been set
274
+ #
275
+ basefunctions.check_is_function_or_method('function', self.function)
276
+
277
+ # Check if the function does return a string (five different possibilities,
278
+ # depending upon number of parameters)
279
+ #
280
+ if (self.parameters == None) or (len(self.parameters) == 0):
281
+ funct_ret = self.function()
282
+ elif len(self.parameters) == 1:
283
+ funct_ret = self.function(self.parameters[0])
284
+ elif len(self.parameters) == 2:
285
+ funct_ret = self.function(self.parameters[0], self.parameters[1])
286
+ elif len(self.parameters) == 3:
287
+ funct_ret = self.function(
288
+ self.parameters[0], self.parameters[1], self.parameters[2]
289
+ )
290
+ elif len(self.parameters) == 4:
291
+ funct_ret = self.function(
292
+ self.parameters[0],
293
+ self.parameters[1],
294
+ self.parameters[2],
295
+ self.parameters[3],
296
+ )
297
+ else:
298
+ funct_ret = self.function(
299
+ self.parameters[0],
300
+ self.parameters[1],
301
+ self.parameters[2],
302
+ self.parameters[3],
303
+ self.parameters[4],
304
+ )
305
+
306
+ if not isinstance(funct_ret, str):
307
+ raise Exception(
308
+ (
309
+ 'Function provided does not return a string value:',
310
+ self.function,
311
+ type(funct_ret),
312
+ )
313
+ )
314
+
315
+ # ---------------------------------------------------------------------------
316
+
317
+ def create_attribute_value(self):
318
+ """Method which creates and returns one attribute value generated by the
319
+ function provided.
320
+ """
321
+
322
+ if (self.parameters == None) or (len(self.parameters) == 0):
323
+ funct_ret = self.function()
324
+ elif len(self.parameters) == 1:
325
+ funct_ret = self.function(self.parameters[0])
326
+ elif len(self.parameters) == 2:
327
+ funct_ret = self.function(self.parameters[0], self.parameters[1])
328
+ elif len(self.parameters) == 3:
329
+ funct_ret = self.function(
330
+ self.parameters[0], self.parameters[1], self.parameters[2]
331
+ )
332
+ elif len(self.parameters) == 4:
333
+ funct_ret = self.function(
334
+ self.parameters[0],
335
+ self.parameters[1],
336
+ self.parameters[2],
337
+ self.parameters[3],
338
+ )
339
+ else:
340
+ funct_ret = self.function(
341
+ self.parameters[0],
342
+ self.parameters[1],
343
+ self.parameters[2],
344
+ self.parameters[3],
345
+ self.parameters[4],
346
+ )
347
+ return funct_ret
348
+
349
+
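# --- Editor's illustration, not part of generator.py -------------------------
# A minimal usage sketch for GenerateFuncAttribute; the generating function and
# its two parameters below are made up for this example (any callable that
# returns a string will do):
#
#   import random
#   from geco_data_generator.generator import GenerateFuncAttribute
#
#   def rand_phone(prefix, num_digits):
#       return prefix + ''.join(random.choice('0123456789')
#                               for _ in range(num_digits))
#
#   phone_attr = GenerateFuncAttribute(attribute_name='telephone',
#                                      function=rand_phone,
#                                      parameters=['02 ', 8])
#   phone_attr.create_attribute_value()   # e.g. '02 40371235'
# ------------------------------------------------------------------------------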
350
+ # =============================================================================
351
+ # Classes for generating compound attributes (fields) of the data set
352
+ # =============================================================================
353
+
354
+
355
+ class GenerateCompoundAttribute:
356
+ """Base class for the definition of compound attributes (fields) to be
357
+ generated.
358
+
359
+ This class and all of its derived classes provide methods that allow the
360
+ definition of several (at least two) attributes and the parameters
361
+ necessary for their generation.
362
+
363
+ This base class does not have any generic variables that need to be set.
364
+ """
365
+
366
+ # ---------------------------------------------------------------------------
367
+
368
+ def __init__(self, base_kwargs):
369
+ """Constructor. See implementations in derived classes for details."""
370
+
371
+ raise Exception('Override abstract method in derived class')
372
+
373
+ # ---------------------------------------------------------------------------
374
+
375
+ def create_attribute_value(self):
376
+ """Method which creates and returns several (compound) attribute values.
377
+ See implementations in derived classes for details.
378
+ """
379
+
380
+ raise Exception('Override abstract method in derived class')
381
+
382
+
383
+ # =============================================================================
384
+
385
+
386
+ class GenerateCateCateCompoundAttribute(GenerateCompoundAttribute):
387
+ """Generate two attributes, both containing categorical values, where the
388
+ values of the second attribute depend upon the values in the first
389
+ attribute.
390
+
391
+ This for example allows the modelling of:
392
+ - city location values that depend upon gender values, or
393
+ - medication name values that depend upon gender values.
394
+
395
+ The arguments that have to be set when this attribute type is initialised
396
+ are:
397
+
398
+ categorical1_attribute_name The name of the first categorical attribute
399
+ that will be generated. This name will be
400
+ used in the header line to be written into
401
+ the output file.
402
+
403
+ categorical2_attribute_name The name of the second categorical attribute
404
+ that will be generated. This name will be
405
+ used in the header line to be written into
406
+ the output file.
407
+
408
+ lookup_file_name Name of the file which contains the values of
409
+ the first categorical attribute, and for each
410
+ of these values the names of the categories
411
+ and their counts of the second categorical
412
+ attribute. This file format is further
413
+ explained below.
414
+
415
+ has_header_line A flag, set to True or False, that has to be
416
+ set according to whether the look-up file starts
417
+ with a header line or not.
418
+
419
+ unicode_encoding The Unicode encoding (a string name) of the
420
+ file.
421
+
422
+ The format of the look-up file is:
423
+
424
+ # Comment lines start with the # character
425
+ cate_attr1_val,count,cate_attr2_val1,count1,cate_attr2_val2,count2, \
426
+ cate_attr2_val3,count3,cate_attr2_val4,count4, ...
427
+
428
+ The look-up file is a comma separated values (CSV) file which contains
429
+ two types of rows:
430
+ A) The first type of row contains the following columns:
431
+ 1) A categorical value. For all possible values of the first
432
+ categorical attribute, one row must be specified in this look-up
433
+ file.
434
+ 2) Count of this categorical value (a positive integer number). This
435
+ determines the likelihood of how often a certain categorical value
436
+ will be chosen. This count must be a positive integer number.
437
+ 3) The first categorical value of the second attribute.
438
+ 4) The count (positive integer number) of this first categorical
439
+ value.
440
+ 5) The second categorical value of the second attribute.
441
+ 6) The count of this second categorical value.
442
+
443
+ ...
444
+
445
+ X) A '\' character, which indicates that the following line (row)
446
+ contains further categorical values and their counts from the
447
+ second attribute.
448
+
449
+ B) The second type of row contains the following columns:
450
+ 1) A categorical value of the second attribute.
451
+ 2) The count of this categorical value.
452
+ 3) Another categorical value of the second attribute.
453
+ 4) The count of this categorical value.
454
+
455
+ ...
456
+
457
+ Example:
458
+ male,60,canberra,7, \
459
+ sydney,30,melbourne,45, \
460
+ perth,18
461
+ female,40,canberra,10,sydney,40, \
462
+ melbourne,20,brisbane,30,hobart,5,\
463
+ perth,20
464
+ """
465
+
466
+ # ---------------------------------------------------------------------------
467
+
468
+ def __init__(self, **kwargs):
469
+ """Constructor. Process the derived keywords first, then call the base
470
+ class constructor.
471
+ """
472
+
473
+ # General attributes for all data set generators
474
+ #
475
+ self.number_of_atttributes = 2
476
+ self.attribute_type = 'Compound-Categorical-Categorical'
477
+
478
+ for (keyword, value) in kwargs.items():
479
+
480
+ if keyword.startswith('categorical1'):
481
+ basefunctions.check_is_non_empty_string(
482
+ 'categorical1_attribute_name', value
483
+ )
484
+ self.categorical1_attribute_name = value
485
+
486
+ elif keyword.startswith('categorical2'):
487
+ basefunctions.check_is_non_empty_string(
488
+ 'categorical2_attribute_name', value
489
+ )
490
+ self.categorical2_attribute_name = value
491
+
492
+ elif keyword.startswith('look'):
493
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
494
+ self.lookup_file_name = value
495
+
496
+ elif keyword.startswith('has'):
497
+ basefunctions.check_is_flag('has_header_line', value)
498
+ self.has_header_line = value
499
+
500
+ elif keyword.startswith('unicode'):
501
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
502
+ self.unicode_encoding = value
503
+
504
+ else:
505
+ raise Exception(
506
+ 'Illegal constructor argument keyword: "%s"'
507
+ % (str(keyword))
508
+ )
509
+
510
+ # Check if the necessary variables have been set
511
+ #
512
+ basefunctions.check_is_non_empty_string(
513
+ 'categorical1_attribute_name', self.categorical1_attribute_name
514
+ )
515
+ basefunctions.check_is_non_empty_string(
516
+ 'categorical2_attribute_name', self.categorical2_attribute_name
517
+ )
518
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
519
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
520
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
521
+
522
+ if self.categorical1_attribute_name == self.categorical2_attribute_name:
523
+ raise Exception('Both attribute names are the same')
524
+
525
+ # Load the lookup file - - - - - - - - - - - - - - - - - - - - - - - - - -
526
+ #
527
+ header_list, lookup_file_data = basefunctions.read_csv_file(
528
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
529
+ )
530
+
531
+ cate_val1_dict = {} # The categorical values from attribute 1 to be loaded
532
+ # from file and their counts.
533
+
534
+ cate_val2_dict = {} # The categorical values from attribute 1 to be loaded
535
+ # as keys and lists of categorical values (according
536
+ # to their counts) from attribute 2 as values.
537
+
538
+ # Process attribute values from file and their details
539
+ #
540
+ i = 0 # Line counter in file data
541
+
542
+ while i < len(lookup_file_data):
543
+ rec_list = lookup_file_data[i]
544
+
545
+ # First line must contain categorical value of the first attribute
546
+ #
547
+ if len(rec_list) < 2: # Need at least two values in each line
548
+ raise Exception(
549
+ 'Illegal format in lookup file %s: %s'
550
+ % (self.lookup_file_name, str(rec_list))
551
+ )
552
+ cate_attr1_val = rec_list[0].strip()
553
+ try:
554
+ cate_attr1_count = int(rec_list[1])
555
+ except:
556
+ raise Exception(
557
+ 'Value count given for attribute 1 is not an '
558
+ + 'integer number: %s' % (rec_list[1])
559
+ )
560
+
561
+ if cate_attr1_val == '':
562
+ raise Exception(
563
+ 'Empty categorical attribute 1 value in lookup '
564
+ + 'file %s' % (self.lookup_file_name)
565
+ )
566
+ basefunctions.check_is_positive('cate_attr1_count', cate_attr1_count)
567
+
568
+ if cate_attr1_val in cate_val1_dict:
569
+ raise Exception(
570
+ 'Attribute 1 value "%s" occurs twice in ' % (cate_attr1_val)
571
+ + 'lookup file %s' % (self.lookup_file_name)
572
+ )
573
+
574
+ cate_val1_dict[cate_attr1_val] = cate_attr1_count
575
+
576
+ # Process values for second categorical attribute in this line
577
+ #
578
+ cate_attr2_data = rec_list[2:] # All values and counts of attribute 2
579
+
580
+ this_cate_val2_dict = {} # Values in second categorical attribute for
581
+ # this categorical value from first attribute
582
+
583
+ while cate_attr2_data != []:
584
+ if len(cate_attr2_data) == 1:
585
+ if cate_attr2_data[0] != '\\':
586
+ raise Exception(
587
+ 'Line in categorical look-up file has illegal format.'
588
+ )
589
+ # Get the next record from file data with a continuation of the
590
+ # categorical values from the second attribute
591
+ #
592
+ i += 1
593
+ cate_attr2_data = lookup_file_data[i]
594
+ if len(cate_attr2_data) < 2:
595
+ raise Exception(
596
+ 'Illegal format in lookup file %s: %s'
597
+ % (self.lookup_file_name, str(cate_attr2_data))
598
+ )
599
+
600
+ cate_attr2_val = cate_attr2_data[0]
601
+ try:
602
+ cate_attr2_count = int(cate_attr2_data[1])
603
+ except:
604
+ raise Exception(
605
+ 'Value count given for attribute 2 is not an '
606
+ + 'integer number: %s' % (cate_attr2_data[1])
607
+ )
608
+
609
+ if cate_attr2_val == '':
610
+ raise Exception(
611
+ 'Empty categorical attribute 2 value in lookup'
612
+ + ' file %s' % (self.lookup_file_name)
613
+ )
614
+ basefunctions.check_is_positive('cate_attr2_count', cate_attr2_count)
615
+
616
+ if cate_attr2_val in cate_val2_dict:
617
+ raise Exception(
618
+ 'Attribute 2 value "%s" occurs twice in ' % (cate_attr2_val)
619
+ + 'lookup file %s' % (self.lookup_file_name)
620
+ )
621
+
622
+ this_cate_val2_dict[cate_attr2_val] = cate_attr2_count
623
+
624
+ cate_attr2_data = cate_attr2_data[2:]
625
+
626
+ # Generate a list of values according to their counts
627
+ #
628
+ cate_attr2_val_list = []
629
+
630
+ for (cate_attr2_val, val2_count) in this_cate_val2_dict.items():
631
+
632
+ # Append value as many times as given in their counts
633
+ #
634
+ new_list = [cate_attr2_val] * val2_count
635
+ cate_attr2_val_list += new_list
636
+
637
+ random.shuffle(cate_attr2_val_list) # Randomly shuffle the list of values
638
+
639
+ cate_val2_dict[cate_attr1_val] = cate_attr2_val_list
640
+
641
+ # Go to next line in file data
642
+ #
643
+ i += 1
644
+
645
+ # Generate a list of values according to their counts
646
+ #
647
+ cate_attr1_val_list = []
648
+
649
+ for (cate_attr1_val, val1_count) in cate_val1_dict.items():
650
+
651
+ # Append value as many times as given in their counts
652
+ #
653
+ new_list = [cate_attr1_val] * val1_count
654
+ cate_attr1_val_list += new_list
655
+
656
+ random.shuffle(cate_attr1_val_list) # Randomly shuffle the list of values
657
+
658
+ self.cate_attr1_val_list = cate_attr1_val_list
659
+ self.cate_val2_dict = cate_val2_dict
660
+
661
+ # ---------------------------------------------------------------------------
662
+
663
+ def create_attribute_values(self):
664
+ """Method which creates and returns two categorical attribute values, where
665
+ the second value depends upon the first value. Both categorical values
666
+ are randomly selected according to the provided frequency distributions.
667
+ """
668
+
669
+ assert self.cate_attr1_val_list != []
670
+ assert self.cate_val2_dict != {}
671
+
672
+ cate_attr1_val = random.choice(self.cate_attr1_val_list)
673
+
674
+ cate_attr2_list = self.cate_val2_dict[cate_attr1_val]
675
+
676
+ cate_attr2_val = random.choice(cate_attr2_list)
677
+
678
+ return cate_attr1_val, cate_attr2_val
679
+
680
+
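# --- Editor's illustration, not part of generator.py -------------------------
# A minimal usage sketch for GenerateCateCateCompoundAttribute, assuming a
# gender/city look-up file in the row format described in the docstring above
# (the file path is an assumption):
#
#   from geco_data_generator.generator import GenerateCateCateCompoundAttribute
#
#   gender_city = GenerateCateCateCompoundAttribute(
#       categorical1_attribute_name='gender',
#       categorical2_attribute_name='city',
#       lookup_file_name='geco_data_generator/data/gender-city.csv',
#       has_header_line=False,
#       unicode_encoding='ascii')
#   gender_val, city_val = gender_city.create_attribute_values()
# ------------------------------------------------------------------------------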
681
+ # =============================================================================
682
+
683
+
684
+ class GenerateCateContCompoundAttribute(GenerateCompoundAttribute):
685
+ """Generate two attributes, one containing categorical values and the other
686
+ continuous values, where the continuous values depend upon the categorical
687
+ values.
688
+
689
+ This for example allows the modelling of:
690
+ - salary values that depend upon gender values, or
691
+ - blood pressure values that depend upon age values.
692
+
693
+ The arguments that have to be set when this attribute type is initialised
694
+ are:
695
+
696
+ categorical_attribute_name The name of the categorical attribute that
697
+ will be generated. This name will be used in
698
+ the header line to be written into the output
699
+ file.
700
+
701
+ continuous_attribute_name The name of the continuous attribute that will
702
+ be generated. This name will be used in the
703
+ header line to be written into the output
704
+ file.
705
+
706
+ lookup_file_name Name of the file which contains the values of
707
+ the continuous attribute, and for each of these
708
+ values the name of a function (and its
709
+ parameters) that is used to generate the
710
+ continuous values. This file format is further
711
+ explained below.
712
+
713
+ has_header_line A flag, set to True or False, that has to be
714
+ set according to whether the look-up file starts
715
+ with a header line or not.
716
+
717
+ unicode_encoding The Unicode encoding (a string name) of the
718
+ file.
719
+
720
+ continuous_value_type The format of how continuous values are
721
+ returned when they are generated. Possible
722
+ values are 'int', so integer values are
723
+ returned; or 'float1', 'float2', to 'float9',
724
+ in which case floating-point values with the
725
+ specified number of digits behind the comma
726
+ are returned.
727
+
728
+ The format of the look-up file is:
729
+
730
+ # Comment lines start with the # character
731
+ cate_val,count,funct_name,funct_param_1,...,funct_param_N
732
+
733
+ The look-up file is a comma separated values (CSV) file with the following
734
+ columns:
735
+ 1) A categorical value. For all possible categorical values of an
736
+ attribute, one row must be specified in this look-up file.
737
+
738
+ 2) Count of this categorical value (a positive integer number). This
739
+ determines the likelihood of how often a certain categorical value will
740
+ be chosen.
741
+
742
+ 3) A function which generates the continuous value for this categorical
743
+ value. Implemented functions currently are:
744
+ - uniform
745
+ - normal
746
+
747
+ 4) The parameters required for the function that generates the continuous
748
+ values. They are:
749
+ - uniform: min_val, max_val
750
+ - normal: mu, sigma, min_val, max_val
751
+ (min_val and max_val can be set to None in which case no
752
+ minimum or maximum is enforced)
753
+
754
+ Example:
755
+ male,60,uniform,20000,100000
756
+ female,40,normal,35000,100000,10000,None
757
+ """
758
+
759
+ # ---------------------------------------------------------------------------
760
+
761
+ def __init__(self, **kwargs):
762
+ """Constructor. Process the derived keywords first, then call the base
763
+ class constructor.
764
+ """
765
+
766
+ # General attributes for all data set generators
767
+ #
768
+ self.number_of_atttributes = 2
769
+ self.attribute_type = 'Compound-Categorical-Continuous'
770
+
771
+ for (keyword, value) in kwargs.items():
772
+
773
+ if keyword.startswith('cate'):
774
+ basefunctions.check_is_non_empty_string(
775
+ 'categorical_attribute_name', value
776
+ )
777
+ self.categorical_attribute_name = value
778
+
779
+ elif keyword.startswith('continuous_a'):
780
+ basefunctions.check_is_non_empty_string('continuous_attribute_name', value)
781
+ self.continuous_attribute_name = value
782
+
783
+ elif keyword.startswith('continuous_v'):
784
+ basefunctions.check_is_non_empty_string('continuous_value_type', value)
785
+ basefunctions.check_is_valid_format_str('continuous_value_type', value)
786
+ self.continuous_value_type = value
787
+
788
+ elif keyword.startswith('look'):
789
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
790
+ self.lookup_file_name = value
791
+
792
+ elif keyword.startswith('has'):
793
+ basefunctions.check_is_flag('has_header_line', value)
794
+ self.has_header_line = value
795
+
796
+ elif keyword.startswith('unicode'):
797
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
798
+ self.unicode_encoding = value
799
+
800
+ else:
801
+ raise Exception(
802
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
803
+ )
804
+
805
+ # Check if the necessary variables have been set
806
+ #
807
+ basefunctions.check_is_non_empty_string(
808
+ 'categorical_attribute_name', self.categorical_attribute_name
809
+ )
810
+ basefunctions.check_is_non_empty_string(
811
+ 'continuous_attribute_name', self.continuous_attribute_name
812
+ )
813
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
814
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
815
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
816
+
817
+ if self.categorical_attribute_name == self.continuous_attribute_name:
818
+ raise Exception('Both attribute names are the same')
819
+
820
+ basefunctions.check_is_valid_format_str(
821
+ 'continuous_value_type', self.continuous_value_type
822
+ )
823
+
824
+ # Load the lookup file - - - - - - - - - - - - - - - - - - - - - - - - - -
825
+ #
826
+ header_list, lookup_file_data = basefunctions.read_csv_file(
827
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
828
+ )
829
+
830
+ cate_val_dict = {} # The categorical attribute values to be loaded from
831
+ # file and their counts.
832
+ cont_funct_dict = {} # For each categorical attribute value the details of
833
+ # the function used for the continuous attribute.
834
+
835
+ # Process attribute values from file and their details
836
+ #
837
+ for rec_list in lookup_file_data:
838
+ if len(rec_list) not in [5, 7]:
839
+ raise Exception(
840
+ 'Illegal format in lookup file %s: %s'
841
+ % (self.lookup_file_name, str(rec_list))
842
+ )
843
+ cate_attr_val = rec_list[0].strip()
844
+ try:
845
+ cate_attr_count = int(rec_list[1])
846
+ except:
847
+ raise Exception(
848
+ 'Value count given for categorical attribute is '
849
+ + 'not an integer number: %s' % (rec_list[1])
850
+ )
851
+ cont_attr_funct = rec_list[2].strip()
852
+
853
+ if cate_attr_val == '':
854
+ raise Exception(
855
+ 'Empty categorical attribute value in lookup file %s'
856
+ % (self.lookup_file_name)
857
+ )
858
+ if cate_attr_count <= 0:
859
+ raise Exception(
860
+ 'Count given for categorical attribute is not '
861
+ + 'positive for value "%s" in lookup ' % (cate_attr_val)
862
+ + 'file %s' % (self.lookup_file_name)
863
+ )
864
+
865
+ if cate_attr_val in cate_val_dict:
866
+ raise Exception(
867
+ 'Attribute value "%s" occurs twice in ' % (cate_attr_val)
868
+ + 'lookup file %s' % (self.lookup_file_name)
869
+ )
870
+
871
+ if cont_attr_funct not in ['uniform', 'normal']:
872
+ raise Exception(
873
+ 'Illegal continuous attribute function given: "%s"' % (cont_attr_funct)
874
+ + ' in lookup file %s' % (self.lookup_file_name)
875
+ )
876
+
877
+ cate_val_dict[cate_attr_val] = cate_attr_count
878
+
879
+ # Get function parameters from file data
880
+ #
881
+ if cont_attr_funct == 'uniform':
882
+ cont_attr_funct_min_val = float(rec_list[3])
883
+ basefunctions.check_is_number(
884
+ 'cont_attr_funct_min_val', cont_attr_funct_min_val
885
+ )
886
+
887
+ cont_attr_funct_max_val = float(rec_list[4])
888
+ basefunctions.check_is_number(
889
+ 'cont_attr_funct_max_val', cont_attr_funct_max_val
890
+ )
891
+
892
+ cont_funct_dict[cate_attr_val] = [
893
+ cont_attr_funct,
894
+ cont_attr_funct_min_val,
895
+ cont_attr_funct_max_val,
896
+ ]
897
+
898
+ elif cont_attr_funct == 'normal':
899
+ cont_attr_funct_mu = float(rec_list[3])
900
+ basefunctions.check_is_number('cont_attr_funct_mu', cont_attr_funct_mu)
901
+
902
+ cont_attr_funct_sigma = float(rec_list[4])
903
+ basefunctions.check_is_number(
904
+ 'cont_attr_funct_sigma', cont_attr_funct_sigma
905
+ )
906
+ try:
907
+ cont_attr_funct_min_val = float(rec_list[5])
908
+ except:
909
+ cont_attr_funct_min_val = None
910
+ if cont_attr_funct_min_val != None:
911
+ basefunctions.check_is_number(
912
+ 'cont_attr_funct_min_val', cont_attr_funct_min_val
913
+ )
914
+ try:
915
+ cont_attr_funct_max_val = float(rec_list[6])
916
+ except:
917
+ cont_attr_funct_max_val = None
918
+ if cont_attr_funct_max_val != None:
919
+ basefunctions.check_is_number(
920
+ 'cont_attr_funct_max_val', cont_attr_funct_max_val
921
+ )
922
+
923
+ cont_funct_dict[cate_attr_val] = [
924
+ cont_attr_funct,
925
+ cont_attr_funct_mu,
926
+ cont_attr_funct_sigma,
927
+ cont_attr_funct_min_val,
928
+ cont_attr_funct_max_val,
929
+ ]
930
+
931
+ # Generate a list of values according to their counts
932
+ #
933
+ cate_attr_val_list = []
934
+
935
+ for (cate_attr_val, val_count) in cate_val_dict.items():
936
+
937
+ # Append value as many times as given in their counts
938
+ #
939
+ new_list = [cate_attr_val] * val_count
940
+ cate_attr_val_list += new_list
941
+
942
+ random.shuffle(cate_attr_val_list) # Randomly shuffle the list of values
943
+
944
+ self.cate_attr_val_list = cate_attr_val_list
945
+ self.cont_funct_dict = cont_funct_dict
946
+
947
+ # ---------------------------------------------------------------------------
948
+
949
+ def create_attribute_values(self):
950
+ """Method which creates and returns two attribute values, one categorical
951
+ and one continuous, with the categorical value randomly selected
952
+ according to the provided frequency distribution, and the continuous
953
+ value generated according to the selected function and its parameters.
954
+ """
955
+
956
+ assert self.cate_attr_val_list != []
957
+
958
+ cate_attr_val = random.choice(self.cate_attr_val_list)
959
+
960
+ # Get the details of the function and generate the continuous value
961
+ #
962
+ funct_details = self.cont_funct_dict[cate_attr_val]
963
+ funct_name = funct_details[0]
964
+
965
+ if funct_name == 'uniform':
966
+ cont_attr_val = random.uniform(funct_details[1], funct_details[2])
967
+
968
+ elif funct_name == 'normal':
969
+ mu = funct_details[1]
970
+ sigma = funct_details[2]
971
+ min_val = funct_details[3]
972
+ max_val = funct_details[4]
973
+ in_range = False
974
+
975
+ cont_attr_val = random.normalvariate(mu, sigma)
976
+
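+ # Rejection sampling: re-draw from the normal distribution until the value
+ # falls within the optional [min_val, max_val] bounds.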
977
+ while in_range == False:
978
+ if ((min_val != None) and (cont_attr_val < min_val)) or (
979
+ (max_val != None) and (cont_attr_val > max_val)
980
+ ):
981
+ in_range = False
982
+ cont_attr_val = random.normalvariate(mu, sigma)
983
+ else:
984
+ in_range = True
985
+
986
+ if min_val != None:
987
+ assert cont_attr_val >= min_val
988
+ if max_val != None:
989
+ assert cont_attr_val <= max_val
990
+
991
+ else:
992
+ raise Exception(('Illegal continuous function given:', funct_name))
993
+
994
+ cont_attr_val_str = basefunctions.float_to_str(
995
+ cont_attr_val, self.continuous_value_type
996
+ )
997
+
998
+ return cate_attr_val, cont_attr_val_str
999
+
1000
+
1001
+ # =============================================================================
1002
+
1003
+
1004
+ class GenerateCateCateContCompoundAttribute(GenerateCompoundAttribute):
1005
+ """Generate three attributes, thefirst two containing categorical values and
1006
+ the third containing continuous values, where the values of the second
1007
+ attribute depend upon the values in the first attribute, and the values
1008
+ of the third attribute depend upon both the values of the first and second
1009
+ attribute.
1010
+
1011
+ This for example allows the modelling of:
1012
+ - blood pressure depending upon gender and city of residence values, or
1013
+ - salary depending upon gender and profession values.
1014
+
1015
+ The arguments that have to be set when this attribute type is initialised
1016
+ are:
1017
+
1018
+ categorical1_attribute_name The name of the first categorical attribute
1019
+ that will be generated. This name will be
1020
+ used in the header line to be written into
1021
+ the output file.
1022
+
1023
+ categorical2_attribute_name The name of the second categorical attribute
1024
+ that will be generated. This name will be
1025
+ used in the header line to be written into
1026
+ the output file.
1027
+
1028
+ continuous_attribute_name The name of the continuous attribute that
1029
+ will be generated. This name will be used in
1030
+ the header line to be written into the output
1031
+ file.
1032
+
1033
+ lookup_file_name Name of the file which contains the values
1034
+ of the first categorical attribute, and for
1035
+ each of these values the names of the
1036
+ categories and their counts of the second
1037
+ categorical attribute, and for each of these
1038
+ values the name of a function (and its
1039
+ parameters) that is used to generate the
1040
+ continuous values. This file format is
1041
+ further explained below.
1042
+
1043
+ has_header_line A flag, set to True or False, that has to be
1044
+ set according to if the look-up file starts
1045
+ with a header line or not.
1046
+
1047
+ unicode_encoding The Unicode encoding (a string name) of the
1048
+ file.
1049
+
1050
+ continuous_value_type The format of how continuous values are
1051
+ returned when they are generated. Possible
1052
+ values are 'int', so integer values are
1053
+ returned; or 'float1', 'float2', to
1054
+ 'float9', in which case floating-point
1055
+ values with the specified number of digits
1056
+ behind the comma are returned.
1057
+
1058
+ The format of the look-up file is:
1059
+
1060
+ # Comment lines start with the # character
1061
+ cate_attr1_val1,count
1062
+ cate_attr2_val1,count,funct_name,funct_param_1,...,funct_param_N
1063
+ cate_attr2_val2,count,funct_name,funct_param_1,...,funct_param_N
1064
+ cate_attr2_val3,count,funct_name,funct_param_1,...,funct_param_N
1065
+ ...
1066
+ cate_attr2_valX,count,funct_name,funct_param_1,...,funct_param_N
1067
+ cate_attr1_val2,count
1068
+ cate_attr2_val1,count,funct_name,funct_param_1,...,funct_param_N
1069
+ cate_attr2_val2,count,funct_name,funct_param_1,...,funct_param_N
1070
+ cate_attr2_val3,count,funct_name,funct_param_1,...,funct_param_N
1071
+ ...
1072
+ cate_attr2_valX,count,funct_name,funct_param_1,...,funct_param_N
1073
+ cate_attr1_val3,count
1074
+ ...
1075
+
1076
+
1077
+ The look-up file is a comma separated values (CSV) file with the following
1078
+ structure:
1079
+
1080
+ A) One row that contains two values:
1081
+ 1) A categorical value of the first attribute. For all possible values
1082
+ of the first categorical attribute, one row must be specified in
1083
+ this look-up file.
1084
+ 2) The count of this categorical value (a positive integer number).
1085
+ This determines the likelihood of how often a certain categorical
1086
+ value will be chosen.
1087
+
1088
+ B) After a row with two values, as described under A), one or more rows
1089
+ containing the following values in columns must be given:
1090
+ 1) A categorical value from the second categorical attribute.
1091
+ 2) The count of this categorical value (a positive integer number).
1092
+ This determines the likelihood of how often a certain categorical
1093
+ value will be chosen.
1094
+ 3) A function which generates the continuous value for this categorical
1095
+ value. Implemented functions currently are:
1096
+ - uniform
1097
+ - normal
1098
+ 4) The parameters required for the function that generates the
1099
+ continuous values. They are:
1100
+ - uniform: min_val, max_val
1101
+ - normal: mu, sigma, min_val, max_val
1102
+ (min_val and max_val can be set to None in which case no
1103
+ minimum or maximum is enforced)
1104
+
1105
+ Example:
1106
+ male,60
1107
+ canberra,20,uniform,50000,90000
1108
+ sydney,30,normal,75000,50000,20000,None
1109
+ melbourne,30,uniform,35000,200000
1110
+ perth,20,normal,55000,250000,15000,None
1111
+ female,40
1112
+ canberra,10,normal,45000,10000,None,150000
1113
+ sydney,40,uniform,60000,200000
1114
+ melbourne,20,uniform,50000,1750000
1115
+ brisbane,30,normal,55000,20000,20000,100000
1116
+ """
1117
+
1118
+ # ---------------------------------------------------------------------------
1119
+
1120
+ def __init__(self, **kwargs):
1121
+ """Constructor. Process the derived keywords first, then call the base
1122
+ class constructor.
1123
+ """
1124
+
1125
+ # General attributes for all data set generators
1126
+ #
1127
+ self.number_of_atttributes = 3
1128
+ self.attribute_type = 'Compound-Categorical-Categorical-Continuous'
1129
+
1130
+ for (keyword, value) in kwargs.items():
1131
+
1132
+ if keyword.startswith('categorical1'):
1133
+ basefunctions.check_is_non_empty_string(
1134
+ 'categorical1_attribute_name', value
1135
+ )
1136
+ self.categorical1_attribute_name = value
1137
+
1138
+ elif keyword.startswith('categorical2'):
1139
+ basefunctions.check_is_non_empty_string(
1140
+ 'categorical2_attribute_name', value
1141
+ )
1142
+ self.categorical2_attribute_name = value
1143
+
1144
+ elif keyword.startswith('continuous_a'):
1145
+ basefunctions.check_is_non_empty_string('continuous_attribute_name', value)
1146
+ self.continuous_attribute_name = value
1147
+
1148
+ elif keyword.startswith('continuous_v'):
1149
+ basefunctions.check_is_non_empty_string('continuous_value_type', value)
1150
+ basefunctions.check_is_valid_format_str('continuous_value_type', value)
1151
+ self.continuous_value_type = value
1152
+
1153
+ elif keyword.startswith('look'):
1154
+ basefunctions.check_is_non_empty_string('lookup_file_name', value)
1155
+ self.lookup_file_name = value
1156
+
1157
+ elif keyword.startswith('has'):
1158
+ basefunctions.check_is_flag('has_header_line', value)
1159
+ self.has_header_line = value
1160
+
1161
+ elif keyword.startswith('unicode'):
1162
+ basefunctions.check_is_non_empty_string('unicode_encoding', value)
1163
+ self.unicode_encoding = value
1164
+
1165
+ else:
1166
+ raise Exception(
1167
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
1168
+ )
1169
+
1170
+ # Check if the necessary variables have been set
1171
+ #
1172
+ basefunctions.check_is_non_empty_string(
1173
+ 'categorical1_attribute_name', self.categorical1_attribute_name
1174
+ )
1175
+ basefunctions.check_is_non_empty_string(
1176
+ 'categorical2_attribute_name', self.categorical2_attribute_name
1177
+ )
1178
+ basefunctions.check_is_non_empty_string(
1179
+ 'continuous_attribute_name', self.continuous_attribute_name
1180
+ )
1181
+ basefunctions.check_is_non_empty_string('lookup_file_name', self.lookup_file_name)
1182
+ basefunctions.check_is_flag('has_header_line', self.has_header_line)
1183
+ basefunctions.check_is_non_empty_string('unicode_encoding', self.unicode_encoding)
1184
+
1185
+ if (
1186
+ (self.categorical1_attribute_name == self.categorical2_attribute_name)
1187
+ or (self.categorical1_attribute_name == self.continuous_attribute_name)
1188
+ or (self.categorical2_attribute_name == self.continuous_attribute_name)
1189
+ ):
1190
+ raise Exception('Not all attribute names are different.')
1191
+
1192
+ basefunctions.check_is_valid_format_str(
1193
+ 'continuous_value_type', self.continuous_value_type
1194
+ )
1195
+
1196
+ # Load the lookup file - - - - - - - - - - - - - - - - - - - - - - - - - -
1197
+ #
1198
+ header_list, lookup_file_data = basefunctions.read_csv_file(
1199
+ self.lookup_file_name, self.unicode_encoding, self.has_header_line
1200
+ )
1201
+
1202
+ cate_val1_dict = {} # The categorical values from attribute 1 to be
1203
+ # loaded from file, and their counts.
1204
+
1205
+ cate_val2_dict = {} # The categorical values from attribute 1 as keys
1206
+ # and lists of categorical values (according to their
1207
+ # counts) from attribute 2 as values.
1208
+
1209
+ cont_funct_dict = {} # For each pair of categorical attribute values the
1210
+ # details of the function used for the continuous
1211
+ # attribute.
1212
+
1213
+ # Process attribute values from file and their details
1214
+ #
1215
+ list_counter = 0 # Counter in the list of lookup file data
1216
+ num_file_rows = len(lookup_file_data)
1217
+ rec_list = lookup_file_data[list_counter]
1218
+
1219
+ while list_counter < num_file_rows: # Process one row after another
1220
+
1221
+ if len(rec_list) < 2: # Need at least one categorical value and count
1222
+ raise Exception(
1223
+ 'Illegal format in lookup file %s: %s'
1224
+ % (self.lookup_file_name, str(rec_list))
1225
+ )
1226
+ cate_attr1_val = rec_list[0].strip()
1227
+ try:
1228
+ cate_attr1_count = int(rec_list[1])
1229
+ except:
1230
+ raise Exception(
1231
+ 'Value count given for attribute 1 is not an '
1232
+ + 'integer number: %s' % (rec_list[1])
1233
+ )
1234
+
1235
+ if cate_attr1_val == '':
1236
+ raise Exception(
1237
+ 'Empty categorical attribute value 1 in lookup '
1238
+ + 'file %s' % (self.lookup_file_name)
1239
+ )
1240
+ basefunctions.check_is_positive('cate_attr1_count', cate_attr1_count)
1241
+
1242
+ if cate_attr1_val in cate_val1_dict:
1243
+ raise Exception(
1244
+ 'Attribute value "%s" occurs twice in ' % (cate_attr1_val)
1245
+ + 'lookup file %s' % (self.lookup_file_name)
1246
+ )
1247
+
1248
+ cate_val1_dict[cate_attr1_val] = cate_attr1_count
1249
+
1250
+ # Loop to process values of the second categorical attribute and the
1251
+ # corresponding continuous functions
1252
+ #
1253
+ list_counter += 1
1254
+ rec_list = lookup_file_data[list_counter] # Get values from next line
1255
+
1256
+ this_cate_val2_dict = {} # Values of categorical attribute 2
1257
+ this_cont_funct_dict = {}
1258
+
1259
+ # As long as there are data from the second categorical attribute
1260
+ #
1261
+ while len(rec_list) > 2:
1262
+ cate_attr2_val = rec_list[0].strip()
1263
+ try:
1264
+ cate_attr2_count = int(rec_list[1])
1265
+ except:
1266
+ raise Exception(
1267
+ 'Value count given for categorical attribute 2 '
1268
+ + 'is not an integer number: %s' % (rec_list[1])
1269
+ )
1270
+ cont_attr_funct = rec_list[2].strip()
1271
+
1272
+ if cate_attr2_val == '':
1273
+ raise Exception(
1274
+ 'Empty categorical attribute 2 value in lookup '
1275
+ + 'file %s' % (self.lookup_file_name)
1276
+ )
1277
+ basefunctions.check_is_positive('cate_attr2_count', cate_attr2_count)
1278
+
1279
+ if cate_attr2_val in this_cate_val2_dict:
1280
+ raise Exception(
1281
+ 'Attribute value "%s" occurs twice in ' % (cate_attr2_val)
1282
+ + 'lookup file %s' % (self.lookup_file_name)
1283
+ )
1284
+
1285
+ if cont_attr_funct not in ['uniform', 'normal']:
1286
+ raise Exception(
1287
+ 'Illegal continuous attribute function '
1288
+ + 'given: "%s"' % (cont_attr_funct)
1289
+ + ' in lookup file %s' % (self.lookup_file_name)
1290
+ )
1291
+
1292
+ this_cate_val2_dict[cate_attr2_val] = cate_attr2_count
1293
+
1294
+ # Get function parameters from file data
1295
+ #
1296
+ if cont_attr_funct == 'uniform':
1297
+ cont_attr_funct_min_val = float(rec_list[3])
1298
+ basefunctions.check_is_number(
1299
+ 'cont_attr_funct_min_val', cont_attr_funct_min_val
1300
+ )
1301
+ cont_attr_funct_max_val = float(rec_list[4])
1302
+ basefunctions.check_is_number(
1303
+ 'cont_attr_funct_max_val', cont_attr_funct_max_val
1304
+ )
1305
+
1306
+ this_cont_funct_dict[cate_attr2_val] = [
1307
+ cont_attr_funct,
1308
+ cont_attr_funct_min_val,
1309
+ cont_attr_funct_max_val,
1310
+ ]
1311
+ elif cont_attr_funct == 'normal':
1312
+ cont_attr_funct_mu = float(rec_list[3])
1313
+ cont_attr_funct_sigma = float(rec_list[4])
1314
+ try:
1315
+ cont_attr_funct_min_val = float(rec_list[5])
1316
+ except:
1317
+ cont_attr_funct_min_val = None
1318
+ if cont_attr_funct_min_val != None:
1319
+ basefunctions.check_is_number(
1320
+ 'cont_attr_funct_min_val', cont_attr_funct_min_val
1321
+ )
1322
+ try:
1323
+ cont_attr_funct_max_val = float(rec_list[6])
1324
+ except:
1325
+ cont_attr_funct_max_val = None
1326
+ if cont_attr_funct_max_val != None:
1327
+ basefunctions.check_is_number(
1328
+ 'cont_attr_funct_max_val', cont_attr_funct_max_val
1329
+ )
1330
+ this_cont_funct_dict[cate_attr2_val] = [
1331
+ cont_attr_funct,
1332
+ cont_attr_funct_mu,
1333
+ cont_attr_funct_sigma,
1334
+ cont_attr_funct_min_val,
1335
+ cont_attr_funct_max_val,
1336
+ ]
1337
+
1338
+ list_counter += 1
1339
+ if list_counter < num_file_rows:
1340
+ rec_list = lookup_file_data[list_counter]
1341
+ else:
1342
+ rec_list = []
1343
+
1344
+ # Generate a list of categorical 2 values according to their counts
1345
+ #
1346
+ cate_attr2_val_list = []
1347
+
1348
+ for (cate_attr2_val, val2_count) in this_cate_val2_dict.items():
1349
+
1350
+ # Append value as many times as given in their counts
1351
+ #
1352
+ new_list = [cate_attr2_val] * val2_count
1353
+ cate_attr2_val_list += new_list
1354
+
1355
+ random.shuffle(cate_attr2_val_list) # Randomly shuffle the list of values
1356
+
1357
+ cate_val2_dict[cate_attr1_val] = cate_attr2_val_list
1358
+
1359
+ # Store function data for each combination of categorical values
1360
+ #
1361
+ for cate_attr2_val in this_cont_funct_dict:
1362
+ cont_dict_key = cate_attr1_val + '-' + cate_attr2_val
1363
+ cont_funct_dict[cont_dict_key] = this_cont_funct_dict[cate_attr2_val]
1364
+
1365
+ # Generate a list of values according to their counts for attribute 1
1366
+ #
1367
+ cate_attr1_val_list = []
1368
+
1369
+ for (cate_attr1_val, val1_count) in cate_val1_dict.items():
1370
+
1371
+ # Append value as many times as given in their counts
1372
+ #
1373
+ new_list = [cate_attr1_val] * val1_count
1374
+ cate_attr1_val_list += new_list
1375
+
1376
+ random.shuffle(cate_attr1_val_list) # Randomly shuffle the list of values
1377
+
1378
+ self.cate_attr1_val_list = cate_attr1_val_list
1379
+ self.cate_val2_dict = cate_val2_dict
1380
+ self.cont_funct_dict = cont_funct_dict
1381
+
1382
+ # ---------------------------------------------------------------------------
1383
+
1384
+ def create_attribute_values(self):
1385
+ """Method which creates and returns two categorical attribute values and
1386
+ one continuous value, where the second categorical value depends upon
1387
+ the first value, and the continuous value depends on both categorical
1388
+ values. The two categorical values are randomly selected according to
1389
+ the provided frequency distributions, while the continuous value is
1390
+ generated according to the selected function and its parameters.
1391
+ """
1392
+
1393
+ assert self.cate_attr1_val_list != []
1394
+ assert self.cate_val2_dict != {}
1395
+ assert self.cont_funct_dict != {}
1396
+
1397
+ cate_attr1_val = random.choice(self.cate_attr1_val_list)
1398
+
1399
+ cate_attr2_list = self.cate_val2_dict[cate_attr1_val]
1400
+
1401
+ cate_attr2_val = random.choice(cate_attr2_list)
1402
+
1403
+ # Get the details of the function and generate the continuous value
1404
+ #
1405
+ cont_dict_key = cate_attr1_val + '-' + cate_attr2_val
1406
+ funct_details = self.cont_funct_dict[cont_dict_key]
1407
+ funct_name = funct_details[0]
1408
+
1409
+ if funct_name == 'uniform':
1410
+ cont_attr_val = random.uniform(funct_details[1], funct_details[2])
1411
+
1412
+ elif funct_name == 'normal':
1413
+ mu = funct_details[1]
1414
+ sigma = funct_details[2]
1415
+ min_val = funct_details[3]
1416
+ max_val = funct_details[4]
1417
+ in_range = False
1418
+
1419
+ cont_attr_val = random.normalvariate(mu, sigma)
1420
+
1421
+ while in_range == False:
1422
+ if ((min_val != None) and (cont_attr_val < min_val)) or (
1423
+ (max_val != None) and (cont_attr_val > max_val)
1424
+ ):
1425
+ in_range = False
1426
+ cont_attr_val = random.normalvariate(mu, sigma)
1427
+ else:
1428
+ in_range = True
1429
+
1430
+ if min_val != None:
1431
+ assert cont_attr_val >= min_val
1432
+ if max_val != None:
1433
+ assert cont_attr_val <= max_val
1434
+
1435
+ else:
1436
+ raise Exception(('Illegal continuous function given:', funct_name))
1437
+
1438
+ cont_attr_val_str = basefunctions.float_to_str(
1439
+ cont_attr_val, self.continuous_value_type
1440
+ )
1441
+
1442
+ return cate_attr1_val, cate_attr2_val, cont_attr_val_str
1443
+
1444
+
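+ # Note: for the look-up file example given in the class docstring above, the
+ # keys of self.cont_funct_dict are built as '<attr1 value>-<attr2 value>',
+ # e.g. 'male-canberra' or 'female-sydney'.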
1445
+ # =============================================================================
1446
+
1447
+
1448
+ class GenerateContContCompoundAttribute(GenerateCompoundAttribute):
1449
+ """Generate two continuous attribute values, where the value of the second
1450
+ attribute depends upon the value of the first attribute.
1451
+
1452
+ This for example allows the modelling of:
1453
+ - salary values that depend upon age values, or
1454
+ - blood pressure values that depend upon age values.
1455
+
1456
+ The arguments that have to be set when this attribute type is initialised
1457
+ are:
1458
+
1459
+ continuous1_attribute_name The name of the first continuous attribute
1460
+ that will be generated. This name will be
1461
+ used in the header line to be written into
1462
+ the output file.
1463
+
1464
+ continuous2_attribute_name The name of the second continuous attribute
1465
+ that will be generated. This name will be
1466
+ used in the header line to be written into
1467
+ the output file.
1468
+
1469
+ continuous1_funct_name The name of the function that is used to
1470
+ randomly generate the values of the first
1471
+ attribute. Implemented functions currently
1472
+ are:
1473
+ - uniform
1474
+ - normal
1475
+
1476
+ continuous1_funct_param A list with the parameters required for the
1477
+ function that generates the continuous values
1478
+ in the first attribute. They are:
1479
+ - uniform: [min_val, max_val]
1480
+ - normal: [mu, sigma, min_val, max_val]
1481
+ (min_val and max_val can be set
1482
+ to None in which case no minimum
1483
+ or maximum is enforced)
1484
+
1485
+ continuous2_function A Python function that has a floating-point
1486
+ value as input (assumed to be a value
1487
+ generated for the first attribute) and that
1488
+ returns a floating-point value (assumed to be
1489
+ the value of the second attribute).
1490
+
1491
+ continuous1_value_type The format of how the continuous values in
1492
+ the first attribute are returned when they
1493
+ are generated. Possible values are 'int', so
1494
+ integer values are generated; or 'float1',
1495
+ 'float2', to 'float9', in which case
1496
+ floating-point values with the specified
1497
+ number of digits behind the comma are
1498
+ generated.
1499
+
1500
+ continuous2_value_type The same as for the first attribute.
1501
+ """
1502
+
1503
+ # ---------------------------------------------------------------------------
1504
+
1505
+ def __init__(self, **kwargs):
1506
+ """Constructor. Process the derived keywords first, then call the base
1507
+ class constructor.
1508
+ """
1509
+
1510
+ # General attributes for all data set generators
1511
+ #
1512
+ self.number_of_atttributes = 2
1513
+ self.attribute_type = 'Compound-Continuous-Continuous'
1514
+
1515
+ for (keyword, value) in kwargs.items():
1516
+
1517
+ if keyword.startswith('continuous1_a'):
1518
+ basefunctions.check_is_non_empty_string(
1519
+ 'continuous1_attribute_name', value
1520
+ )
1521
+ self.continuous1_attribute_name = value
1522
+
1523
+ elif keyword.startswith('continuous2_a'):
1524
+ basefunctions.check_is_non_empty_string(
1525
+ 'continuous2_attribute_name', value
1526
+ )
1527
+ self.continuous2_attribute_name = value
1528
+
1529
+ elif keyword.startswith('continuous1_funct_n'):
1530
+ basefunctions.check_is_non_empty_string('continuous1_funct_name', value)
1531
+ self.continuous1_funct_name = value
1532
+
1533
+ elif keyword.startswith('continuous1_funct_p'):
1534
+ basefunctions.check_is_list('continuous1_funct_param', value)
1535
+ self.continuous1_funct_param = value
1536
+
1537
+ elif keyword.startswith('continuous2_f'):
1538
+ basefunctions.check_is_function_or_method('continuous2_function', value)
1539
+ self.continuous2_function = value
1540
+
1541
+ elif keyword.startswith('continuous1_v'):
1542
+ basefunctions.check_is_non_empty_string('continuous1_value_type', value)
1543
+ basefunctions.check_is_valid_format_str('continuous1_value_type', value)
1544
+ self.continuous1_value_type = value
1545
+
1546
+ elif keyword.startswith('continuous2_v'):
1547
+ basefunctions.check_is_non_empty_string('continuous2_value_type', value)
1548
+ basefunctions.check_is_valid_format_str('continuous2_value_type', value)
1549
+ self.continuous2_value_type = value
1550
+
1551
+ else:
1552
+ raise Exception(
1553
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
1554
+ )
1555
+
1556
+ # Check if the necessary variables have been set
1557
+ #
1558
+ basefunctions.check_is_non_empty_string(
1559
+ 'continuous1_attribute_name', self.continuous1_attribute_name
1560
+ )
1561
+ basefunctions.check_is_non_empty_string(
1562
+ 'continuous2_attribute_name', self.continuous2_attribute_name
1563
+ )
1564
+ basefunctions.check_is_non_empty_string(
1565
+ 'continuous1_funct_name', self.continuous1_funct_name
1566
+ )
1567
+ basefunctions.check_is_list(
1568
+ 'continuous1_funct_param', self.continuous1_funct_param
1569
+ )
1570
+ basefunctions.check_is_function_or_method(
1571
+ 'continuous2_function', self.continuous2_function
1572
+ )
1573
+ basefunctions.check_is_non_empty_string(
1574
+ 'continuous1_value_type', self.continuous1_value_type
1575
+ )
1576
+ basefunctions.check_is_non_empty_string(
1577
+ 'continuous2_value_type', self.continuous2_value_type
1578
+ )
1579
+
1580
+ if self.continuous1_attribute_name == self.continuous2_attribute_name:
1581
+ raise Exception('Both attribute names are the same')
1582
+
1583
+ basefunctions.check_is_valid_format_str(
1584
+ 'continuous1_value_type', self.continuous1_value_type
1585
+ )
1586
+ basefunctions.check_is_valid_format_str(
1587
+ 'continuous2_value_type', self.continuous2_value_type
1588
+ )
1589
+
1590
+ # Check that the function for attribute 2 does return a float value
1591
+ #
1592
+ funct_ret = self.continuous2_function(1.0)
1593
+ if not isinstance(funct_ret, float):
1594
+ raise Exception(
1595
+ (
1596
+ 'Function provided for attribute 2 does not return'
1597
+ + ' a floating-point value:',
1598
+ type(funct_ret),
1599
+ )
1600
+ )
1601
+
1602
+ # Check type and number of parameters given for attribute 1 functions
1603
+ #
1604
+ if self.continuous1_funct_name not in ['uniform', 'normal']:
1605
+ raise Exception(
1606
+ 'Illegal continuous attribute 1 function given: "%s"'
1607
+ % (self.continuous1_funct_name)
1608
+ )
1609
+
1610
+ # Get function parameters from file data
1611
+ #
1612
+ if self.continuous1_funct_name == 'uniform':
1613
+ assert len(self.continuous1_funct_param) == 2
1614
+
1615
+ cont_attr1_funct_min_val = self.continuous1_funct_param[0]
1616
+ cont_attr1_funct_max_val = self.continuous1_funct_param[1]
1617
+ basefunctions.check_is_number(
1618
+ 'cont_attr1_funct_min_val', cont_attr1_funct_min_val
1619
+ )
1620
+ basefunctions.check_is_number(
1621
+ 'cont_attr1_funct_max_val', cont_attr1_funct_max_val
1622
+ )
1623
+
1624
+ assert cont_attr1_funct_min_val < cont_attr1_funct_max_val
1625
+
1626
+ self.attr1_funct_param = [cont_attr1_funct_min_val, cont_attr1_funct_max_val]
1627
+
1628
+ elif self.continuous1_funct_name == 'normal':
1629
+ assert len(self.continuous1_funct_param) == 4
1630
+
1631
+ cont_attr1_funct_mu = self.continuous1_funct_param[0]
1632
+ cont_attr1_funct_sigma = self.continuous1_funct_param[1]
1633
+ cont_attr1_funct_min_val = self.continuous1_funct_param[2]
1634
+ cont_attr1_funct_max_val = self.continuous1_funct_param[3]
1635
+
1636
+ basefunctions.check_is_number('cont_attr1_funct_mu', cont_attr1_funct_mu)
1637
+ basefunctions.check_is_number('cont_attr1_funct_sigma', cont_attr1_funct_sigma)
1638
+
1639
+ basefunctions.check_is_positive(
1640
+ 'cont_attr1_funct_sigma', cont_attr1_funct_sigma
1641
+ )
1642
+
1643
+ if cont_attr1_funct_min_val != None:
1644
+ basefunctions.check_is_number(
1645
+ 'cont_attr1_funct_min_val', cont_attr1_funct_min_val
1646
+ )
1647
+ assert cont_attr1_funct_min_val <= cont_attr1_funct_mu
1648
+
1649
+ if cont_attr1_funct_max_val != None:
1650
+ basefunctions.check_is_number(
1651
+ 'cont_attr1_funct_max_val', cont_attr1_funct_max_val
1652
+ )
1653
+ assert cont_attr1_funct_max_val >= cont_attr1_funct_mu
1654
+
1655
+ if (cont_attr1_funct_min_val != None) and (cont_attr1_funct_max_val != None):
1656
+ assert cont_attr1_funct_min_val < cont_attr1_funct_max_val
1657
+
1658
+ self.attr1_funct_param = [
1659
+ cont_attr1_funct_mu,
1660
+ cont_attr1_funct_sigma,
1661
+ cont_attr1_funct_min_val,
1662
+ cont_attr1_funct_max_val,
1663
+ ]
1664
+
1665
+ # ---------------------------------------------------------------------------
1666
+
1667
+ def create_attribute_values(self):
1668
+ """Method which creates and returns two continuous attribute values, with
1669
+ the first continuous value generated according to the selected function and
1670
+ its parameters, and the second value depending upon the first value.
1671
+ """
1672
+
1673
+ # Get the details of the function and generate the first continuous value
1674
+ #
1675
+ funct_name = self.continuous1_funct_name
1676
+ funct_details = self.attr1_funct_param
1677
+
1678
+ if funct_name == 'uniform':
1679
+ cont_attr1_val = random.uniform(funct_details[0], funct_details[1])
1680
+
1681
+ elif funct_name == 'normal':
1682
+ mu = funct_details[0]
1683
+ sigma = funct_details[1]
1684
+ min_val = funct_details[2]
1685
+ max_val = funct_details[3]
1686
+ in_range = False
1687
+
1688
+ cont_attr1_val = random.normalvariate(mu, sigma)
1689
+
1690
+ while in_range == False:
1691
+ if ((min_val != None) and (cont_attr1_val < min_val)) or (
1692
+ (max_val != None) and (cont_attr1_val > max_val)
1693
+ ):
1694
+ in_range = False
1695
+ cont_attr1_val = random.normalvariate(mu, sigma)
1696
+ else:
1697
+ in_range = True
1698
+
1699
+ if min_val != None:
1700
+ assert cont_attr1_val >= min_val
1701
+ if max_val != None:
1702
+ assert cont_attr1_val <= max_val
1703
+
1704
+ else:
1705
+ raise Exception(('Illegal continuous function given:', funct_name))
1706
+
1707
+ # Generate the second attribute value
1708
+ #
1709
+ cont_attr2_val = self.continuous2_function(cont_attr1_val)
1710
+
1711
+ cont_attr1_val_str = basefunctions.float_to_str(
1712
+ cont_attr1_val, self.continuous1_value_type
1713
+ )
1714
+ cont_attr2_val_str = basefunctions.float_to_str(
1715
+ cont_attr2_val, self.continuous2_value_type
1716
+ )
1717
+
1718
+ return cont_attr1_val_str, cont_attr2_val_str
1719
+
1720
+
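+ # Illustrative only (not part of the original module): an example of the kind
+ # of 'continuous2_function' expected by the class above. It must accept one
+ # floating-point value and return a floating-point value, which is what the
+ # constructor checks by calling the function with 1.0. The name and formula
+ # below are made-up.
+ #
+ def example_salary_from_age(age):
+     return 20000.0 + 1000.0 * float(age)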
1721
+ # =============================================================================
1722
+ # Classes for generating a data set
1723
+ # =============================================================================
1724
+
1725
+
1726
+ class GenerateDataSet:
1727
+ """Base class for data set generation.
1728
+
1729
+ This class and all of its derived classes provide methods that allow the
1730
+ generation of a synthetic data set according to user specifications.
1731
+
1732
+ The following arguments need to be set when a GenerateDataSet instance is
1733
+ initialised:
1734
+
1735
+ output_file_name The name of the file that will be generated. This
1736
+ will be a comma separated values (CSV) file. If the
1737
+ file name given does not end with the extension
1738
+ '.csv' then this extension will be added.
1739
+
1740
+ write_header_line A flag (True or False) indicating if a header line
1741
+ with the attribute (field) names is to be written at
1742
+ the beginning of the output file or not. The default
1743
+ for this argument is True.
1744
+
1745
+ rec_id_attr_name The name of the record identifier attribute. This
1746
+ name must be different from the names of all other
1747
+ generated attributes. Record identifiers will be
1748
+ unique values for each generated record.
1749
+
1750
+ number_of_records The number of records that are to be generated. This
1751
+ will correspond to the number of 'original' records
1752
+ that are generated.
1753
+
1754
+ attribute_name_list The list of attributes (fields) that are to be
1755
+ generated for each record, and the sequence how they
1756
+ are to be written into the output file. Each element
1757
+ in this list must be an attribute name. These names
1758
+ will become the header line of the output file (if
1759
+ a header line is to be written).
1760
+
1761
+ attribute_data_list A list which contains the actual attribute objects
1762
+ (from the classes GenerateAttribute and
1763
+ GenerateCompoundAttribute and their respective
1764
+ derived classes).
1765
+
1766
+ unicode_encoding The Unicode encoding (a string name) of the file.
1767
+ """
1768
+
1769
+ # ---------------------------------------------------------------------------
1770
+ def __init__(self, **kwargs):
1771
+ """Constructor, set general attributes."""
1772
+
1773
+ # General attributes for all data set generators
1774
+ #
1775
+ self.output_file_name = None
1776
+ self.write_header_line = True
1777
+ self.rec_id_attr_name = None
1778
+ self.number_of_records = 0
1779
+ self.attribute_name_list = None
1780
+ self.attribute_data_list = None
1781
+ self.unicode_encoding = None
1782
+ self.missing_val_str = ''
1783
+
1784
+ # The following dictionary will contain the generated records, with the
1785
+ # dictionary keys being the record identifiers (unique for each record),
1786
+ # while the dictionary values will be lists containing the actual attribute
1787
+ # values of these generated records.
1788
+ #
1789
+ self.rec_dict = {}
1790
+
1791
+ for (keyword, value) in kwargs.items(): # Process keyword arguments
1792
+
1793
+ if keyword.startswith('output'):
1794
+ basefunctions.check_is_non_empty_string('output_file_name', value)
1795
+
1796
+ # Make sure the file extension is correct
1797
+ #
1798
+ if value.endswith('.csv') == False:
1799
+ value = value + '.csv'
1800
+ self.output_file_name = value
1801
+
1802
+ elif keyword.startswith('write'):
1803
+ basefunctions.check_is_flag('write_header_line', value)
1804
+ self.write_header_line = value
1805
+
1806
+ elif keyword.startswith('rec'):
1807
+ basefunctions.check_is_non_empty_string('rec_id_attr_name', value)
1808
+ self.rec_id_attr_name = value
1809
+
1810
+ elif keyword.startswith('number'):
1811
+ basefunctions.check_is_integer('number_of_records', value)
1812
+ basefunctions.check_is_positive('number_of_records', value)
1813
+ self.number_of_records = value
1814
+
1815
+ elif keyword.startswith('attribute_name'):
1816
+ basefunctions.check_is_list('attribute_name_list', value)
1817
+ if not value:
1818
+ raise Exception('attribute_name_list is empty: %s' % (type(value)))
1819
+ self.attribute_name_list = value
1820
+
1821
+ elif keyword.startswith('attribute_data'):
1822
+ basefunctions.check_is_list('attribute_data_list', value)
1823
+ self.attribute_data_list = value
1824
+
1825
+ elif keyword.startswith('unicode'):
1826
+ basefunctions.check_unicode_encoding_exists(value)
1827
+ self.unicode_encoding = value
1828
+
1829
+ else:
1830
+ raise Exception(
1831
+ 'Illegal constructor argument keyword: "%s"' % (str(keyword))
1832
+ )
1833
+
1834
+ # Check if the necessary variables have been set
1835
+ #
1836
+ basefunctions.check_is_non_empty_string('output_file_name', self.output_file_name)
1837
+ basefunctions.check_is_non_empty_string('rec_id_attr_name', self.rec_id_attr_name)
1838
+ basefunctions.check_is_integer('number_of_records', self.number_of_records)
1839
+ basefunctions.check_is_positive('number_of_records', self.number_of_records)
1840
+ basefunctions.check_is_list('attribute_name_list', self.attribute_name_list)
1841
+ basefunctions.check_is_list('attribute_data_list', self.attribute_data_list)
1842
+ basefunctions.check_unicode_encoding_exists(self.unicode_encoding)
1843
+
1844
+ # Remove potential duplicate entries in the attribute data list
1845
+ #
1846
+ attr_data_set = set()
1847
+ new_attr_data_list = []
1848
+ for attr_data in self.attribute_data_list:
1849
+ if attr_data not in attr_data_set:
1850
+ attr_data_set.add(attr_data)
1851
+ new_attr_data_list.append(attr_data)
1852
+ self.attribute_data_list = new_attr_data_list
1853
+
1854
+ # Check if the attributes listed in the attribute name list are all
1855
+ # different, i.e. no attribute is listed twice, and that their names are all
1856
+ # different from the record identifier attribute name.
1857
+ #
1858
+ attr_name_set = set()
1859
+ for attr_name in self.attribute_name_list:
1860
+ if attr_name == self.rec_id_attr_name:
1861
+ raise Exception(
1862
+ 'Attribute given has the same name as the record '
1863
+ + 'identifier attribute'
1864
+ )
1865
+ if attr_name in attr_name_set:
1866
+ raise Exception('Attribute name "%s" is given twice' % (attr_name))
1867
+ attr_name_set.add(attr_name)
1868
+ assert len(attr_name_set) == len(self.attribute_name_list)
1869
+
1870
+ # Check if the attribute names listed in the attribute data list are all
1871
+ # different, i.e. no attribute is listed twice, and that their names are all
1872
+ # different from the record identifier attribute name.
1873
+ #
1874
+ attr_name_set = set()
1875
+ for attr_data in self.attribute_data_list:
1876
+
1877
+ if attr_data.attribute_type == 'Compound-Categorical-Categorical':
1878
+ attr1_name = attr_data.categorical1_attribute_name
1879
+ attr2_name = attr_data.categorical2_attribute_name
1880
+ attr3_name = ''
1881
+
1882
+ elif attr_data.attribute_type == 'Compound-Categorical-Continuous':
1883
+ attr1_name = attr_data.categorical_attribute_name
1884
+ attr2_name = attr_data.continuous_attribute_name
1885
+ attr3_name = ''
1886
+
1887
+ elif attr_data.attribute_type == 'Compound-Continuous-Continuous':
1888
+ attr1_name = attr_data.continuous1_attribute_name
1889
+ attr2_name = attr_data.continuous2_attribute_name
1890
+ attr3_name = ''
1891
+
1892
+ elif attr_data.attribute_type == 'Compound-Categorical-Categorical-Continuous':
1893
+ attr1_name = attr_data.categorical1_attribute_name
1894
+ attr2_name = attr_data.categorical2_attribute_name
1895
+ attr3_name = attr_data.continuous_attribute_name
1896
+
1897
+ else: # A single attribute
1898
+ attr1_name = attr_data.attribute_name
1899
+ attr2_name = ''
1900
+ attr3_name = ''
1901
+
1902
+ for attr_name in [attr1_name, attr2_name, attr3_name]:
1903
+ if attr_name != '':
1904
+ if attr_name == self.rec_id_attr_name:
1905
+ raise Exception(
1906
+ 'Attribute given has the same name as the '
1907
+ + 'record identifier attribute'
1908
+ )
1909
+ if attr_name in attr_name_set:
1910
+ raise Exception(
1911
+ 'Attribute name "%s" is given twice' % (attr_name)
1912
+ + ' in attribute data definitions'
1913
+ )
1914
+ attr_name_set.add(attr_name)
1915
+
1916
+ # Check that there is an attribute definition provided for each attribute
1917
+ # listed in the attribute name list.
1918
+ #
1919
+ for attr_name in self.attribute_name_list:
1920
+ found_attr_name = False
1921
+ for attr_data in self.attribute_data_list:
1922
+
1923
+ # Get names from attribute data
1924
+ #
1925
+ if attr_data.attribute_type == 'Compound-Categorical-Categorical':
1926
+ if (attr_name == attr_data.categorical1_attribute_name) or (
1927
+ attr_name == attr_data.categorical2_attribute_name
1928
+ ):
1929
+ found_attr_name = True
1930
+ elif attr_data.attribute_type == 'Compound-Categorical-Continuous':
1931
+ if (attr_name == attr_data.categorical_attribute_name) or (
1932
+ attr_name == attr_data.continuous_attribute_name
1933
+ ):
1934
+ found_attr_name = True
1935
+ elif attr_data.attribute_type == 'Compound-Continuous-Continuous':
1936
+ if (attr_name == attr_data.continuous1_attribute_name) or (
1937
+ attr_name == attr_data.continuous2_attribute_name
1938
+ ):
1939
+ found_attr_name = True
1940
+ elif (
1941
+ attr_data.attribute_type
1942
+ == 'Compound-Categorical-Categorical-Continuous'
1943
+ ):
1944
+ if (
1945
+ (attr_name == attr_data.categorical1_attribute_name)
1946
+ or (attr_name == attr_data.categorical2_attribute_name)
1947
+ or (attr_name == attr_data.continuous_attribute_name)
1948
+ ):
1949
+ found_attr_name = True
1950
+ else: # A single attribute
1951
+ if attr_name == attr_data.attribute_name:
1952
+ found_attr_name = True
1953
+
1954
+ if found_attr_name == False:
1955
+ raise Exception(
1956
+ 'No attribute data available for attribute "%s"' % (attr_name)
1957
+ )
1958
+
1959
+ # ---------------------------------------------------------------------------
1960
+
1961
+ def generate(self):
1962
+ """Method which runs the generation process and generates the specified
1963
+ number of records.
1964
+
1965
+ This method returns a dictionary containing the 'number_of_records'
1966
+ generated records, with the record identifiers as keys and lists of the
1967
+ corresponding attribute values (as strings) as values.
1968
+ """
1969
+
1970
+ attr_name_list = self.attribute_name_list # Short-hands to increase speed
1971
+ rec_dict = self.rec_dict
1972
+ miss_val_str = self.missing_val_str
1973
+
1974
+ num_rec_num_digit = len(str(self.number_of_records)) - 1 # For digit padding
1975
+
1976
+ print()
1977
+ print('Generate records with attributes:')
1978
+ print(' ', attr_name_list)
1979
+ print()
1980
+
1981
+ for rec_id in range(self.number_of_records):
1982
+ rec_id_str = 'rec-%s-org' % (str(rec_id).zfill(num_rec_num_digit))
1983
+
1984
+ this_rec_dict = {} # The generated attribute values (attribute names as
1985
+ # keys, attribute values as values)
1986
+ this_rec_list = [] # List of attribute values of the generated data set
1987
+
1988
+ for attr_data in self.attribute_data_list:
1989
+
1990
+ if attr_data.attribute_type == 'Compound-Categorical-Categorical':
1991
+ attr1_name = attr_data.categorical1_attribute_name
1992
+ attr2_name = attr_data.categorical2_attribute_name
1993
+ attr1_val, attr2_val = attr_data.create_attribute_values()
1994
+ this_rec_dict[attr1_name] = attr1_val
1995
+ this_rec_dict[attr2_name] = attr2_val
1996
+
1997
+ elif attr_data.attribute_type == 'Compound-Categorical-Continuous':
1998
+ attr1_name = attr_data.categorical_attribute_name
1999
+ attr2_name = attr_data.continuous_attribute_name
2000
+ attr1_val, attr2_val = attr_data.create_attribute_values()
2001
+ this_rec_dict[attr1_name] = attr1_val
2002
+ this_rec_dict[attr2_name] = attr2_val
2003
+
2004
+ elif attr_data.attribute_type == 'Compound-Continuous-Continuous':
2005
+ attr1_name = attr_data.continuous1_attribute_name
2006
+ attr2_name = attr_data.continuous2_attribute_name
2007
+ attr1_val, attr2_val = attr_data.create_attribute_values()
2008
+ this_rec_dict[attr1_name] = attr1_val
2009
+ this_rec_dict[attr2_name] = attr2_val
2010
+
2011
+ elif (
2012
+ attr_data.attribute_type
2013
+ == 'Compound-Categorical-Categorical-Continuous'
2014
+ ):
2015
+ attr1_name = attr_data.categorical1_attribute_name
2016
+ attr2_name = attr_data.categorical2_attribute_name
2017
+ attr3_name = attr_data.continuous_attribute_name
2018
+ attr1_val, attr2_val, attr3_val = attr_data.create_attribute_values()
2019
+ this_rec_dict[attr1_name] = attr1_val
2020
+ this_rec_dict[attr2_name] = attr2_val
2021
+ this_rec_dict[attr3_name] = attr3_val
2022
+
2023
+ else: # A single attribute
2024
+ attr_name = attr_data.attribute_name
2025
+ attr_val = attr_data.create_attribute_value()
2026
+ this_rec_dict[attr_name] = attr_val
2027
+
2028
+ # Compile output record
2029
+ #
2030
+ for attr_name in attr_name_list:
2031
+ attr_val = this_rec_dict.get(attr_name, miss_val_str)
2032
+ assert isinstance(attr_val, str), attr_val
2033
+ this_rec_list.append(attr_val)
2034
+
2035
+ rec_dict[rec_id_str] = this_rec_list
2036
+
2037
+ print('Generated record with ID: %s' % (rec_id_str))
2038
+ print(' %s' % (str(this_rec_list)))
2039
+ print()
2040
+
2041
+ print('Generated %d records' % (self.number_of_records))
2042
+ print()
2043
+ print('------------------------------------------------------------------')
2044
+ print()
2045
+
2046
+ return rec_dict
2047
+
2048
+ # ---------------------------------------------------------------------------
2049
+
2050
+ def write(self):
2051
+ """Write the generated records into the defined output file."""
2052
+
2053
+ rec_id_list = list(self.rec_dict.keys())
2054
+ rec_id_list.sort()
2055
+
2056
+ # Convert record dictionary into a list, with record identifier added
2057
+ #
2058
+ rec_list = []
2059
+
2060
+ for rec_id in rec_id_list:
2061
+ this_rec_list = [rec_id] + self.rec_dict[rec_id]
2062
+ rec_list.append(this_rec_list)
2063
+
2064
+ header_list = [self.rec_id_attr_name] + self.attribute_name_list
2065
+ basefunctions.write_csv_file(
2066
+ self.output_file_name, self.unicode_encoding, header_list, rec_list
2067
+ )
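+ # ----------------------------------------------------------------------------
+ # A minimal usage sketch (not part of the uploaded module): it first writes a
+ # small hypothetical look-up file so the example is self-contained, then builds
+ # one compound attribute generator and a GenerateDataSet instance, and finally
+ # generates and writes a few records. The class and keyword names follow the
+ # constructors shown above; all file names, counts and parameter values below
+ # are made-up examples.
+ #
+ if __name__ == '__main__':
+
+     with open('example-gender-income.csv', 'w', encoding='ascii') as f:
+         f.write('male,60,uniform,40000.0,90000.0\n')
+         f.write('female,40,normal,55000.0,10000.0,20000.0,None\n')
+
+     gender_income_attr = GenerateCateContCompoundAttribute(
+         categorical_attribute_name='gender',
+         continuous_attribute_name='income',
+         continuous_value_type='float2',
+         lookup_file_name='example-gender-income.csv',
+         has_header_line=False,
+         unicode_encoding='ascii')
+
+     data_set = GenerateDataSet(
+         output_file_name='example-data.csv',
+         write_header_line=True,
+         rec_id_attr_name='rec-id',
+         number_of_records=10,
+         attribute_name_list=['gender', 'income'],
+         attribute_data_list=[gender_income_attr],
+         unicode_encoding='ascii')
+
+     data_set.generate()
+     data_set.write()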
requirements.txt ADDED
File without changes
setup.py ADDED
@@ -0,0 +1,36 @@
1
+ import pathlib
2
+ from setuptools import setup, find_packages
3
+
4
+ HERE = pathlib.Path(__file__).parent
5
+ README = (HERE / 'README.md').read_text()
6
+
7
+
8
+ def read_requirements(reqs_path):
9
+ with open(reqs_path, encoding='utf8') as f:
10
+ reqs = [
11
+ line.strip()
12
+ for line in f
13
+ if not line.strip().startswith('#') and not line.strip().startswith('--')
14
+ ]
15
+ return reqs
16
+
17
+
18
+ setup(
19
+ name="geco_data_generator",
20
+ version="0.0.1",
21
+ description="kg-geco_data_generator",
22
+ long_description=README,
23
+ long_description_content_type='text/markdown',
24
+ url='https://dmm.anu.edu.au/geco/',
25
+ author='',
26
+ author_email='',
27
+ python_requires='>=3.7',
28
+ classifiers=[
29
+ 'Programming Language :: Python :: 3',
30
+ 'Programming Language :: Python :: 3.7',
31
+ 'Topic :: Scientific/Engineering',
32
+ ],
33
+ packages=find_packages(exclude=['tests*', 'scripts', 'utils']),
34
+ include_package_data=True,
35
+ install_requires=read_requirements(HERE / 'requirements.txt'),
36
+ )
tests/attrgenfunct_test.py ADDED
@@ -0,0 +1,922 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ import os
4
+ import random
5
+ import time
6
+ import unittest
7
+
8
+ from geco_data_generator import attrgenfunct
9
+
10
+ random.seed(42)
11
+
12
+ # Define the number of tests to be done for the functionality tests
13
+ num_tests = 10000
14
+
15
+ # Define argument test cases here
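+ # For each tested argument, the first inner list holds argument combinations
+ # that are expected to succeed, while the second holds combinations that must
+ # raise an exception (see TestCase.testArguments below).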
16
+ test_argument_data_dict = {
17
+ ('attrgenfunct', 'n/a', 'generate_uniform_value'): {
18
+ 'min_val': [
19
+ [
20
+ [-10, 10, 'int'],
21
+ [10, 110, 'float1'],
22
+ [1110.0, 11011.9, 'float2'],
23
+ [-1110.0, -11.9, 'float9'],
24
+ [-51110, 1247, 'float7'],
25
+ ],
26
+ [
27
+ ['-10', 10, 'int'],
28
+ [None, 10, 'float1'],
29
+ [{}, 11011.9, 'float2'],
30
+ ["-1110.0", -11.9, 'float9'],
31
+ ['int', 1247, 'float7'],
32
+ ],
33
+ ],
34
+ 'max_val': [
35
+ [
36
+ [-10, 10, 'int'],
37
+ [10, 110, 'float1'],
38
+ [1110.0, 11011.9, 'float2'],
39
+ [-1110.0, -11.9, 'float9'],
40
+ [-51110, 1247, 'float7'],
41
+ ],
42
+ [
43
+ [-10, '10', 'int'],
44
+ [10, None, 'float1'],
45
+ [1110.0, {}, 'float2'],
46
+ [-1110.0, "-11.9", 'float9'],
47
+ [-51110, 'int', 'float7'],
48
+ ],
49
+ ],
50
+ 'val_type': [
51
+ [
52
+ [-10, 10, 'int'],
53
+ [10, 110, 'float1'],
54
+ [1110.0, 11011.9, 'float2'],
55
+ [1.0, 9.9, 'float3'],
56
+ [1.0, 2.9, 'float4'],
57
+ [1.0, 11.9, 'float5'],
58
+ [1.0, 9.9, 'float6'],
59
+ [1.0, 2.9, 'float7'],
60
+ [1.0, 11.9, 'float8'],
61
+ [-1110.0, -11.9, 'float9'],
62
+ ],
63
+ [
64
+ [-10, 10, 'int2'],
65
+ [10, 110, 'float0'],
66
+ [1110.0, 11011.9, 'float-'],
67
+ [1.0, 9.9, 'test'],
68
+ [1.0, 2.9, {}],
69
+ [1.0, 11.9, ''],
70
+ [1.0, 9.9, 42],
71
+ [1.0, 2.9, -42.42],
72
+ [1.0, 11.9, []],
73
+ ],
74
+ ],
75
+ },
76
+ #
77
+ ('attrgenfunct', 'n/a', 'generate_uniform_age'): {
78
+ 'min_val': [
79
+ [[0, 120], [10, 100], [25.0, 76.9], [55, 56], [55.0, 57.9]],
80
+ [[-1, 120], [210, 100], ['25.0', 76.9], [{}, 56], [-55.0, 57.9]],
81
+ ],
82
+ 'max_val': [
83
+ [[0, 120], [10, 100], [25.0, 76.9], [55, 56], [55.0, 57.9]],
84
+ [[0, 140], [10, -100], [25.0, '76.9'], [55, {}], [55.0, -57.9]],
85
+ ],
86
+ },
87
+ #
88
+ ('attrgenfunct', 'n/a', 'generate_normal_value'): {
89
+ 'mu': [
90
+ [
91
+ [1.0, 1.0, -10, 10, 'int'],
92
+ [-5, 25, -10, 110, 'float1'],
93
+ [100.42, 2000, -1110.0, 11011.9, 'float2'],
94
+ [24.24, 5.5, None, 30.0, 'float9'],
95
+ [24.24, 5.5, 10.0, None, 'float7'],
96
+ ],
97
+ [
98
+ ['1.0', 1.0, -10, 10, 'int'],
99
+ [-55, 25, -10, 110, 'float1'],
100
+ [255, 25, -10, 110, 'float1'],
101
+ [None, 2000, -1110.0, 11011.9, 'float2'],
102
+ [[], 5.5, None, 30.0, 'float9'],
103
+ [{}, 5.5, 10.0, None, 'float7'],
104
+ ],
105
+ ],
106
+ 'sigma': [
107
+ [
108
+ [1.0, 1.0, -10, 10, 'int'],
109
+ [-5, 25, -10, 110, 'float1'],
110
+ [100.42, 2000, -1110.0, 11011.9, 'float2'],
111
+ [24.24, 5.5, None, 30.0, 'float9'],
112
+ [24.24, 5.5, 10.0, None, 'float7'],
113
+ ],
114
+ [
115
+ [1.0, '1.0', -10, 10, 'int'],
116
+ [-5, -25, -10, 110, 'float1'],
117
+ [100.42, None, -1110.0, 11011.9, 'float2'],
118
+ [24.24, {}, None, 30.0, 'float9'],
119
+ [24.24, [], 10.0, None, 'float7'],
120
+ ],
121
+ ],
122
+ 'min_val': [
123
+ [
124
+ [1.0, 1.0, -10, 10, 'int'],
125
+ [-5, 25, -10, 110, 'float1'],
126
+ [100.42, 2000, -1110.0, 11011.9, 'float2'],
127
+ [24.24, 5.5, None, 30.0, 'float9'],
128
+ [24.24, 5.5, 10.0, None, 'float7'],
129
+ ],
130
+ [
131
+ [1.0, 1.0, '-10', 10, 'int'],
132
+ [-5, 25, 120, 110, 'float1'],
133
+ [100.42, 2000, {}, 11011.9, 'float2'],
134
+ [24.24, 5.5, 'None', 30.0, 'float9'],
135
+ [24.24, 5.5, 120.0, None, 'float7'],
136
+ ],
137
+ ],
138
+ 'max_val': [
139
+ [
140
+ [1.0, 1.0, -10, 10, 'int'],
141
+ [-5, 25, -10, 110, 'float1'],
142
+ [100.42, 2000, -1110.0, 11011.9, 'float2'],
143
+ [24.24, 5.5, None, 30.0, 'float9'],
144
+ [24.24, 5.5, 10.0, None, 'float7'],
145
+ ],
146
+ [
147
+ [1.0, 1.0, -10, '10', 'int'],
148
+ [-5, 25, -10, -110, 'float1'],
149
+ [100.42, 2000, -1110.0, {}, 'float2'],
150
+ [24.24, 5.5, None, -30.0, 'float9'],
151
+ [24.24, 5.5, 10.0, [], 'float7'],
152
+ ],
153
+ ],
154
+ 'val_type': [
155
+ [
156
+ [1.0, 1.0, -10, 10, 'int'],
157
+ [-5, 25, -10, 110, 'float1'],
158
+ [100.42, 2000, -1110.0, 11011.9, 'float2'],
159
+ [24.24, 5.5, None, 30.0, 'float9'],
160
+ [24.24, 5.5, 10.0, None, 'float7'],
161
+ ],
162
+ [
163
+ [1.0, 1.0, -10, 10, 'int2'],
164
+ [-5, 25, -10, 110, 'float21'],
165
+ [100.42, 2000, -1110.0, 11011.9, None],
166
+ [24.24, 5.5, None, 30.0, 42.42],
167
+ [24.24, 5.5, 10.0, None, {}],
168
+ ],
169
+ ],
170
+ },
171
+ #
172
+ ('attrgenfunct', 'n/a', 'generate_normal_age'): {
173
+ 'mu': [
174
+ [
175
+ [51.0, 1.0, 0, 110],
176
+ [45, 25, 4, 110],
177
+ [50.42, 50, 5, 77],
178
+ [24.24, 5.5, 1, 50.0],
179
+ [24.24, 5.5, 10.0, 99],
180
+ ],
181
+ [
182
+ ['51.0', 1.0, 0, 110],
183
+ [-45, 25, 4, 110],
184
+ [223, 25, 4, 110],
185
+ [None, 50, 5, 77],
186
+ [30, 20, 40, 110],
187
+ [70, 20, 10, 60],
188
+ [{}, 5.5, 1, 50.0],
189
+ ['', 5.5, 10.0, 99],
190
+ ],
191
+ ],
192
+ 'sigma': [
193
+ [
194
+ [51.0, 1.0, 0, 110],
195
+ [45, 25, 4, 110],
196
+ [50.42, 50, 5, 77],
197
+ [24.24, 5.5, 1, 50.0],
198
+ [24.24, 5.5, 10.0, 99],
199
+ ],
200
+ [
201
+ [51.0, -1.0, 0, 110],
202
+ [45, '25', 4, 110],
203
+ [50.42, None, 5, 77],
204
+ [24.24, {}, 1, 50.0],
205
+ [24.24, [], 10.0, 99],
206
+ ],
207
+ ],
208
+ 'min_val': [
209
+ [
210
+ [51.0, 1.0, 0, 110],
211
+ [45, 25, 4, 110],
212
+ [50.42, 50, 5, 77],
213
+ [24.24, 5.5, 1, 50.0],
214
+ [24.24, 5.5, 10.0, 99],
215
+ ],
216
+ [
217
+ [51.0, 1.0, -10, 110],
218
+ [45, 25, 134, 110],
219
+ [50.42, 50, 'None', 77],
220
+ [24.24, 5.5, {}, 50.0],
221
+ [24.24, 5.5, [], 99],
222
+ ],
223
+ ],
224
+ 'max_val': [
225
+ [
226
+ [51.0, 1.0, 0, 110],
227
+ [45, 25, 4, 110],
228
+ [50.42, 50, 5, 77],
229
+ [24.24, 5.5, 1, 50.0],
230
+ [24.24, 5.5, 10.0, 99],
231
+ ],
232
+ [
233
+ [51.0, 1.0, 0, '110'],
234
+ [45, 25, 4, -110],
235
+ [50.42, 50, 5, 'None'],
236
+ [24.24, 5.5, 1, {}],
237
+ [24.24, 5.5, 10.0, []],
238
+ ],
239
+ ],
240
+ },
241
+ # add tests for generate_normal_value, generate_normal_age
242
+ }
243
+
244
+ # =============================================================================
245
+
246
+
247
+ class TestCase(unittest.TestCase):
248
+
249
+ # Initialise test case - - - - - - - - - - - - - - - - - - - - - - - - - - -
250
+ #
251
+ def setUp(self):
252
+ pass # Nothing to initialize
253
+
254
+ # Clean up test case - - - - - - - - - - - - - - - - - - - - - - - - - - - -
255
+ #
256
+ def tearDown(self):
257
+ pass # Nothing to clean up
258
+
259
+ # ---------------------------------------------------------------------------
260
+ # Start test cases
261
+
262
+ def testArguments(self, test_data):
263
+ """Test if a function or method can be called or initialised correctly with
264
+ different values for their input arguments (parameters).
265
+
266
+ The argument 'test_data' must be a dictionary with the following
267
+ structure:
268
+
269
+ - Keys are tuples consisting of three strings:
270
+ (module_name, class_name, function_or_method_name)
271
+ - Values are dictionaries where the keys are the names of the input
272
+ argument that is being tested, and the values of these dictionaries
273
+ are a list that contains two lists. The first list contains valid
274
+ input arguments ('normal' argument tests) that should pass the test,
275
+ while the second list contains illegal input arguments ('exception'
276
+ argument tests) that should raise an exception.
277
+
278
+ Each test case is itself a list, containing as many input argument
280
+ values as are expected by the function or method that is being
281
+ tested.
281
+
282
+ This function returns a list containing the test results, where each
283
+ list element is a string with comma separated values (CSV) which are to
284
+ be written into the testing log file.
285
+ """
286
+
287
+ test_res_list = [] # The test results, one element per element in the test
288
+ # data dictionary
289
+
290
+ for (test_method_names, test_method_data) in test_data.items():
291
+ test_method_name = test_method_names[2]
292
+ print('Testing arguments for method/function:', test_method_name)
293
+
294
+ for argument_name in test_method_data:
295
+ print(' Testing input argument:', argument_name)
296
+
297
+ norm_test_data = test_method_data[argument_name][0]
298
+ exce_test_data = test_method_data[argument_name][1]
299
+ print(' Normal test cases: ', norm_test_data)
300
+ print(' Exception test cases:', exce_test_data)
301
+
302
+ # Conduct normal tests - - - - - - - - - - - - - - - - - - - - - - - -
303
+ #
304
+ num_norm_test_cases = len(norm_test_data)
305
+ num_norm_test_passed = 0
306
+ num_norm_test_failed = 0
307
+ norm_failed_desc_str = ''
308
+
309
+ for test_input in norm_test_data:
310
+ passed = True # Assume the test will pass :-)
311
+
312
+ if len(test_input) == 0: # Method has no input argument
313
+ try:
314
+ getattr(attrgenfunct, test_method_name)()
315
+ except:
316
+ passed = False
317
+
318
+ elif len(test_input) == 1: # Method has one input argument
319
+ try:
320
+ getattr(attrgenfunct, test_method_name)(test_input[0])
321
+ except:
322
+ passed = False
323
+
324
+ elif len(test_input) == 2: # Method has two input arguments
325
+ try:
326
+ getattr(attrgenfunct, test_method_name)(
327
+ test_input[0], test_input[1]
328
+ )
329
+ except:
330
+ passed = False
331
+
332
+ elif len(test_input) == 3: # Method has three input arguments
333
+ try:
334
+ getattr(attrgenfunct, test_method_name)(
335
+ test_input[0], test_input[1], test_input[2]
336
+ )
337
+ except:
338
+ passed = False
339
+
340
+ elif len(test_input) == 4: # Method has four input arguments
341
+ try:
342
+ getattr(attrgenfunct, test_method_name)(
343
+ test_input[0], test_input[1], test_input[2], test_input[3]
344
+ )
345
+ except:
346
+ passed = False
347
+
348
+ elif len(test_input) == 5: # Method has five input arguments
349
+ try:
350
+ getattr(attrgenfunct, test_method_name)(
351
+ test_input[0],
352
+ test_input[1],
353
+ test_input[2],
354
+ test_input[3],
355
+ test_input[4],
356
+ )
357
+ except:
358
+ passed = False
359
+
360
+ else:
361
+ raise Exception('Illegal number of input arguments')
362
+
363
+ # Now process test results
364
+ #
365
+ if passed == False:
366
+ num_norm_test_failed += 1
367
+ norm_failed_desc_str += 'Failed test for input ' + "'%s'; " % (
368
+ str(test_input)
369
+ )
370
+ else:
371
+ num_norm_test_passed += 1
372
+
373
+ assert num_norm_test_failed + num_norm_test_passed == num_norm_test_cases
374
+
375
+ norm_test_result_str = (
376
+ test_method_names[0]
377
+ + ','
378
+ + test_method_names[1]
379
+ + ','
380
+ + test_method_names[2]
381
+ + ','
382
+ + argument_name
383
+ + ',normal,'
384
+ + '%d,' % (num_norm_test_cases)
385
+ )
386
+ if num_norm_test_failed == 0:
387
+ norm_test_result_str += 'all tests passed'
388
+ else:
389
+ norm_test_result_str += '%d tests failed,' % (num_norm_test_failed)
390
+ norm_test_result_str += '"' + norm_failed_desc_str[:-2] + '"'
391
+
392
+ test_res_list.append(norm_test_result_str)
393
+
394
+ # Conduct exception tests - - - - - - - - - - - - - - - - - - - - - - -
395
+ #
396
+ num_exce_test_cases = len(exce_test_data)
397
+ num_exce_test_passed = 0
398
+ num_exce_test_failed = 0
399
+ exce_failed_desc_str = ''
400
+
401
+ for test_input in exce_test_data:
402
+ passed = True # Assume the test will pass (i.e. raise an exception)
403
+
404
+ if len(test_input) == 0: # Method has no input argument
405
+ try:
406
+ self.assertRaises(
407
+ Exception, getattr(attrgenfunct, test_method_name)
408
+ )
409
+ except:
410
+ passed = False
411
+
412
+ elif len(test_input) == 1: # Method has one input argument
413
+ try:
414
+ self.assertRaises(
415
+ Exception,
416
+ getattr(attrgenfunct, test_method_name),
417
+ test_input[0],
418
+ )
419
+ except:
420
+ passed = False
421
+
422
+ elif len(test_input) == 2: # Method has two input arguments
423
+ try:
424
+ self.assertRaises(
425
+ Exception,
426
+ getattr(attrgenfunct, test_method_name),
427
+ test_input[0],
428
+ test_input[1],
429
+ )
430
+ except:
431
+ passed = False
432
+
433
+ elif len(test_input) == 3: # Method has three input arguments
434
+ try:
435
+ self.assertRaises(
436
+ Exception,
437
+ getattr(attrgenfunct, test_method_name),
438
+ test_input[0],
439
+ test_input[1],
440
+ test_input[2],
441
+ )
442
+ except:
443
+ passed = False
444
+
445
+ elif len(test_input) == 4: # Method has four input arguments
446
+ try:
447
+ self.assertRaises(
448
+ Exception,
449
+ getattr(attrgenfunct, test_method_name),
450
+ test_input[0],
451
+ test_input[1],
452
+ test_input[2],
453
+ test_input[3],
454
+ )
455
+ except:
456
+ passed = False
457
+
458
+ elif len(test_input) == 5: # Method has five input arguments
459
+ try:
460
+ self.assertRaises(
461
+ Exception,
462
+ getattr(attrgenfunct, test_method_name),
463
+ test_input[0],
464
+ test_input[1],
465
+ test_input[2],
466
+ test_input[3],
467
+ test_input[4],
468
+ )
469
+ except:
470
+ passed = False
471
+
472
+ else:
473
+ raise Exception('Illegal number of input arguments')
474
+
475
+ # Now process test results
476
+ #
477
+ if passed == False:
478
+ num_exce_test_failed += 1
479
+ exce_failed_desc_str += 'Failed test for input ' + "'%s'; " % (
480
+ str(test_input)
481
+ )
482
+ else:
483
+ num_exce_test_passed += 1
484
+
485
+ assert num_exce_test_failed + num_exce_test_passed == num_exce_test_cases
486
+
487
+ exce_test_result_str = (
488
+ test_method_names[0]
489
+ + ','
490
+ + test_method_names[1]
491
+ + ','
492
+ + test_method_names[2]
493
+ + ','
494
+ + argument_name
495
+ + ',exception,'
496
+ + '%d,' % (num_exce_test_cases)
497
+ )
498
+ if num_exce_test_failed == 0:
499
+ exce_test_result_str += 'all tests passed'
500
+ else:
501
+ exce_test_result_str += '%d tests failed,' % (num_exce_test_failed)
502
+ exce_test_result_str += '"' + exce_failed_desc_str[:-2] + '"'
503
+
504
+ test_res_list.append(exce_test_result_str)
505
+
506
+ test_res_list.append('') # Empty line between tests of methods
507
+
508
+ return test_res_list
509
+
510
+ # ---------------------------------------------------------------------------
511
+
512
+ def testFunct_generate_phone_number_australia(self, num_tests):
513
+ """Test the functionality of 'generate_phone_number_australia', making
514
+ sure this function returns strings consisting only of digits, with
515
+ the first digit being 0, and two whitespaces at specific positions.
516
+ """
517
+
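+ # A conforming value (illustrative only, not an actual output) looks like
+ # '04 1234 5678': 12 characters, a leading '0', single spaces after the
+ # 2nd and 7th character, and digits everywhere else.
+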
518
+ print('Testing functionality of "generate_phone_number_australia"')
519
+
520
+ num_passed = 0
521
+ num_failed = 0
522
+
523
+ for i in range(num_tests):
524
+
525
+ oz_phone_num = attrgenfunct.generate_phone_number_australia()
526
+
527
+ passed = True
528
+
529
+ if len(oz_phone_num) != 12:
530
+ passed = False
531
+ if oz_phone_num[0] != '0':
532
+ passed = False
533
+ if oz_phone_num[2] != ' ':
534
+ passed = False
535
+ if oz_phone_num[7] != ' ':
536
+ passed = False
537
+ oz_phone_num_no_space = oz_phone_num.replace(' ', '') # Remove spaces
538
+ if not oz_phone_num_no_space.isdigit():
539
+ passed = False
540
+
541
+ if passed == True:
542
+ num_passed += 1
543
+ else:
544
+ num_failed += 1
545
+
546
+ assert num_passed + num_failed == num_tests
547
+
548
+ test_result_str = (
549
+ 'attrgenfunct,n/a,generate_phone_number_australia,'
550
+ + 'n/a,funct,%d,' % (num_tests)
551
+ )
552
+ if num_failed == 0:
553
+ test_result_str += 'all tests passed'
554
+ else:
555
+ test_result_str += '%d tests failed' % (num_failed)
556
+
557
+ return [test_result_str, '']
558
+
559
+ # ---------------------------------------------------------------------------
560
+
561
+ def testFunct_generate_credit_card_number(self, num_tests):
562
+ """Test the functionality of 'generate_credit_card_number', making sure
563
+ this function returns strings consisting only of four groups of
564
+ digits, each with 4 digits, and three whitespaces at specific positions.
565
+ """
566
+
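+ # A conforming value (illustrative only, not an actual output) looks like
+ # '1234 5678 9012 3456': four groups of four digits separated by single
+ # spaces, 19 characters in total.
+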
567
+ print('Testing functionality of "generate_credit_card_number"')
568
+
569
+ num_passed = 0
570
+ num_failed = 0
571
+
572
+ for i in range(num_tests):
573
+
574
+ cc_num = attrgenfunct.generate_credit_card_number()
575
+
576
+ passed = True
577
+
578
+ if len(cc_num) != 19:
579
+ passed = False
580
+ if cc_num[4] != ' ':
581
+ passed = False
582
+ if cc_num[9] != ' ':
583
+ passed = False
584
+ if cc_num[14] != ' ':
585
+ passed = False
586
+ cc_num_no_space = cc_num.replace(' ', '') # Remove spaces
587
+ if not cc_num_no_space.isdigit():
588
+ passed = False
589
+
590
+ if passed == True:
591
+ num_passed += 1
592
+ else:
593
+ num_failed += 1
594
+
595
+ assert num_passed + num_failed == num_tests
596
+
597
+ test_result_str = (
598
+ 'attrgenfunct,n/a,generate_credit_card_number,' + 'n/a,funct,%d,' % (num_tests)
599
+ )
600
+ if num_failed == 0:
601
+ test_result_str += 'all tests passed'
602
+ else:
603
+ test_result_str += '%d tests failed' % (num_failed)
604
+
605
+ return [test_result_str, '']
606
+
607
+ # ---------------------------------------------------------------------------
608
+
609
+ def testFunct_generate_uniform_value(self, num_tests):
610
+ """Test the functionality of 'generate_uniform_value', making sure
611
+ this function returns a string according to the given value type in
612
+ the range between the minimum and maximum value specified.
613
+ """
614
+
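+ # Illustrative call (hypothetical return value):
+ # generate_uniform_value(0, 100, 'float2') is expected to return a string
+ # such as '42.57', i.e. a value inside [0, 100] with at most two digits
+ # after the decimal point.
+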
615
+ print('Testing functionality of "generate_uniform_value"')
616
+
617
+ num_passed = 0
618
+ num_failed = 0
619
+
620
+ for i in range(num_tests):
621
+
622
+ for val_type in [
623
+ 'int',
624
+ 'float1',
625
+ 'float2',
626
+ 'float3',
627
+ 'float4',
628
+ 'float5',
629
+ 'float6',
630
+ 'float7',
631
+ 'float8',
632
+ 'float9',
633
+ ]:
634
+
635
+ for min_val in [-100.0, -42, 0, 10, 100.0]:
636
+ for max_val in [200.0, 242, 10000.01]:
637
+
638
+ norm_val = attrgenfunct.generate_uniform_value(
639
+ min_val, max_val, val_type
640
+ )
641
+ passed = True
642
+
643
+ if float(norm_val) < min_val:
644
+ passed = False
645
+ if float(norm_val) > max_val:
646
+ passed = False
647
+ if (val_type == 'int') and ('.' in norm_val):
648
+ passed = False
649
+ if val_type != 'int':
650
+ num_digit = int(val_type[-1])
651
+ norm_val_list = norm_val.split('.')
652
+ if len(norm_val_list[1]) > num_digit:
653
+ passed = (
654
+ False # Check only for too many digits, because for example
655
+ )
656
+ # 100.0 is returned with just one digit after the decimal point
657
+
658
+ if passed == True:
659
+ num_passed += 1
660
+ else:
661
+ num_failed += 1
662
+
663
+ assert num_passed + num_failed == (15 * 10 * num_tests)
664
+
665
+ test_result_str = 'attrgenfunct,n/a,generate_uniform_value,' + 'n/a,funct,%d,' % (
666
+ 15 * 10 * num_tests
667
+ )
668
+ if num_failed == 0:
669
+ test_result_str += 'all tests passed'
670
+ else:
671
+ test_result_str += '%d tests failed' % (num_failed)
672
+
673
+ return [test_result_str, '']
674
+
675
+ # ---------------------------------------------------------------------------
676
+
677
+ def testFunct_generate_uniform_age(self, num_tests):
678
+ """Test the functionality of 'generate_uniform_age', making sure this
679
+ function returns a string with an integer value between 0 and 130.
680
+ """
681
+
682
+ print('Testing functionality of "generate_uniform_age"')
683
+
684
+ num_passed = 0
685
+ num_failed = 0
686
+
687
+ for i in range(num_tests):
688
+
689
+ for (min_val, max_val) in [(0, 10), (0, 120), (0, 45), (42, 48), (40, 120)]:
690
+
691
+ age_val = attrgenfunct.generate_uniform_age(min_val, max_val)
692
+
693
+ passed = True
694
+
695
+ if float(age_val) < min_val:
696
+ passed = False
697
+ if float(age_val) > max_val:
698
+ passed = False
699
+ if '.' in age_val:
700
+ passed = False
701
+
702
+ if passed == True:
703
+ num_passed += 1
704
+ else:
705
+ num_failed += 1
706
+
707
+ assert num_passed + num_failed == num_tests * 5
708
+
709
+ test_result_str = 'attrgenfunct,n/a,generate_uniform_age,' + 'n/a,funct,%d,' % (
710
+ num_tests * 5
711
+ )
712
+ if num_failed == 0:
713
+ test_result_str += 'all tests passed'
714
+ else:
715
+ test_result_str += '%d tests failed' % (num_failed)
716
+
717
+ return [test_result_str, '']
718
+
719
+ # ---------------------------------------------------------------------------
720
+
721
+ def testFunct_generate_normal_value(self, num_tests):
722
+ """Test the functionality of 'generate_normal_value', making sure this
723
+ function returns a string according to the given value type in the
724
+ range between the minimum and maximum value specified.
725
+ """
726
+
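+ # Note on the test inputs below: min_val and/or max_val may be None, in
+ # which case the corresponding bound is not checked; for example
+ # generate_normal_value(0, 1, None, None) only needs to honour val_type.
+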
727
+ print('Testing functionality of "generate_normal_value"')
728
+
729
+ num_passed = 0
730
+ num_failed = 0
731
+
732
+ for i in range(num_tests):
733
+
734
+ for val_type in [
735
+ 'int',
736
+ 'float1',
737
+ 'float2',
738
+ 'float3',
739
+ 'float4',
740
+ 'float5',
741
+ 'float6',
742
+ 'float7',
743
+ 'float8',
744
+ 'float9',
745
+ ]:
746
+
747
+ for (mu, sigma, min_val, max_val) in [
748
+ (0, 1, -10, 10),
749
+ (0, 1, -1, 1),
750
+ (-100.5, 123.45, -1010.7, -10.11),
751
+ (12345.87, 54875.1, -400532, 96344),
752
+ (0, 1, None, 10),
753
+ (0, 1, None, 1),
754
+ (-100.5, 123.45, None, -10.11),
755
+ (12345.87, 54875.1, None, 96344),
756
+ (0, 1, -10, None),
757
+ (0, 1, -1, None),
758
+ (-100.5, 123.45, -1010.7, None),
759
+ (12345.87, 54875.1, -400532, None),
760
+ (0, 1, None, None),
761
+ (0, 1, None, None),
762
+ (-100.5, 123.45, None, None),
763
+ (12345.87, 54875.1, None, None),
764
+ ]:
765
+
766
+ norm_val = attrgenfunct.generate_normal_value(
767
+ mu, sigma, min_val, max_val, val_type
768
+ )
769
+ passed = True
770
+
771
+ if (min_val != None) and (float(norm_val) < min_val):
772
+ print('1:', norm_val, min_val)
773
+ passed = False
774
+ if (max_val != None) and (float(norm_val) > max_val):
775
+ print('2:', norm_val, max_val)
776
+ passed = False
777
+ if (val_type == 'int') and ('.' in norm_val):
778
+ print('3:', norm_val)
779
+ passed = False
780
+ if val_type != 'int':
781
+ num_digit = int(val_type[-1])
782
+ norm_val_list = norm_val.split('.')
783
+
784
+ if len(norm_val_list[1]) > num_digit:
785
+ print('4:', norm_val, val_type)
786
+ passed = False # Check only for too many digits, because for example
787
+ # 100.0 is returned with just one digit after the decimal point
788
+
789
+ if passed == True:
790
+ num_passed += 1
791
+ else:
792
+ num_failed += 1
793
+
794
+ assert num_passed + num_failed == (16 * 10 * num_tests)
795
+
796
+ test_result_str = 'attrgenfunct,n/a,generate_normal_value,' + 'n/a,funct,%d,' % (
797
+ 16 * 10 * num_tests
798
+ )
799
+ if num_failed == 0:
800
+ test_result_str += 'all tests passed'
801
+ else:
802
+ test_result_str += '%d tests failed' % (num_failed)
803
+
804
+ return [test_result_str, '']
805
+
806
+ # ---------------------------------------------------------------------------
807
+
808
+ def testFunct_generate_normal_age(self, num_tests):
809
+ """Test the functionality of 'generate_normal_age', making sure this
810
+ function returns a string with an integer value between 0 and 130.
811
+ """
812
+
813
+ print('Testing functionality of "generate_normal_age"')
814
+
815
+ num_passed = 0
816
+ num_failed = 0
817
+
818
+ for i in range(num_tests):
819
+
820
+ for (mu, sigma, min_val, max_val) in [
821
+ (50, 100, 0, 130),
822
+ (25, 20, 5, 55),
823
+ (65, 10, 60, 130),
824
+ (85, 20, 0, 95),
825
+ ]:
826
+
827
+ age_val = attrgenfunct.generate_normal_age(mu, sigma, min_val, max_val)
828
+
829
+ passed = True
830
+
831
+ if float(age_val) < min_val:
832
+ passed = False
833
+ if float(age_val) > max_val:
834
+ passed = False
835
+ if '.' in age_val:
836
+ passed = False
837
+
838
+ if passed == True:
839
+ num_passed += 1
840
+ else:
841
+ num_failed += 1
842
+
843
+ assert num_passed + num_failed == num_tests * 4
844
+
845
+ test_result_str = 'attrgenfunct,n/a,generate_normal_age,' + 'n/a,funct,%d,' % (
846
+ num_tests * 4
847
+ )
848
+ if num_failed == 0:
849
+ test_result_str += 'all tests passed'
850
+ else:
851
+ test_result_str += '%d tests failed' % (num_failed)
852
+
853
+ return [test_result_str, '']
854
+
855
+
856
+ # =============================================================================
857
+ # Generate a time string to be used for the log file
858
+ #
859
+ curr_time_tuple = time.localtime()
860
+ curr_time_str = (
861
+ str(curr_time_tuple[0])
862
+ + str(curr_time_tuple[1]).zfill(2)
863
+ + str(curr_time_tuple[2]).zfill(2)
864
+ + '-'
865
+ + str(curr_time_tuple[3]).zfill(2)
866
+ + str(curr_time_tuple[4]).zfill(2)
867
+ )
868
+
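+ # Equivalent sketch (not used above, shown only for clarity): the same
+ # 'YYYYMMDD-HHMM' string could be produced with
+ #
+ # curr_time_str = time.strftime('%Y%m%d-%H%M') # e.g. '20230130-1746'
+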
869
+ # Write test output header line into the log file
870
+ #
871
+ out_file_name = './logs/attrgenfunctTest-%s.csv' % (curr_time_str)
872
+
873
+ out_file = open(out_file_name, 'w')
874
+
875
+ out_file.write('Test results generated by attrgenfunctTest.py' + os.linesep)
876
+
877
+ out_file.write('Test started: ' + curr_time_str + os.linesep)
878
+
879
+ out_file.write(os.linesep)
880
+
881
+ out_file.write(
882
+ 'Module name,Class name,Method name,Arguments,Test_type,'
883
+ + 'Patterns tested,Summary,Failure description'
884
+ + os.linesep
885
+ )
886
+ out_file.write(os.linesep)
887
+
888
+ # Create instances for the testcase class that calls all tests
889
+ #
890
+ test_res_list = []
891
+ test_case_ins = TestCase('testArguments')
892
+ test_res_list += test_case_ins.testArguments(test_argument_data_dict)
893
+
894
+ test_case_ins = TestCase('testFunct_generate_phone_number_australia')
895
+ test_res_list += test_case_ins.testFunct_generate_phone_number_australia(num_tests)
896
+
897
+ test_case_ins = TestCase('testFunct_generate_credit_card_number')
898
+ test_res_list += test_case_ins.testFunct_generate_credit_card_number(num_tests)
899
+
900
+ test_case_ins = TestCase('testFunct_generate_uniform_value')
901
+ test_res_list += test_case_ins.testFunct_generate_uniform_value(num_tests)
902
+
903
+ test_case_ins = TestCase('testFunct_generate_uniform_age')
904
+ test_res_list += test_case_ins.testFunct_generate_uniform_age(num_tests)
905
+
906
+ test_case_ins = TestCase('testFunct_generate_normal_value')
907
+ test_res_list += test_case_ins.testFunct_generate_normal_value(num_tests)
908
+
909
+ test_case_ins = TestCase('testFunct_generate_normal_age')
910
+ test_res_list += test_case_ins.testFunct_generate_normal_age(num_tests)
911
+
912
+ # Write test output results into the log file
913
+ #
914
+ for line in test_res_list:
915
+ out_file.write(line + os.linesep)
916
+
917
+ out_file.close()
918
+
919
+ print('Test results are written to', out_file_name)
920
+
921
+ for line in test_res_list:
922
+ print(line)
tests/basefunctions_test.py ADDED
@@ -0,0 +1,1475 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ import os
4
+ import random
5
+ from pathlib import Path
6
+
7
+ random.seed(42)
8
+ import time
9
+ import unittest
10
+
11
+ from geco_data_generator import basefunctions
12
+
13
+
14
+ def f1(x):
15
+ print(x)
16
+
17
+
18
+ def f2():
19
+ print('hello')
20
+
21
+
22
+ # Define argument test cases here
23
+ #
24
+ test_argument_data_dict = {
25
+ ('basefunctions', 'n/a', 'check_is_not_none'): {
26
+ 'variable': [
27
+ [['testArgument', 'test'], ['hello', 'test'], ['1.0', 'test']],
28
+ [
29
+ [None, 'test'],
30
+ ['', 'test'],
31
+ [123, 'test'],
32
+ [0.234, 'test'],
33
+ [{}, 'test'],
34
+ [[], 'test'],
35
+ ],
36
+ ],
37
+ 'value': [
38
+ [
39
+ ['testArgument', 'hello'],
40
+ ['testArgument', 123],
41
+ ['testArgument', 0.234],
42
+ ['testArgument', []],
43
+ ['testArgument', {}],
44
+ ],
45
+ [['testArgument', None]],
46
+ ],
47
+ },
48
+ ('basefunctions', 'n/a', 'check_is_string'): {
49
+ 'variable': [
50
+ [['testArgument', 'hello'], ['test', 'hello'], ['1.0', 'hello']],
51
+ [
52
+ [None, 'hello'],
53
+ ['', 'hello'],
54
+ [123, 'hello'],
55
+ [0.234, 'hello'],
56
+ [{}, 'hello'],
57
+ [[], 'hello'],
58
+ ],
59
+ ],
60
+ 'value': [
61
+ [
62
+ ['testArgument', 'hello'],
63
+ ['testArgument', ''],
64
+ ['testArgument', '123'],
65
+ ['testArgument', '-1.23'],
66
+ ['testArgument', "'!?!'"],
67
+ ['testArgument', "HELlo"],
68
+ ['testArgument', "[..]"],
69
+ ],
70
+ [
71
+ ['testArgument', None],
72
+ ['testArgument', 123],
73
+ ['testArgument', 1.87],
74
+ ['testArgument', -75],
75
+ ],
76
+ ],
77
+ },
78
+ ('basefunctions', 'n/a', 'check_is_unicode_string'): {
79
+ 'variable': [
80
+ [['testArgument', u'hello'], ['test', u'hello'], ['1.0', u'hello']],
81
+ [
82
+ [None, u'hello'],
83
+ ['', u'hello'],
84
+ [123, u'hello'],
85
+ [0.234, u'hello'],
86
+ [{}, u'hello'],
87
+ [[], u'hello'],
88
+ ],
89
+ ],
90
+ 'value': [
91
+ [
92
+ ['testArgument', u'hello'],
93
+ ['testArgument', u'text'],
94
+ ['testArgument', u''],
95
+ ['testArgument', u'123'],
96
+ ],
97
+ [
98
+ ['testArgument', None],
99
+ ['testArgument', ''],
100
+ ['testArgument', -123],
101
+ ['testArgument', 123],
102
+ ['testArgument', 1.87],
103
+ ['testArgument', 'ascii'],
104
+ ],
105
+ ],
106
+ },
107
+ ('basefunctions', 'n/a', 'check_is_string_or_unicode_string'): {
108
+ 'variable': [
109
+ [
110
+ ['testArgument', 'hello'],
111
+ ['test', u'hello'],
112
+ ['1.0', 'hello2'],
113
+ ['1.0', u'hello'],
114
+ ],
115
+ [
116
+ [None, u'hello'],
117
+ ['', 'hello'],
118
+ [123, u'hello'],
119
+ [0.234, 'hello'],
120
+ [{}, u'hello'],
121
+ [[], u'hello'],
122
+ ],
123
+ ],
124
+ 'value': [
125
+ [
126
+ ['testArgument', u'hello'],
127
+ ['testArgument', 'text'],
128
+ ['testArgument', u''],
129
+ ['testArgument', u'123'],
130
+ ['testArgument', ''],
131
+ ['testArgument', '123'],
132
+ ],
133
+ [
134
+ ['testArgument', None],
135
+ ['testArgument', 123.45],
136
+ ['testArgument', -123],
137
+ ['testArgument', -123.65],
138
+ ['testArgument', {}],
139
+ ['testArgument', []],
140
+ ],
141
+ ],
142
+ },
143
+ ('basefunctions', 'n/a', 'check_is_non_empty_string'): {
144
+ 'variable': [
145
+ [['testArgument', 'hello'], ['test', 'hello'], ['1.0', 'hello']],
146
+ [
147
+ [None, 'hello'],
148
+ ['', 'hello'],
149
+ [123, 'hello'],
150
+ [0.234, 'hello'],
151
+ [{}, 'hello'],
152
+ [[], 'hello'],
153
+ ],
154
+ ],
155
+ 'value': [
156
+ [
157
+ ['testArgument', 'hello'],
158
+ ['testArgument', '123'],
159
+ ['testArgument', '-1.23'],
160
+ ['testArgument', 'HELlo'],
161
+ ['testArgument', "'!?!'"],
162
+ ['testArgument', "[..]"],
163
+ ],
164
+ [
165
+ ['testArgument', None],
166
+ ['testArgument', 123],
167
+ ['testArgument', 1.87],
168
+ ['testArgument', -75],
169
+ ['testArgument', ''],
170
+ ['testArgument', []],
171
+ ['testArgument', {}],
172
+ ],
173
+ ],
174
+ }, # , ['testArgument','hello']
175
+ ('basefunctions', 'n/a', 'check_is_number'): {
176
+ 'variable': [
177
+ [['testArgument', 123], ['test', 123], ['1.0', 123]],
178
+ [[None, 123], ['', 123], [123, 123], [0.234, 123], [{}, 123], [[], 123]],
179
+ ],
180
+ 'value': [
181
+ [
182
+ ['testArgument', 0],
183
+ ['testArgument', -0],
184
+ ['testArgument', 1.23],
185
+ ['testArgument', -24.41],
186
+ ['testArgument', 1289837],
187
+ ['testArgument', -973293],
188
+ ],
189
+ [
190
+ ['testArgument', None],
191
+ ['testArgument', 'hello'],
192
+ ['testArgument', '123'],
193
+ ['testArgument', '[]'],
194
+ ['testArgument', '-123.34'],
195
+ ['testArgument', ''],
196
+ ['testArgument', []],
197
+ ['testArgument', {}],
198
+ ],
199
+ ],
200
+ },
201
+ ('basefunctions', 'n/a', 'check_is_positive'): {
202
+ 'variable': [
203
+ [['testArgument', 1.0], ['test', 1.0], ['1.0', 1.0]],
204
+ [[None, 1.0], ['', 1.0], [123, 1.0], [0.234, 1.0], [{}, 1.0], [[], 1.0]],
205
+ ],
206
+ 'value': [
207
+ [
208
+ ['testArgument', 1.0],
209
+ ['testArgument', 0.001],
210
+ ['testArgument', 0.0000001],
211
+ ['testArgument', 1],
212
+ ['testArgument', 1.474],
213
+ ['testArgument', 1236967],
214
+ ['testArgument', 17676.474],
215
+ ],
216
+ [
217
+ ['testArgument', None],
218
+ ['testArgument', -1.23],
219
+ ['testArgument', -123],
220
+ ['testArgument', -100000000],
221
+ ['testArgument', 'hello'],
222
+ ['testArgument', '123'],
223
+ ['testArgument', []],
224
+ ['testArgument', {}],
225
+ ],
226
+ ],
227
+ },
228
+ ('basefunctions', 'n/a', 'check_is_not_negative'): {
229
+ 'variable': [
230
+ [['testArgument', 1.0], ['test', 1.0], ['1.0', 1.0]],
231
+ [[None, 1.0], ['', 1.0], [123, 1.0], [0.234, 1.0], [{}, 1.0], [[], 1.0]],
232
+ ],
233
+ 'value': [
234
+ [
235
+ ['testArgument', 0],
236
+ ['testArgument', -0],
237
+ ['testArgument', 1.0],
238
+ ['testArgument', 0.00],
239
+ ['testArgument', -0.000],
240
+ ['testArgument', 1],
241
+ ['testArgument', 1.474],
242
+ ['testArgument', 1236987],
243
+ ['testArgument', 17676.474],
244
+ ],
245
+ [
246
+ ['testArgument', None],
247
+ ['testArgument', -1.23],
248
+ ['testArgument', -123],
249
+ ['testArgument', -100000000],
250
+ ['testArgument', 'hello'],
251
+ ['testArgument', '123'],
252
+ ['testArgument', []],
253
+ ['testArgument', {}],
254
+ ],
255
+ ],
256
+ },
257
+ ('basefunctions', 'n/a', 'check_is_normalised'): {
258
+ 'variable': [
259
+ [['testArgument', 1.0], ['test', 1.0], ['1.0', 1.0]],
260
+ [[None, 1.0], ['', 1.0], [123, 1.0], [0.234, 1.0], [{}, 1.0], [[], 1.0]],
261
+ ],
262
+ 'value': [
263
+ [
264
+ ['testArgument', 0],
265
+ ['testArgument', -0],
266
+ ['testArgument', 0.00],
267
+ ['testArgument', -0.0],
268
+ ['testArgument', 1],
269
+ ['testArgument', 1.0],
270
+ ['testArgument', 0.0001],
271
+ ['testArgument', 1.00000],
272
+ ['testArgument', 0.5],
273
+ ['testArgument', 0.9999],
274
+ ],
275
+ [
276
+ ['testArgument', None],
277
+ ['testArgument', -1.23],
278
+ ['testArgument', -123],
279
+ ['testArgument', 100],
280
+ ['testArgument', 1.0001],
281
+ ['testArgument', -1.0],
282
+ ['testArgument', 'hello'],
283
+ ['testArgument', '0.7'],
284
+ ['testArgument', []],
285
+ ['testArgument', {}],
286
+ ],
287
+ ],
288
+ },
289
+ ('basefunctions', 'n/a', 'check_is_percentage'): {
290
+ 'variable': [
291
+ [['testArgument', 10.0], ['test', 10.0], ['1.0', 10.0]],
292
+ [[None, 10.0], ['', 10.0], [123, 10.0], [0.234, 10.0], [{}, 10.0], [[], 10.0]],
293
+ ],
294
+ 'value': [
295
+ [
296
+ ['testArgument', 0],
297
+ ['testArgument', -0],
298
+ ['testArgument', 0.00],
299
+ ['testArgument', -0.0],
300
+ ['testArgument', 1],
301
+ ['testArgument', 1.0],
302
+ ['testArgument', 0.0001],
303
+ ['testArgument', 99.000],
304
+ ['testArgument', 100],
305
+ ['testArgument', 0.5],
306
+ ['testArgument', 50],
307
+ ['testArgument', 50.001],
308
+ ['testArgument', 100.0],
309
+ ['testArgument', 0.9999],
310
+ ],
311
+ [
312
+ ['testArgument', None],
313
+ ['testArgument', -1.23],
314
+ ['testArgument', -123],
315
+ ['testArgument', 100.001],
316
+ ['testArgument', -0.0001],
317
+ ['testArgument', 'hello'],
318
+ ['testArgument', '85'],
319
+ ['testArgument', '45%'],
320
+ ['testArgument', []],
321
+ ['testArgument', {}],
322
+ ],
323
+ ],
324
+ },
325
+ ('basefunctions', 'n/a', 'check_is_integer'): {
326
+ 'variable': [
327
+ [['testArgument', 10], ['test', 10], ['1.0', 10]],
328
+ [[None, 10], ['', 10], [123, 10], [0.234, 10], [{}, 10], [[], 10]],
329
+ ],
330
+ 'value': [
331
+ [
332
+ ['testArgument', 0],
333
+ ['testArgument', 1],
334
+ ['testArgument', 10],
335
+ ['testArgument', 1234],
336
+ ['testArgument', -1],
337
+ ['testArgument', -96234],
338
+ ['testArgument', -100],
339
+ ['testArgument', -0],
340
+ ],
341
+ [
342
+ ['testArgument', None],
343
+ ['testArgument', -1.23],
344
+ ['testArgument', 1.23],
345
+ ['testArgument', -0.0001],
346
+ ['testArgument', 0.001],
347
+ ['testArgument', 'hello'],
348
+ ['testArgument', '85'],
349
+ ['testArgument', 10000.0],
350
+ ['testArgument', []],
351
+ ['testArgument', {}],
352
+ ],
353
+ ],
354
+ },
355
+ ('basefunctions', 'n/a', 'check_is_float'): {
356
+ 'variable': [
357
+ [['testArgument', 1.0], ['test', 1.0], ['1.0', 1.0]],
358
+ [[None, 1.0], ['', 1.0], [123, 1.0], [0.234, 1.0], [{}, 1.0], [[], 1.0]],
359
+ ],
360
+ 'value': [
361
+ [
362
+ ['testArgument', 0.0],
363
+ ['testArgument', -0.0],
364
+ ['testArgument', 0.123],
365
+ ['testArgument', -65.9203],
366
+ ['testArgument', 42.123],
367
+ ['testArgument', -10000.0],
368
+ ],
369
+ [
370
+ ['testArgument', None],
371
+ ['testArgument', -123],
372
+ ['testArgument', 123],
373
+ ['testArgument', 0],
374
+ ['testArgument', 100000],
375
+ ['testArgument', 'hello'],
376
+ ['testArgument', '8.5'],
377
+ ['testArgument', -0],
378
+ ['testArgument', []],
379
+ ['testArgument', {}],
380
+ ],
381
+ ],
382
+ },
383
+ ('basefunctions', 'n/a', 'check_is_dictionary'): {
384
+ 'variable': [
385
+ [['testArgument', {}], ['test', {}], ['1.0', {}]],
386
+ [[None, {}], ['', {}], [123, {}], [0.234, {}], [{}, {}], [[], {}]],
387
+ ],
388
+ 'value': [
389
+ [
390
+ ['testArgument', {}],
391
+ ['testArgument', {1: 2, 6: 0}],
392
+ ['testArgument', {'a': 4, 't': 1, (1, 4, 6): 'tr'}],
393
+ ],
394
+ [
395
+ ['testArgument', None],
396
+ ['testArgument', "{1:2,3:4}"],
397
+ ['testArgument', []],
398
+ ['testArgument', set()],
399
+ ],
400
+ ],
401
+ },
402
+ ('basefunctions', 'n/a', 'check_is_list'): {
403
+ 'variable': [
404
+ [['testArgument', []], ['test', []], ['1.0', []]],
405
+ [[None, []], ['', []], [123, []], [0.234, []], [{}, []], [[], []]],
406
+ ],
407
+ 'value': [
408
+ [
409
+ ['testArgument', []],
410
+ ['testArgument', [1, 3, 5]],
411
+ ['testArgument', [-1, -3, -5]],
412
+ ['testArgument', ['a', '56', 1, {}]],
413
+ ],
414
+ [
415
+ ['testArgument', None],
416
+ ['testArgument', "[1,2,3,4]"],
417
+ ['testArgument', {}],
418
+ ['testArgument', set()],
419
+ ],
420
+ ],
421
+ },
422
+ ('basefunctions', 'n/a', 'check_is_set'): {
423
+ 'variable': [
424
+ [['testArgument', set()], ['test', set()], ['1.0', set()]],
425
+ [
426
+ [None, set()],
427
+ ['', set()],
428
+ [123, set()],
429
+ [0.234, set()],
430
+ [{}, set()],
431
+ [[], set()],
432
+ ],
433
+ ],
434
+ 'value': [
435
+ [
436
+ ['testArgument', set()],
437
+ ['testArgument', set([1, 2, 3])],
438
+ ['testArgument', set(['a', 'a'])],
439
+ ['testArgument', set(['a', '56', 1, 100.345])],
440
+ ],
441
+ [
442
+ ['testArgument', None],
443
+ ['testArgument', "set([1,2,3])"],
444
+ ['testArgument', [1, 2, 3, 4]],
445
+ ['testArgument', {1: 2, 5: 6}],
446
+ ['testArgument', {}],
447
+ ['testArgument', []],
448
+ ],
449
+ ],
450
+ },
451
+ ('basefunctions', 'n/a', 'check_is_tuple'): {
452
+ 'variable': [
453
+ [['testArgument', ()], ['test', ()], ['1.0', ()]],
454
+ [[None, ()], ['', ()], [123, ()], [0.234, ()], [{}, ()], [[], ()]],
455
+ ],
456
+ 'value': [
457
+ [
458
+ ['testArgument', ()],
459
+ ['testArgument', ('a', 'b')],
460
+ ['testArgument', (42, 'b')],
461
+ ['testArgument', (1, 100)],
462
+ ['testArgument', ('a', 'b', 'c', 1, 2, 3)],
463
+ ],
464
+ [
465
+ ['testArgument', None],
466
+ ['testArgument', [1, 2, 3, 4]],
467
+ ['testArgument', {1: 2, 5: 6}],
468
+ ['testArgument', set([1, 2, 3])],
469
+ ['testArgument', "(1,2,3)"],
470
+ ['testArgument', []],
471
+ ['testArgument', {}],
472
+ ['testArgument', set()],
473
+ ],
474
+ ],
475
+ },
476
+ ('basefunctions', 'n/a', 'check_is_flag'): {
477
+ 'variable': [
478
+ [['testArgument', True], ['test', True], ['1.0', True]],
479
+ [[None, True], ['', True], [123, True], [0.234, True], [{}, True], [[], True]],
480
+ ],
481
+ 'value': [
482
+ [
483
+ ['testArgument', True],
484
+ ['testArgument', False],
485
+ ['testArgument', 0],
486
+ ['testArgument', 1],
487
+ ['testArgument', 0.0],
488
+ ['testArgument', 1.0],
489
+ ],
490
+ [
491
+ ['testArgument', None],
492
+ ['testArgument', 'True'],
493
+ ['testArgument', 'False'],
494
+ ['testArgument', 0.01],
495
+ ['testArgument', 1.01],
496
+ ['testArgument', '1.0'],
497
+ ],
498
+ ],
499
+ },
500
+ ('basefunctions', 'n/a', 'check_unicode_encoding_exists'): {
501
+ 'unicode_encoding_string': [
502
+ [["ascii"], ["iso-8859-1"], ["ASCII"]],
503
+ [[None], ["asciii"], [123], ['']],
504
+ ]
505
+ },
506
+ ('basefunctions', 'n/a', 'check_is_function_or_method'): {
507
+ 'variable': [
508
+ [['testArgument', f1], ['test', f1], ['1.0', f1]],
509
+ [[None, f1], ['', f1], [123, f1], [0.234, f1], [{}, f1], [[], f1]],
510
+ ],
511
+ 'value': [
512
+ [
513
+ ['testArgument', f1],
514
+ ['testArgument', f2],
515
+ ['testArgument', basefunctions.check_is_not_none],
516
+ ['testArgument', basefunctions.check_is_function_or_method],
517
+ ],
518
+ [
519
+ ['testArgument', None],
520
+ ['testArgument', 'f1'],
521
+ ['testArgument', 'f2'],
522
+ ['testArgument', 0.0],
523
+ ['testArgument', []],
524
+ ['testArgument', {}],
525
+ ],
526
+ ],
527
+ },
528
+ ('basefunctions', 'n/a', 'char_set_ascii'): {
529
+ 'string_variable': [
530
+ [
531
+ ["hello"],
532
+ ["1256783"],
533
+ ["hello1234test5678"],
534
+ ["hello 1234 test 5678"],
535
+ [' 1 a 2 b '],
536
+ ],
537
+ [[None], [0.2345], [123], [[]], [{}], [-5434.6]],
538
+ ]
539
+ },
540
+ ('basefunctions', 'n/a', 'check_is_valid_format_str'): {
541
+ 'variable': [
542
+ [['testArgument', 'int'], ['test', 'int'], ['1.0', 'int']],
543
+ [
544
+ [None, 'int'],
545
+ ['', 'int'],
546
+ [123, 'int'],
547
+ [0.234, 'int'],
548
+ [{}, 'int'],
549
+ [[], 'int'],
550
+ ],
551
+ ],
552
+ 'value': [
553
+ [
554
+ ['testArgument', 'int'],
555
+ ['testArgument', 'float1'],
556
+ ['testArgument', 'float2'],
557
+ ['testArgument', 'float3'],
558
+ ['testArgument', 'float4'],
559
+ ['testArgument', 'float5'],
560
+ ['testArgument', 'float6'],
561
+ ['testArgument', 'float7'],
562
+ ['testArgument', 'float8'],
563
+ ['testArgument', 'float9'],
564
+ ],
565
+ [
566
+ ['testArgument', None],
567
+ ['testArgument', ''],
568
+ ['testArgument', 'float10'],
569
+ ['testArgument', 'int1'],
570
+ ['testArgument', 'floet1'],
571
+ ['testArgument', 1],
572
+ ['testArgument', []],
573
+ ['testArgument', {}],
574
+ ],
575
+ ],
576
+ },
577
+ ('basefunctions', 'n/a', 'float_to_str'): {
578
+ 'number_variable': [
579
+ [
580
+ [1234, 'int'],
581
+ [123.4, 'int'],
582
+ [1.0004, 'int'],
583
+ [1000000000, 'int'],
584
+ [-12345.678, 'int'],
585
+ ],
586
+ [
587
+ [None, 'int'],
588
+ ['', 'int'],
589
+ ['123', 'int'],
590
+ ['456.98', 'int'],
591
+ [{}, 'int'],
592
+ [[], 'int'],
593
+ ],
594
+ ],
595
+ 'format_string': [
596
+ [
597
+ [100, 'int'],
598
+ [100, 'float1'],
599
+ [100, 'float2'],
600
+ [100, 'float3'],
601
+ [100, 'float4'],
602
+ [100, 'float5'],
603
+ [100, 'float6'],
604
+ [100, 'float7'],
605
+ [100, 'float8'],
606
+ [100, 'float9'],
607
+ ],
608
+ [
609
+ [100, None],
610
+ [100, ''],
611
+ [100, 'float10'],
612
+ [100, 'int1'],
613
+ [100, 'floet1'],
614
+ [100, 1],
615
+ [100, []],
616
+ [100, {}],
617
+ ],
618
+ ],
619
+ },
620
+ ('basefunctions', 'n/a', 'str2comma_separated_list'): {
621
+ 'string_variable': [
622
+ [
623
+ [u"ab,cd,ef"],
624
+ [u"12,567,83"],
625
+ [u"hello,1234,test,5678"],
626
+ [u"hello ,1234 ,test ,5678"],
627
+ [u' 1 a, 2 b '],
628
+ [u'as$,bc#'],
629
+ [u'abcdef'],
630
+ [u' , '],
631
+ ],
632
+ [[None], [0.2345], [123], [''], [], {}],
633
+ ]
634
+ },
635
+ ('basefunctions', 'n/a', 'read_csv_file'): {
636
+ 'file_name': [
637
+ [
638
+ ['test1.csv', "ascii", False], # Assume these test files exist
639
+ ['test3.csv', "ascii", False],
640
+ ['test2.txt', "ascii", False],
641
+ ],
642
+ [
643
+ [None, "ascii", False],
644
+ ['', "ascii", False],
645
+ ['test3.csvv', "ascii", False],
646
+ [234, "ascii", False],
647
+ [{}, "ascii", False],
648
+ [[], "ascii", False],
649
+ ],
650
+ ],
651
+ 'encoding': [
652
+ [
653
+ ['test1.csv', "ascii", False],
654
+ ['test1.csv', "iso-8859-1", False],
655
+ ['test1.csv', "ASCII", False],
656
+ ['test1.csv', None, False],
657
+ ],
658
+ [
659
+ ['test1.csv', '', False],
660
+ ['test1.csv', 'asciii', False],
661
+ ['test1.csv', 'ascii encode', False],
662
+ ['test1.csv', 123, False],
663
+ ['test1.csv', [], False],
664
+ ['test1.csv', {}, False],
665
+ ],
666
+ ],
667
+ 'header_line': [
668
+ [
669
+ ['test1.csv', "ascii", True],
670
+ ['test1.csv', "ascii", False],
671
+ ['test1.csv', "ascii", 1.0],
672
+ ['test1.csv', "ascii", 1],
673
+ ['test1.csv', "ascii", 0],
674
+ ['test1.csv', "ascii", 0.0],
675
+ ],
676
+ [
677
+ ['test1.csv', "ascii", None],
678
+ ['test1.csv', "ascii", 'True'],
679
+ ['test1.csv', "ascii", '1.0'],
680
+ ['test1.csv', "ascii", 1.01],
681
+ ['test1.csv', "ascii", 0.01],
682
+ ['test1.csv', "ascii", ''],
683
+ ],
684
+ ],
685
+ },
686
+ ('basefunctions', 'n/a', 'write_csv_file'): {
687
+ 'file_name': [
688
+ [
689
+ ['test.csv', "ascii", None, []],
690
+ ['test.csv', "ascii", None, []],
691
+ ['test.txt', "ascii", None, []],
692
+ ['test.csv', "ascii", None, []],
693
+ ],
694
+ [
695
+ [None, "ascii", None, []],
696
+ ['', "ascii", None, []],
697
+ ['test-3/csvv', "ascii", None, []],
698
+ [234, "ascii", None, []],
699
+ [{}, "ascii", None, []],
700
+ [[], "ascii", None, []],
701
+ ],
702
+ ],
703
+ 'encoding': [
704
+ [
705
+ ['test.csv', "ascii", None, []],
706
+ ['test.csv', "iso-8859-1", None, []],
707
+ ['test.csv', "ASCII", None, []],
708
+ ['test.csv', None, None, []],
709
+ ],
710
+ [
711
+ ['test.csv', '', None, []],
712
+ ['test.csv', 'asciii', None, []],
713
+ ['test.csv', 'ascii encode', None, []],
714
+ ['test.csv', 123, None, []],
715
+ ['test.csv', [], None, []],
716
+ ['test.csv', {}, None, []],
717
+ ],
718
+ ],
719
+ 'header_list': [
720
+ [
721
+ ['test.csv', "ascii", None, []],
722
+ ['test.csv', "ascii", [], []],
723
+ ['test.csv', "ascii", ['attr1'], []],
724
+ ['test.csv', "ascii", ['attr1', 'attr2'], []],
725
+ ['test.csv', "ascii", [' attr1 '], []],
726
+ ['test.csv', "ascii", [''], []],
727
+ ],
728
+ [
729
+ ['test.csv', "ascii", '', []],
730
+ ['test.csv', "ascii", 'attr1', []],
731
+ ['test.csv', "ascii", 'attr1,attr2', []],
732
+ ['test.csv', "ascii", 1, []],
733
+ ['test.csv', "ascii", set(['attr1']), []],
734
+ ['test.csv', "ascii", {1: 'attr1'}, []],
735
+ ],
736
+ ],
737
+ 'file_data': [
738
+ [
739
+ ['test.csv', "ascii", None, []],
740
+ ['test.csv', "ascii", None, [['test']]],
741
+ [
742
+ 'test.csv',
743
+ "ascii",
744
+ None,
745
+ [['1'], ['2'], ['3', '4'], ['5', '6', '7'], ['8,9,10']],
746
+ ],
747
+ ['test.csv', "ascii", None, [['a', 'b'], ['c', 'd', 'e']]],
748
+ [
749
+ 'test.csv',
750
+ "ascii",
751
+ None,
752
+ [['a'], ['1', '2'], ['b', '$%'], ['', '10.34']],
753
+ ],
754
+ ['test.csv', "ascii", None, [['']]],
755
+ ],
756
+ [
757
+ ['test.csv', "ascii", None, None],
758
+ ['test.csv', "ascii", None, 'test'],
759
+ ['test.csv', "ascii", None, ['test']],
760
+ ['test.csv', "ascii", None, [[1, 2], [3, 4]]],
761
+ ['test.csv', "ascii", None, {}],
762
+ ['test.csv', "ascii", None, set()],
763
+ ['test.csv', "ascii", None, ''],
764
+ ],
765
+ ],
766
+ },
767
+ }
768
+
769
+ # =============================================================================
770
+
771
+
772
+ class TestCase(unittest.TestCase):
773
+
774
+ # Initialise test case - - - - - - - - - - - - - - - - - - - - - - - - - - -
775
+ #
776
+ def setUp(self):
777
+ pass # Nothing to initialize
778
+
779
+ # Clean up test case - - - - - - - - - - - - - - - - - - - - - - - - - - - -
780
+ #
781
+ def tearDown(self):
782
+ pass # Nothing to clean up
783
+
784
+ # ---------------------------------------------------------------------------
785
+ # Start test cases
786
+
787
+ def testArguments(self, test_data):
788
+ """Test if a function or method can be called or initialised correctly with
789
+ different values for their input arguments (parameters).
790
+
791
+ The argument 'test_data' must be a dictionary with the following
792
+ structure:
793
+
794
+ - Keys are tuples consisting of three strings:
795
+ (module_name, class_name, function_or_method_name)
796
+ - Values are dictionaries where the keys are the names of the input
797
+ arguments being tested, and each value is a list containing two
798
+ lists. The first list contains valid
799
+ input arguments ('normal' argument tests) that should pass the test,
800
+ while the second list contains illegal input arguments ('exception'
801
+ argument tests) that should raise an exception.
802
+
803
+ The lists of test cases are themselves lists, each containing a number of
804
+ input argument values, as many as are expected by the function or
805
+ method that is being tested.
806
+
807
+ This function returns a list containing the test results, where each
808
+ list element is a string with comma separated values (CSV) which are to
809
+ be written into the testing log file.
810
+ """
811
+
812
+ test_res_list = [] # The test results, one element per element in the test
813
+ # data dictionary
814
+
815
+ for test_method_names, test_method_data in test_data.items():
816
+ test_method_name = test_method_names[2]
817
+ print('Testing arguments for method/function:', test_method_name)
818
+
819
+ for argument_name in test_method_data:
820
+ print(' Testing input argument:', argument_name)
821
+
822
+ norm_test_data = test_method_data[argument_name][0]
823
+ exce_test_data = test_method_data[argument_name][1]
824
+ print(' Normal test cases: ', norm_test_data)
825
+ print(' Exception test cases:', exce_test_data)
826
+
827
+ # Conduct normal tests - - - - - - - - - - - - - - - - - - - - - - - -
828
+ #
829
+ num_norm_test_cases = len(norm_test_data)
830
+ num_norm_test_passed = 0
831
+ num_norm_test_failed = 0
832
+ norm_failed_desc_str = ''
833
+
834
+ for test_input in norm_test_data:
835
+ passed = True # Assume the test will pass :-)
836
+
837
+ if len(test_input) == 0: # Method has no input argument
838
+ try:
839
+ getattr(basefunctions, test_method_name)()
840
+ except:
841
+ passed = False
842
+
843
+ elif len(test_input) == 1: # Method has one input argument
844
+ try:
845
+ getattr(basefunctions, test_method_name)(test_input[0])
846
+ except:
847
+ passed = False
848
+
849
+ elif len(test_input) == 2: # Method has two input arguments
850
+ try:
851
+ getattr(basefunctions, test_method_name)(
852
+ test_input[0], test_input[1]
853
+ )
854
+ except:
855
+ passed = False
856
+
857
+ elif len(test_input) == 3: # Method has three input arguments
858
+ try:
859
+ getattr(basefunctions, test_method_name)(
860
+ test_input[0], test_input[1], test_input[2]
861
+ )
862
+ except:
863
+ passed = False
864
+
865
+ elif len(test_input) == 4: # Method has four input arguments
866
+ try:
867
+ getattr(basefunctions, test_method_name)(
868
+ test_input[0], test_input[1], test_input[2], test_input[3]
869
+ )
870
+ except:
871
+ passed = False
872
+
873
+ elif len(test_input) == 5: # Method has five input arguments
874
+ try:
875
+ getattr(basefunctions, test_method_name)(
876
+ test_input[0],
877
+ test_input[1],
878
+ test_input[2],
879
+ test_input[3],
880
+ test_input[4],
881
+ )
882
+ except:
883
+ passed = False
884
+
885
+ else:
886
+ raise Exception('Illegal number of input arguments')
887
+
888
+ # Now process test results
889
+ #
890
+ if passed == False:
891
+ num_norm_test_failed += 1
892
+ norm_failed_desc_str += 'Failed test for input ' + "'%s'; " % (
893
+ str(test_input)
894
+ )
895
+ else:
896
+ num_norm_test_passed += 1
897
+
898
+ assert num_norm_test_failed + num_norm_test_passed == num_norm_test_cases
899
+
900
+ norm_test_result_str = (
901
+ test_method_names[0]
902
+ + ','
903
+ + test_method_names[1]
904
+ + ','
905
+ + test_method_names[2]
906
+ + ','
907
+ + argument_name
908
+ + ',normal,'
909
+ + '%d,' % (num_norm_test_cases)
910
+ )
911
+ if num_norm_test_failed == 0:
912
+ norm_test_result_str += 'all tests passed'
913
+ else:
914
+ norm_test_result_str += '%d tests failed,' % (num_norm_test_failed)
915
+ norm_test_result_str += '"' + norm_failed_desc_str[:-2] + '"'
916
+
917
+ test_res_list.append(norm_test_result_str)
918
+
919
+ # Conduct exception tests - - - - - - - - - - - - - - - - - - - - - - -
920
+ #
921
+ num_exce_test_cases = len(exce_test_data)
922
+ num_exce_test_passed = 0
923
+ num_exce_test_failed = 0
924
+ exce_failed_desc_str = ''
925
+
926
+ for test_input in exce_test_data:
927
+ passed = True # Assume the test will pass (i.e. raise an exception)
928
+
929
+ if len(test_input) == 0: # Method has no input argument
930
+ try:
931
+ self.assertRaises(
932
+ Exception, getattr(basefunctions, test_method_name)
933
+ )
934
+ except:
935
+ passed = False
936
+
937
+ elif len(test_input) == 1: # Method has one input argument
938
+ try:
939
+ self.assertRaises(
940
+ Exception,
941
+ getattr(basefunctions, test_method_name),
942
+ test_input[0],
943
+ )
944
+ except:
945
+ passed = False
946
+
947
+ elif len(test_input) == 2: # Method has two input arguments
948
+ try:
949
+ self.assertRaises(
950
+ Exception,
951
+ getattr(basefunctions, test_method_name),
952
+ test_input[0],
953
+ test_input[1],
954
+ )
955
+ except:
956
+ passed = False
957
+
958
+ elif len(test_input) == 3: # Method has three input arguments
959
+ try:
960
+ self.assertRaises(
961
+ Exception,
962
+ getattr(basefunctions, test_method_name),
963
+ test_input[0],
964
+ test_input[1],
965
+ test_input[2],
966
+ )
967
+ except:
968
+ passed = False
969
+
970
+ elif len(test_input) == 4: # Method has four input arguments
971
+ try:
972
+ self.assertRaises(
973
+ Exception,
974
+ getattr(basefunctions, test_method_name),
975
+ test_input[0],
976
+ test_input[1],
977
+ test_input[2],
978
+ test_input[3],
979
+ )
980
+ except:
981
+ passed = False
982
+
983
+ elif len(test_input) == 5: # Method has five input arguments
984
+ try:
985
+ self.assertRaises(
986
+ Exception,
987
+ getattr(basefunctions, test_method_name),
988
+ test_input[0],
989
+ test_input[1],
990
+ test_input[2],
991
+ test_input[3],
992
+ test_input[4],
993
+ )
994
+ except:
995
+ passed = False
996
+
997
+ else:
998
+ raise Exception('Illegal number of input arguments')
999
+
1000
+ # Now process test results
1001
+ #
1002
+ if passed == False:
1003
+ num_exce_test_failed += 1
1004
+ exce_failed_desc_str += 'Failed test for input ' + "'%s'; " % (
1005
+ str(test_input)
1006
+ )
1007
+ else:
1008
+ num_exce_test_passed += 1
1009
+
1010
+ assert num_exce_test_failed + num_exce_test_passed == num_exce_test_cases
1011
+
1012
+ exce_test_result_str = (
1013
+ test_method_names[0]
1014
+ + ','
1015
+ + test_method_names[1]
1016
+ + ','
1017
+ + test_method_names[2]
1018
+ + ','
1019
+ + argument_name
1020
+ + ',exception,'
1021
+ + '%d,' % (num_exce_test_cases)
1022
+ )
1023
+ if num_exce_test_failed == 0:
1024
+ exce_test_result_str += 'all tests passed'
1025
+ else:
1026
+ exce_test_result_str += '%d tests failed,' % (num_exce_test_failed)
1027
+ exce_test_result_str += '"' + exce_failed_desc_str[:-2] + '"'
1028
+
1029
+ test_res_list.append(exce_test_result_str)
1030
+
1031
+ test_res_list.append('') # Empty line between tests of methods
1032
+
1033
+ return test_res_list
1034
+
1035
+ def testFunct_char_set_ascii(self):
1036
+ """Test the functionality of 'char_set_ascii', making sure this function
1037
+ returns the correct character set string corresponding to the input string.
1038
+ """
1039
+
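+ # For example (taken from the cases below): char_set_ascii('1 2 3') is
+ # expected to return '0123456789 ', i.e. the digits plus the space character.
+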
1040
+ print('Testing functionality of "char_set_ascii"')
1041
+
1042
+ num_passed = 0
1043
+ num_failed = 0
1044
+ num_tests = 0
1045
+ failed_tests_desc = ''
1046
+
1047
+ test_cases = {
1048
+ '0123456789': ['1', '234', '9746298136491', '99999999999999999999999999'],
1049
+ '0123456789 ': [
1050
+ '1 2 3',
1051
+ ' 0 0 0 ',
1052
+ ' 234',
+ '409324 12430 32578',
1053
+ '0000000 00000',
1054
+ ],
1055
+ 'abcdefghijklmnopqrstuvwxyz': [
1056
+ 'abc',
1057
+ 'aaaaaaaaaaaa',
1058
+ 'aaabbbccc',
1059
+ 'cdhiofeakjbdakfhoweuar',
1060
+ 'ABC',
1061
+ ],
1062
+ 'abcdefghijklmnopqrstuvwxyz ': [
1063
+ ' a b c ',
1064
+ 'aaaa aaaa ',
1065
+ 'aaa bbb ccc',
1066
+ ' cdhiofeakjbdakfhoweuar',
1067
+ 'AB C',
1068
+ ],
1069
+ 'abcdefghijklmnopqrstuvwxyz0123456789': [
1070
+ '1234sdfj12998',
1071
+ '12345678a',
1072
+ 'afdadgf34kafh',
1073
+ '1a2b3c',
1074
+ ],
1075
+ 'abcdefghijklmnopqrstuvwxyz0123456789 ': [
1076
+ '1234 sdfj 12998',
1077
+ ' 12345678a ',
1078
+ 'afdadgf 34 kafh',
1079
+ ' 1 a 2 b 3 c ',
1080
+ ],
1081
+ }
1082
+
1083
+ for char_set_type in test_cases:
1084
+
1085
+ this_type_test_cases = test_cases[char_set_type]
1086
+
1087
+ for test_case in this_type_test_cases:
1088
+ if basefunctions.char_set_ascii(test_case) == char_set_type:
1089
+ num_passed += 1
1090
+ else:
1091
+ num_failed += 1
1092
+
1093
+ failed_tests_desc += "Failed with input string: '%s'; " % (test_case)
1094
+ num_tests += 1
1095
+
1096
+ test_result_str = 'basefunctions,n/a,char_set_ascii,' + 'n/a,funct,%d,' % (
1097
+ num_tests
1098
+ )
1099
+
1100
+ if num_failed == 0:
1101
+ test_result_str += 'all tests passed'
1102
+ else:
1103
+ test_result_str += '%d tests failed,' % (num_failed)
1104
+ test_result_str += '"' + failed_tests_desc[:-2] + '"'
1105
+
1106
+ return [test_result_str, '']
1107
+
1108
+ # ---------------------------------------------------------------------------
1109
+
1110
+ def testFunct_float_to_str(self):
1111
+ """Test the functionality of 'float_to_str', making sure this function
1112
+ returns a correct string representation of the number with the specified
1113
+ number of digits after the decimal point.
1114
+ """
1115
+
1116
+ print('Testing functionality of "float_to_str"')
1117
+
1118
+ num_passed = 0
1119
+ num_failed = 0
1120
+ num_tests = 0
1121
+ failed_tests_desc = ''
1122
+
1123
+ test_cases = {
1124
+ 'int': {
1125
+ 1: '1',
1126
+ 1.0: '1',
1127
+ 123.0: '123',
1128
+ -123: '-123',
1129
+ 1000.0001: '1000',
1130
+ 56.7: '57',
1131
+ },
1132
+ 'float1': {
1133
+ 1: '1.0',
1134
+ 1.0: '1.0',
1135
+ -123: '-123.0',
1136
+ 1000.127: '1000.1',
1137
+ 56.78: '56.8',
1138
+ },
1139
+ 'float2': {
1140
+ 1: '1.00',
1141
+ 1.0: '1.00',
1142
+ -123: '-123.00',
1143
+ 1000.127: '1000.13',
1144
+ 56.78: '56.78',
1145
+ },
1146
+ 'float3': {
1147
+ 1: '1.000',
1148
+ -123: '-123.000',
1149
+ 999.999: '999.999',
1150
+ 999.99999: '1000.000',
1151
+ },
1152
+ 'float4': {
1153
+ 1: '1.0000',
1154
+ -1.0: '-1.0000',
1155
+ 4.56789: '4.5679',
1156
+ 999.99999: '1000.0000',
1157
+ },
1158
+ 'float5': {
1159
+ 1: '1.00000',
1160
+ -1.0: '-1.00000',
1161
+ 4.456789: '4.45679',
1162
+ 999.999999: '1000.00000',
1163
+ },
1164
+ 'float6': {
1165
+ 1: '1.000000',
1166
+ -123: '-123.000000',
1167
+ 4.3456789: '4.345679',
1168
+ 123.12: '123.120000',
1169
+ 999.9999999: '1000.000000',
1170
+ },
1171
+ 'float7': {
1172
+ 1: '1.0000000',
1173
+ -23.4: '-23.4000000',
1174
+ 4.23456789: '4.2345679',
1175
+ 123.12: '123.1200000',
1176
+ 999.99999999: '1000.0000000',
1177
+ },
1178
+ 'float8': {
1179
+ 1: '1.00000000',
1180
+ -1.0: '-1.00000000',
1181
+ 4.123456789: '4.12345679',
1182
+ 123.12: '123.12000000',
1183
+ 999.999999999: '1000.00000000',
1184
+ },
1185
+ 'float9': {
1186
+ 1: '1.000000000',
1187
+ -1.0: '-1.000000000',
1188
+ 4.0123456789: '4.012345679',
1189
+ 999.9999999999: '1000.000000000',
1190
+ },
1191
+ }
1192
+
1193
+ for format in test_cases:
1194
+
1195
+ this_format_test_cases = test_cases[format]
1196
+
1197
+ for input_num in this_format_test_cases:
1198
+ if (
1199
+ basefunctions.float_to_str(input_num, format)
1200
+ == this_format_test_cases[input_num]
1201
+ ):
1202
+ num_passed += 1
1203
+ else:
1204
+ num_failed += 1
1205
+ failed_tests_desc += "Failed with input number: '%s'; " % (
1206
+ str(input_num)
1207
+ )
1208
+ num_tests += 1
1209
+
1210
+ test_result_str = 'basefunctions,n/a,float_to_str,' + 'n/a,funct,%d,' % (num_tests)
1211
+
1212
+ if num_failed == 0:
1213
+ test_result_str += 'all tests passed'
1214
+ else:
1215
+ test_result_str += '%d tests failed,' % (num_failed)
1216
+ test_result_str += '"' + failed_tests_desc[:-2] + '"'
1217
+
1218
+ return [test_result_str, '']
1219
+
1220
+ # ---------------------------------------------------------------------------
1221
+
1222
+ def testFunct_str2comma_separated_list(self):
1223
+ """Test the functionality of 'str2comma_separated_list', making sure this
1224
+ function returns the correct list of values obtained by splitting the
1225
+ input string at commas.
1226
+ """
1227
+
1228
+ print('Testing functionality of "str2comma_separated_list"')
1229
+
1230
+ num_passed = 0
1231
+ num_failed = 0
1232
+ num_tests = 0
1233
+
1234
+ failed_tests_desc = ''
1235
+
1236
+ test_cases = {
1237
+ u'123,456,789': ['123', '456', '789'],
1238
+ u'abcd,efgh,ij': ['abcd', 'efgh', 'ij'],
1239
+ u"abcd,efgh,ij": ['abcd', 'efgh', 'ij'],
1240
+ u'123,abc,f23': ['123', 'abc', 'f23'],
1241
+ u'000,000,000': ['000', '000', '000'],
1242
+ u'#$%,^&*,@?>': ['#$%', '^&*', '@?>'],
1243
+ u'abcd,123 ': ['abcd', '123'],
1244
+ u'123,45;6,7;89': ['123', '45;6', '7;89'],
1245
+ u'fd,g r,er,a w': ['fd', 'g r', 'er', 'a w'],
1246
+ u' fd,gr,er,aw ': ['fd', 'gr', 'er', 'aw'],
1247
+ u'123,456,': ['123', '456', ''],
1248
+ }
1249
+
1250
+ for string_val in test_cases:
1251
+
1252
+ if (
1253
+ basefunctions.str2comma_separated_list(string_val)
1254
+ == test_cases[string_val]
1255
+ ):
1256
+ num_passed += 1
1257
+ else:
1258
+ num_failed += 1
1259
+ failed_tests_desc += "Failed when string input: '%s'; " % (str(string_val))
1260
+ num_tests += 1
1261
+
1262
+ test_result_str = (
1263
+ 'basefunctions,n/a,str2comma_separated_list,' + 'n/a,funct,%d,' % (num_tests)
1264
+ )
1265
+
1266
+ if num_failed == 0:
1267
+ test_result_str += 'all tests passed'
1268
+ else:
1269
+ test_result_str += '%d tests failed,' % (num_failed)
1270
+ test_result_str += '"' + failed_tests_desc[:-2] + '"'
1271
+
1272
+ return [test_result_str, '']
1273
+
1274
+ # ---------------------------------------------------------------------------
1275
+
1276
+ def testFunct_read_csv_file(self):
1277
+ """Test the functionality of 'read_csv_file', making sure this function
1278
+ reads a CSV file and returns the correct content of the file.
1279
+
1280
+ This function assumes there are three small test files available:
1281
+ test1.csv, test2.txt, test3.csv
1282
+
1283
+ """
1284
+
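+ # For illustration, a hypothetical 'test1.csv' matching the shape the
+ # cases below expect (4 lines, 3 comma-separated values each, the first
+ # line usable as a header) could look like:
+ #
+ # rec_id,given_name,surname
+ # id1,peter,lyneham
+ # id2,miller,dickson
+ # id3,smith,hackett
+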
1285
+ print('Testing functionality of "read_csv_file"')
1286
+
1287
+ num_passed = 0
1288
+ num_failed = 0
1289
+ num_tests = 0
1290
+ failed_tests_desc = ''
1291
+
1292
+ # For the three test files, give file name, the expected number of records
1293
+ # (assuming there is no header line) and the expected number of attributes
1294
+ # in each record
1295
+ #
1296
+ test_cases = [('test1.csv', 4, 3), ('test2.txt', 5, 4), ('test3.csv', 0, 1)]
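+ # For example, tests/test1.csv in this commit has 4 lines with 3 comma-separated
+ # values each, matching the first tuple above.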
1297
+
1298
+ for test_case in test_cases:
1299
+
1300
+ for header_flag in [True, False]:
1301
+ passed = True
1302
+ (header_list, file_data) = basefunctions.read_csv_file(
1303
+ test_case[0], 'ascii', header_flag
1304
+ )
1305
+ if header_flag == True:
1306
+ if len(file_data) > 0:
1307
+ if len(file_data) != test_case[1] - 1:
1308
+ passed = False
1309
+ else: # No records in file
1310
+ if len(file_data) != 0:
1311
+ passed = False
1312
+ if len(header_list) != test_case[2]:
1313
+ passed = False
1314
+ else:
1315
+ if header_list != None:
1316
+ passed = False
1317
+ if len(file_data) != test_case[1]:
1318
+ passed = False
1319
+
1320
+ for rec in file_data:
1321
+ if len(rec) != test_case[2]:
1322
+ passed = False
1323
+
1324
+ if passed == True:
1325
+ num_passed += 1
1326
+ else:
1327
+ num_failed += 1
1328
+ failed_tests_desc += "Failed reading the file: '%s'; " % (test_case[0])
1329
+ num_tests += 1
1330
+
1331
+ test_result_str = 'basefunctions,n/a,read_csv_file,' + 'n/a,funct,%d,' % (
1332
+ num_tests
1333
+ )
1334
+
1335
+ if num_failed == 0:
1336
+ test_result_str += 'all tests passed'
1337
+ else:
1338
+ test_result_str += '%d tests failed,' % (num_failed)
1339
+ test_result_str += '"' + failed_tests_desc[:-2] + '"'
1340
+
1341
+ return [test_result_str, '']
1342
+
1343
+ # ---------------------------------------------------------------------------
1344
+
1345
+ def testFunct_write_csv_file(self):
1346
+ """Test the functionality of 'write_csv_file', making sure this function
1347
+ correctly writes a list of values into a CSV file. To test this
1348
+ function we assume the read_csv_file() function is correct.
1349
+ """
1350
+
1351
+ print('Testing functionality of "write_csv_file"')
1352
+
1353
+ num_passed = 0
1354
+ num_failed = 0
1355
+ num_tests = 0
1356
+ failed_tests_desc = ''
1357
+
1358
+ test_cases = [
1359
+ [['test1'], ['test2'], ['test3']],
1360
+ [['1'], ['2'], ['3']],
1361
+ [['test 1'], ['test 2'], ['test 3']],
1362
+ [['%^&'], ['test $#%^'], ['123 @#*(']],
1363
+ [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']],
1364
+ [['1', '2', '3'], ['4', '5', '6']],
1365
+ [['1', '2', '3'], ['4', '5', '6'], ['7', '8']],
1366
+ [
1367
+ ['id1', 'peter', 'lyneham'],
1368
+ ['id2', 'miller', 'dickson'],
1369
+ ['id3', 'smith', 'hackett'],
1370
+ ],
1371
+ ]
1372
+
1373
+ header_lists = [None, ['attr1', 'attr2', 'attr3']]
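+ # Each test case is written out twice, once without and once with the header
+ # list above, then read back with read_csv_file() and compared to the input.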
1374
+
1375
+ for test_case in test_cases:
1376
+
1377
+ for header_list in header_lists:
1378
+ basefunctions.write_csv_file('test.csv', 'ascii', header_list, test_case)
1379
+
1380
+ if header_list != None:
1381
+ (read_header_list, read_file_data) = basefunctions.read_csv_file(
1382
+ 'test.csv', 'ascii', True
1383
+ )
1384
+
1385
+ else:
1386
+ (read_header_list, read_file_data) = basefunctions.read_csv_file(
1387
+ 'test.csv', 'ascii', False
1388
+ )
1389
+
1390
+ if (read_header_list == header_list) and (read_file_data == test_case):
1391
+ num_passed += 1
1392
+ else:
1393
+ num_failed += 1
1394
+ failed_tests_desc += "Failed writing the data: '%s'; " % (str(test_case))
1395
+ num_tests += 1
1396
+
1397
+ test_result_str = 'basefunctions,n/a,write_csv_file,' + 'n/a,funct,%d,' % (
1398
+ num_tests
1399
+ )
1400
+
1401
+ if num_failed == 0:
1402
+ test_result_str += 'all tests passed'
1403
+ else:
1404
+ test_result_str += '%d tests failed,' % (num_failed)
1405
+ test_result_str += '"' + failed_tests_desc[:-2] + '"'
1406
+
1407
+ return [test_result_str, '']
1408
+
1409
+
1410
+ # =============================================================================
1411
+ # Generate a time string to be used for the log file
1412
+ #
1413
+ curr_time_tuple = time.localtime()
1414
+ curr_time_str = (
1415
+ str(curr_time_tuple[0])
1416
+ + str(curr_time_tuple[1]).zfill(2)
1417
+ + str(curr_time_tuple[2]).zfill(2)
1418
+ + '-'
1419
+ + str(curr_time_tuple[3]).zfill(2)
1420
+ + str(curr_time_tuple[4]).zfill(2)
1421
+ )
1422
+
1423
+ # Write test output header line into the log file
1424
+
1425
+ Path('logs').mkdir(parents=True, exist_ok=True)
1426
+ out_file_name = './logs/basefunctionsTest-%s.csv' % (curr_time_str)
1427
+
1428
+ out_file = open(out_file_name, 'w')
1429
+
1430
+ out_file.write("Test results generated by basefunctionsTest.py" + os.linesep)
1431
+
1432
+ out_file.write("Test started: " + curr_time_str + os.linesep)
1433
+
1434
+ out_file.write(os.linesep)
1435
+
1436
+ out_file.write(
1437
+ 'Module name,Class name,Method name,Arguments,Test_type,'
1438
+ + 'Patterns tested,Summary,Failure description'
1439
+ + os.linesep
1440
+ )
1441
+
1442
+ out_file.write(os.linesep)
1443
+
1444
+ # Create instances for the testcase class that calls all tests
1445
+ #
1446
+ test_res_list = []
1447
+ test_case_ins = TestCase('testArguments')
1448
+ test_res_list += test_case_ins.testArguments(test_argument_data_dict)
1449
+
1450
+ test_case_ins = TestCase('testFunct_char_set_ascii')
1451
+ test_res_list += test_case_ins.testFunct_char_set_ascii()
1452
+
1453
+ test_case_ins = TestCase('testFunct_float_to_str')
1454
+ test_res_list += test_case_ins.testFunct_float_to_str()
1455
+
1456
+ test_case_ins = TestCase('testFunct_str2comma_separated_list')
1457
+ test_res_list += test_case_ins.testFunct_str2comma_separated_list()
1458
+
1459
+ test_case_ins = TestCase('testFunct_read_csv_file')
1460
+ test_res_list += test_case_ins.testFunct_read_csv_file()
1461
+
1462
+ test_case_ins = TestCase('testFunct_write_csv_file')
1463
+ test_res_list += test_case_ins.testFunct_write_csv_file()
1464
+
1465
+ # Write test output results into the log file
1466
+ #
1467
+ for line in test_res_list:
1468
+ out_file.write(line + os.linesep)
1469
+
1470
+ out_file.close()
1471
+
1472
+ print('Test results are written to', out_file_name)
1473
+
1474
+ for line in test_res_list:
1475
+ print(line)
tests/contdepfunct_test.py ADDED
@@ -0,0 +1,381 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ import os
4
+ import random
5
+ from pathlib import Path
6
+
7
+ random.seed(42)
8
+ import time
9
+ import unittest
10
+
11
+ from geco_data_generator import contdepfunct
12
+
13
+
14
+ # Define the number of tests to be done for the functionality tests
15
+ #
16
+ num_tests = 100000
17
+
18
+ # Define argument test cases here
19
+ #
20
+ test_argument_data_dict = {
21
+ ('contdepfunct','n/a','blood_pressure_depending_on_age'): \
22
+ {'age':[[[1],[2],[3],[77],[8],[9],[3.76],[99.9],[129.65],[42]],
23
+ [[None],[''],['test'],[-65],[999],[-0.03],[130.01],
24
+ [187.87],[{}],[[]]]]},
25
+ ('contdepfunct','n/a','salary_depending_on_age'): \
26
+ {'age':[[[1],[2],[3],[77],[8],[9],[3.76],[99.9],[129.65],[42]],
27
+ [[None],[''],['test'],[-65],[999],[-0.03],[130.01],
28
+ [187.87],[{}],[[]]]]}
29
+ }
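+ # Both functions are exercised with ages roughly between 0 and 130 as normal
+ # cases, while values outside this range and non-numeric inputs are expected
+ # to raise an exception.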
30
+
31
+ # =============================================================================
32
+
33
+ class TestCase(unittest.TestCase):
34
+
35
+ # Initialise test case - - - - - - - - - - - - - - - - - - - - - - - - - - -
36
+ #
37
+ def setUp(self):
38
+ pass # Nothing to initialize
39
+
40
+ # Clean up test case - - - - - - - - - - - - - - - - - - - - - - - - - - - -
41
+ #
42
+ def tearDown(self):
43
+ pass # Nothing to clean up
44
+
45
+ # ---------------------------------------------------------------------------
46
+ # Start test cases
47
+
48
+ def testArguments(self, test_data):
49
+ """Test if a function or method can be called or initialised correctly with
50
+ different values for their input arguments (parameters).
51
+
52
+ The argument 'test_data' must be a dictionary with the following
53
+ structure:
54
+
55
+ - Keys are tuples consisting of three strings:
56
+ (module_name, class_name, function_or_method_name)
57
+ - Values are dictionaries where the keys are the names of the input
58
+ argument that is being tested, and the values of these dictionaries
59
+ are a list that contains two lists. The first list contains valid
60
+ input arguments ('normal' argument tests) that should pass the test,
61
+ while the second list contains illegal input arguments ('exception'
62
+ argument tests) that should raise an exception.
63
+
64
+ The lists of test cases are itself lists, each containing a number of
65
+ input argument values, as many as are expected by the function or
66
+ method that is being tested.
67
+
68
+ This function returns a list containing the test results, where each
69
+ list element is a string with comma separated values (CSV) which are to
70
+ be written into the testing log file.
71
+ """
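+ # A schematic entry of 'test_data' (mirroring 'test_argument_data_dict' above):
+ # ('contdepfunct', 'n/a', 'salary_depending_on_age'):
+ # {'age': [[[20], [42]], # normal inputs, should not raise
+ # [[None], ['x']]]} # exception inputs, should raise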
72
+
73
+ test_res_list = [] # The test results, one element per element in the test
74
+ # data dictionary
75
+
76
+ for (test_method_names, test_method_data) in test_data.items():
77
+ test_method_name = test_method_names[2]
78
+ print('Testing arguments for method/function:', test_method_name)
79
+
80
+ for argument_name in test_method_data:
81
+ print(' Testing input argument:', argument_name)
82
+
83
+ norm_test_data = test_method_data[argument_name][0]
84
+ exce_test_data = test_method_data[argument_name][1]
85
+ print(' Normal test cases: ', norm_test_data)
86
+ print(' Exception test cases:', exce_test_data)
87
+
88
+ # Conduct normal tests - - - - - - - - - - - - - - - - - - - - - - - -
89
+ #
90
+ num_norm_test_cases = len(norm_test_data)
91
+ num_norm_test_passed = 0
92
+ num_norm_test_failed = 0
93
+ norm_failed_desc_str = ''
94
+
95
+ for test_input in norm_test_data:
96
+ passed = True # Assume the test will pass :-)
97
+
98
+ if (len(test_input) == 0): # Method has no input argument
99
+ try:
100
+ getattr(contdepfunct,test_method_name)()
101
+ except:
102
+ passed = False
103
+
104
+ elif (len(test_input) == 1): # Method has one input argument
105
+ try:
106
+ getattr(contdepfunct,test_method_name)(test_input[0])
107
+ except:
108
+ passed = False
109
+
110
+ elif (len(test_input) == 2): # Method has two input arguments
111
+ try:
112
+ getattr(contdepfunct,test_method_name)(test_input[0],
113
+ test_input[1])
114
+ except:
115
+ passed = False
116
+
117
+ elif (len(test_input) == 3): # Method has three input arguments
118
+ try:
119
+ getattr(contdepfunct,test_method_name)(test_input[0],
120
+ test_input[1],
121
+ test_input[2])
122
+ except:
123
+ passed = False
124
+
125
+ elif (len(test_input) == 4): # Method has four input arguments
126
+ try:
127
+ getattr(contdepfunct,test_method_name)(test_input[0],
128
+ test_input[1],
129
+ test_input[2],
130
+ test_input[3])
131
+ except:
132
+ passed = False
133
+
134
+ elif (len(test_input) == 5): # Method has five input arguments
135
+ try:
136
+ getattr(contdepfunct,test_method_name)(test_input[0],
137
+ test_input[1],
138
+ test_input[2],
139
+ test_input[3],
140
+ test_input[4])
141
+ except:
142
+ passed = False
143
+
144
+ else:
145
+ raise Exception('Illegal number of input arguments')
146
+
147
+ # Now process test results
148
+ #
149
+ if (passed == False):
150
+ num_norm_test_failed += 1
151
+ norm_failed_desc_str += 'Failed test for input ' + \
152
+ "'%s'; " % (str(test_input))
153
+ else:
154
+ num_norm_test_passed += 1
155
+
156
+ assert num_norm_test_failed+num_norm_test_passed == num_norm_test_cases
157
+
158
+ norm_test_result_str = test_method_names[0] + ',' + \
159
+ test_method_names[1] + ',' + \
160
+ test_method_names[2] + ',' + \
161
+ argument_name + ',normal,' + \
162
+ '%d,' % (num_norm_test_cases)
163
+ if (num_norm_test_failed == 0):
164
+ norm_test_result_str += 'all tests passed'
165
+ else:
166
+ norm_test_result_str += '%d tests failed,' % (num_norm_test_failed)
167
+ norm_test_result_str += '"'+norm_failed_desc_str[:-2]+'"'
168
+
169
+ test_res_list.append(norm_test_result_str)
170
+
171
+ # Conduct exception tests - - - - - - - - - - - - - - - - - - - - - - -
172
+ #
173
+ num_exce_test_cases = len(exce_test_data)
174
+ num_exce_test_passed = 0
175
+ num_exce_test_failed = 0
176
+ exce_failed_desc_str = ''
177
+
178
+ for test_input in exce_test_data:
179
+ passed = True # Assume the test will pass (i.e. raise an exception)
180
+
181
+ if (len(test_input) == 0): # Method has no input argument
182
+ try:
183
+ self.assertRaises(Exception,
184
+ getattr(contdepfunct,test_method_name))
185
+ except:
186
+ passed = False
187
+
188
+ elif (len(test_input) == 1): # Method has one input argument
189
+ try:
190
+ self.assertRaises(Exception,
191
+ getattr(contdepfunct,test_method_name),test_input[0])
192
+ except:
193
+ passed = False
194
+
195
+ elif (len(test_input) == 2): # Method has two input arguments
196
+ try:
197
+ self.assertRaises(Exception,
198
+ getattr(contdepfunct,test_method_name),test_input[0],
199
+ test_input[1])
200
+ except:
201
+ passed = False
202
+
203
+ elif (len(test_input) == 3): # Method has three input arguments
204
+ try:
205
+ self.assertRaises(Exception,
206
+ getattr(contdepfunct,test_method_name),test_input[0],
207
+ test_input[1],
208
+ test_input[2])
209
+ except:
210
+ passed = False
211
+
212
+ elif (len(test_input) == 4): # Method has four input arguments
213
+ try:
214
+ self.assertRaises(Exception,
215
+ getattr(contdepfunct,test_method_name),test_input[0],
216
+ test_input[1],
217
+ test_input[2],
218
+ test_input[3])
219
+ except:
220
+ passed = False
221
+
222
+ elif (len(test_input) == 5): # Method has five input arguments
223
+ try:
224
+ self.assertRaises(Exception,
225
+ getattr(contdepfunct,test_method_name),test_input[0],
226
+ test_input[1],
227
+ test_input[2],
228
+ test_input[3],
229
+ test_input[4])
230
+ except:
231
+ passed = False
232
+
233
+ else:
234
+ raise Exception('Illegal number of input arguments')
235
+
236
+ # Now process test results
237
+ #
238
+ if (passed == False):
239
+ num_exce_test_failed += 1
240
+ exce_failed_desc_str += 'Failed test for input ' + \
241
+ "'%s'; " % (str(test_input))
242
+ else:
243
+ num_exce_test_passed += 1
244
+
245
+ assert num_exce_test_failed+num_exce_test_passed == num_exce_test_cases
246
+
247
+ exce_test_result_str = test_method_names[0] + ',' + \
248
+ test_method_names[1] + ',' + \
249
+ test_method_names[2] + ',' + \
250
+ argument_name + ',exception,' + \
251
+ '%d,' % (num_exce_test_cases)
252
+ if (num_exce_test_failed == 0):
253
+ exce_test_result_str += 'all tests passed'
254
+ else:
255
+ exce_test_result_str += '%d tests failed,' % (num_exce_test_failed)
256
+ exce_test_result_str += '"'+exce_failed_desc_str[:-2]+'"'
257
+
258
+ test_res_list.append(exce_test_result_str)
259
+
260
+ test_res_list.append('') # Empty line between tests of methods
261
+
262
+ return test_res_list
263
+
264
+ # ---------------------------------------------------------------------------
265
+
266
+ def testFunct_blood_pressure_depending_on_age(self, num_tests):
267
+ """Test the functionality of 'blood_pressure_depending_on_age', making
268
+ sure this function returns a positive floating point value.
269
+ """
270
+
271
+ print('Testing functionality of "blood_pressure_depending_on_age"')
272
+
273
+ num_passed = 0
274
+ num_failed = 0
275
+
276
+ for i in range(num_tests):
277
+
278
+ age = random.uniform(0.0, 120.0)
279
+
280
+ try:
281
+ assert (contdepfunct.blood_pressure_depending_on_age(age) >= 0.0)
282
+ num_passed += 1
283
+ except:
284
+ num_failed += 1
285
+
286
+ assert num_passed + num_failed == num_tests
287
+
288
+ test_result_str = 'contdepfunct,n/a,blood_pressure_depending_on_age,' + \
289
+ 'n/a,funct,%d,' % (num_tests)
290
+ if (num_failed == 0):
291
+ test_result_str += 'all tests passed'
292
+ else:
293
+ test_result_str += '%d tests failed' % (num_failed)
294
+
295
+ return [test_result_str,'']
296
+
297
+ # ---------------------------------------------------------------------------
298
+
299
+ def testFunct_salary_depending_on_age(self, num_tests):
300
+ """Test the functionality of 'salary_depending_on_age', making sure
301
+ this function returns a positive floating point value.
302
+ """
303
+
304
+ print('Testing functionality of "salary_depending_on_age"')
305
+
306
+ num_passed = 0
307
+ num_failed = 0
308
+
309
+ for i in range(num_tests):
310
+
311
+ age = random.uniform(0.0, 120.0)
312
+
313
+ try:
314
+ assert (contdepfunct.salary_depending_on_age(age) >= 0.0)
315
+ num_passed += 1
316
+ except:
317
+ num_failed += 1
318
+
319
+ assert num_passed + num_failed == num_tests
320
+
321
+ test_result_str = 'contdepfunct,n/a,salary_depending_on_age,' + \
322
+ 'n/a,funct,%d,' % (num_tests)
323
+ if (num_failed == 0):
324
+ test_result_str += 'all tests passed'
325
+ else:
326
+ test_result_str += '%d tests failed' % (num_failed)
327
+
328
+ return [test_result_str,'']
329
+
330
+ # =============================================================================
331
+ # Generate a time string to be used for the log file
332
+ #
333
+ curr_time_tuple = time.localtime()
334
+ curr_time_str = str(curr_time_tuple[0]) + str(curr_time_tuple[1]).zfill(2) + \
335
+ str(curr_time_tuple[2]).zfill(2) + '-' + \
336
+ str(curr_time_tuple[3]).zfill(2) + \
337
+ str(curr_time_tuple[4]).zfill(2)
338
+
339
+ # Write test output header line into the log file
340
+ #
341
+ Path('./logs').mkdir(exist_ok=True)
342
+ out_file_name = './logs/contdepfunctTest-%s.csv' % (curr_time_str)
343
+
344
+ out_file = open(out_file_name, 'w')
345
+
346
+ out_file.write('Test results generated by contdepfunctTest.py' + os.linesep)
347
+
348
+ out_file.write('Test started: ' + curr_time_str + os.linesep)
349
+
350
+ out_file.write(os.linesep)
351
+
352
+ out_file.write('Module name,Class name,Method name,Arguments,Test_type,' + \
353
+ 'Patterns tested,Summary,Failure description' + os.linesep)
354
+ out_file.write(os.linesep)
355
+
356
+ # Create instances for the testcase class that calls all tests
357
+ #
358
+ test_res_list = []
359
+ test_case_ins = TestCase('testArguments')
360
+ test_res_list += test_case_ins.testArguments(test_argument_data_dict)
361
+
362
+ test_case_ins = TestCase('testFunct_blood_pressure_depending_on_age')
363
+ test_res_list += \
364
+ test_case_ins.testFunct_blood_pressure_depending_on_age(num_tests)
365
+
366
+ test_case_ins = TestCase('testFunct_salary_depending_on_age')
367
+ test_res_list += test_case_ins.testFunct_salary_depending_on_age(num_tests)
368
+
369
+ # Write test output results into the log file
370
+ #
371
+ for line in test_res_list:
372
+ out_file.write(line + os.linesep)
373
+
374
+ out_file.close()
375
+
376
+ print('Test results are written to', out_file_name)
377
+
378
+ for line in test_res_list:
379
+ print(line)
380
+
381
+
tests/corruptor_test.py ADDED
The diff for this file is too large to render. See raw diff
 
tests/generator_test.py ADDED
The diff for this file is too large to render. See raw diff
 
tests/logs/basefunctionsTest-20230130-1746.csv ADDED
@@ -0,0 +1,140 @@
1
+ Test results generated by basefunctionsTest.py
2
+ Test started: 20230130-1746
3
+
4
+ Module name,Class name,Method name,Arguments,Test_type,Patterns tested,Summary,Failure description
5
+
6
+ basefunctions,n/a,check_is_not_none,variable,normal,3,all tests passed
7
+ basefunctions,n/a,check_is_not_none,variable,exception,6,all tests passed
8
+ basefunctions,n/a,check_is_not_none,value,normal,5,all tests passed
9
+ basefunctions,n/a,check_is_not_none,value,exception,1,all tests passed
10
+
11
+ basefunctions,n/a,check_is_string,variable,normal,3,all tests passed
12
+ basefunctions,n/a,check_is_string,variable,exception,6,all tests passed
13
+ basefunctions,n/a,check_is_string,value,normal,7,all tests passed
14
+ basefunctions,n/a,check_is_string,value,exception,4,all tests passed
15
+
16
+ basefunctions,n/a,check_is_unicode_string,variable,normal,3,all tests passed
17
+ basefunctions,n/a,check_is_unicode_string,variable,exception,6,all tests passed
18
+ basefunctions,n/a,check_is_unicode_string,value,normal,4,all tests passed
19
+ basefunctions,n/a,check_is_unicode_string,value,exception,6,6 tests failed,"Failed test for input '['testArgument', None]'; Failed test for input '['testArgument', '']'; Failed test for input '['testArgument', -123]'; Failed test for input '['testArgument', 123]'; Failed test for input '['testArgument', 1.87]'; Failed test for input '['testArgument', 'ascii']'"
20
+
21
+ basefunctions,n/a,check_is_string_or_unicode_string,variable,normal,4,all tests passed
22
+ basefunctions,n/a,check_is_string_or_unicode_string,variable,exception,6,all tests passed
23
+ basefunctions,n/a,check_is_string_or_unicode_string,value,normal,6,all tests passed
24
+ basefunctions,n/a,check_is_string_or_unicode_string,value,exception,6,all tests passed
25
+
26
+ basefunctions,n/a,check_is_non_empty_string,variable,normal,3,all tests passed
27
+ basefunctions,n/a,check_is_non_empty_string,variable,exception,6,all tests passed
28
+ basefunctions,n/a,check_is_non_empty_string,value,normal,6,all tests passed
29
+ basefunctions,n/a,check_is_non_empty_string,value,exception,7,all tests passed
30
+
31
+ basefunctions,n/a,check_is_number,variable,normal,3,all tests passed
32
+ basefunctions,n/a,check_is_number,variable,exception,6,all tests passed
33
+ basefunctions,n/a,check_is_number,value,normal,6,all tests passed
34
+ basefunctions,n/a,check_is_number,value,exception,8,all tests passed
35
+
36
+ basefunctions,n/a,check_is_positive,variable,normal,3,all tests passed
37
+ basefunctions,n/a,check_is_positive,variable,exception,6,all tests passed
38
+ basefunctions,n/a,check_is_positive,value,normal,7,all tests passed
39
+ basefunctions,n/a,check_is_positive,value,exception,8,all tests passed
40
+
41
+ basefunctions,n/a,check_is_not_negative,variable,normal,3,all tests passed
42
+ basefunctions,n/a,check_is_not_negative,variable,exception,6,all tests passed
43
+ basefunctions,n/a,check_is_not_negative,value,normal,9,all tests passed
44
+ basefunctions,n/a,check_is_not_negative,value,exception,8,all tests passed
45
+
46
+ basefunctions,n/a,check_is_normalised,variable,normal,3,all tests passed
47
+ basefunctions,n/a,check_is_normalised,variable,exception,6,all tests passed
48
+ basefunctions,n/a,check_is_normalised,value,normal,10,all tests passed
49
+ basefunctions,n/a,check_is_normalised,value,exception,10,all tests passed
50
+
51
+ basefunctions,n/a,check_is_percentage,variable,normal,3,all tests passed
52
+ basefunctions,n/a,check_is_percentage,variable,exception,6,all tests passed
53
+ basefunctions,n/a,check_is_percentage,value,normal,14,all tests passed
54
+ basefunctions,n/a,check_is_percentage,value,exception,10,all tests passed
55
+
56
+ basefunctions,n/a,check_is_integer,variable,normal,3,all tests passed
57
+ basefunctions,n/a,check_is_integer,variable,exception,6,all tests passed
58
+ basefunctions,n/a,check_is_integer,value,normal,8,all tests passed
59
+ basefunctions,n/a,check_is_integer,value,exception,10,all tests passed
60
+
61
+ basefunctions,n/a,check_is_float,variable,normal,3,all tests passed
62
+ basefunctions,n/a,check_is_float,variable,exception,6,all tests passed
63
+ basefunctions,n/a,check_is_float,value,normal,6,all tests passed
64
+ basefunctions,n/a,check_is_float,value,exception,10,all tests passed
65
+
66
+ basefunctions,n/a,check_is_dictionary,variable,normal,3,all tests passed
67
+ basefunctions,n/a,check_is_dictionary,variable,exception,6,all tests passed
68
+ basefunctions,n/a,check_is_dictionary,value,normal,3,all tests passed
69
+ basefunctions,n/a,check_is_dictionary,value,exception,4,all tests passed
70
+
71
+ basefunctions,n/a,check_is_list,variable,normal,3,all tests passed
72
+ basefunctions,n/a,check_is_list,variable,exception,6,all tests passed
73
+ basefunctions,n/a,check_is_list,value,normal,4,all tests passed
74
+ basefunctions,n/a,check_is_list,value,exception,4,all tests passed
75
+
76
+ basefunctions,n/a,check_is_set,variable,normal,3,all tests passed
77
+ basefunctions,n/a,check_is_set,variable,exception,6,all tests passed
78
+ basefunctions,n/a,check_is_set,value,normal,4,all tests passed
79
+ basefunctions,n/a,check_is_set,value,exception,6,all tests passed
80
+
81
+ basefunctions,n/a,check_is_tuple,variable,normal,3,all tests passed
82
+ basefunctions,n/a,check_is_tuple,variable,exception,6,all tests passed
83
+ basefunctions,n/a,check_is_tuple,value,normal,5,all tests passed
84
+ basefunctions,n/a,check_is_tuple,value,exception,8,all tests passed
85
+
86
+ basefunctions,n/a,check_is_flag,variable,normal,3,all tests passed
87
+ basefunctions,n/a,check_is_flag,variable,exception,6,all tests passed
88
+ basefunctions,n/a,check_is_flag,value,normal,6,all tests passed
89
+ basefunctions,n/a,check_is_flag,value,exception,6,all tests passed
90
+
91
+ basefunctions,n/a,check_unicode_encoding_exists,unicode_encoding_string,normal,3,all tests passed
92
+ basefunctions,n/a,check_unicode_encoding_exists,unicode_encoding_string,exception,4,all tests passed
93
+
94
+ basefunctions,n/a,check_is_function_or_method,variable,normal,3,all tests passed
95
+ basefunctions,n/a,check_is_function_or_method,variable,exception,6,all tests passed
96
+ basefunctions,n/a,check_is_function_or_method,value,normal,4,all tests passed
97
+ basefunctions,n/a,check_is_function_or_method,value,exception,6,all tests passed
98
+
99
+ basefunctions,n/a,char_set_ascii,string_variable,normal,5,all tests passed
100
+ basefunctions,n/a,char_set_ascii,string_variable,exception,6,all tests passed
101
+
102
+ basefunctions,n/a,check_is_valid_format_str,variable,normal,3,all tests passed
103
+ basefunctions,n/a,check_is_valid_format_str,variable,exception,6,all tests passed
104
+ basefunctions,n/a,check_is_valid_format_str,value,normal,10,all tests passed
105
+ basefunctions,n/a,check_is_valid_format_str,value,exception,8,all tests passed
106
+
107
+ basefunctions,n/a,float_to_str,number_variable,normal,5,all tests passed
108
+ basefunctions,n/a,float_to_str,number_variable,exception,6,all tests passed
109
+ basefunctions,n/a,float_to_str,format_string,normal,10,all tests passed
110
+ basefunctions,n/a,float_to_str,format_string,exception,8,all tests passed
111
+
112
+ basefunctions,n/a,str2comma_separated_list,string_variable,normal,8,all tests passed
113
+ basefunctions,n/a,str2comma_separated_list,string_variable,exception,6,1 tests failed,"Failed test for input '['']'"
114
+
115
+ basefunctions,n/a,read_csv_file,file_name,normal,3,all tests passed
116
+ basefunctions,n/a,read_csv_file,file_name,exception,6,all tests passed
117
+ basefunctions,n/a,read_csv_file,encoding,normal,4,all tests passed
118
+ basefunctions,n/a,read_csv_file,encoding,exception,6,all tests passed
119
+ basefunctions,n/a,read_csv_file,header_line,normal,6,all tests passed
120
+ basefunctions,n/a,read_csv_file,header_line,exception,6,all tests passed
121
+
122
+ basefunctions,n/a,write_csv_file,file_name,normal,4,all tests passed
123
+ basefunctions,n/a,write_csv_file,file_name,exception,6,all tests passed
124
+ basefunctions,n/a,write_csv_file,encoding,normal,4,all tests passed
125
+ basefunctions,n/a,write_csv_file,encoding,exception,6,all tests passed
126
+ basefunctions,n/a,write_csv_file,header_list,normal,6,all tests passed
127
+ basefunctions,n/a,write_csv_file,header_list,exception,6,all tests passed
128
+ basefunctions,n/a,write_csv_file,file_data,normal,6,all tests passed
129
+ basefunctions,n/a,write_csv_file,file_data,exception,7,all tests passed
130
+
131
+ basefunctions,n/a,char_set_ascii,n/a,funct,26,all tests passed
132
+
133
+ basefunctions,n/a,float_to_str,n/a,funct,44,all tests passed
134
+
135
+ basefunctions,n/a,str2comma_separated_list,n/a,funct,10,all tests passed
136
+
137
+ basefunctions,n/a,read_csv_file,n/a,funct,3,all tests passed
138
+
139
+ basefunctions,n/a,write_csv_file,n/a,funct,8,all tests passed
140
+
tests/logs/contdepfunctTest-20230130-1748.csv ADDED
@@ -0,0 +1,5 @@
1
+ Test results generated by contdepfunctTest.py
2
+ Test started: 20230130-1748
3
+
4
+ Module name,Class name,Method name,Arguments,Test_type,Patterns tested,Summary,Failure description
5
+
tests/logs/contdepfunctTest-20230130-1749.csv ADDED
@@ -0,0 +1,15 @@
1
+ Test results generated by contdepfunctTest.py
2
+ Test started: 20230130-1749
3
+
4
+ Module name,Class name,Method name,Arguments,Test_type,Patterns tested,Summary,Failure description
5
+
6
+ contdepfunct,n/a,blood_pressure_depending_on_age,age,normal,10,all tests passed
7
+ contdepfunct,n/a,blood_pressure_depending_on_age,age,exception,10,all tests passed
8
+
9
+ contdepfunct,n/a,salary_depending_on_age,age,normal,10,all tests passed
10
+ contdepfunct,n/a,salary_depending_on_age,age,exception,10,all tests passed
11
+
12
+ contdepfunct,n/a,blood_pressure_depending_on_age,n/a,funct,100000,all tests passed
13
+
14
+ contdepfunct,n/a,salary_depending_on_age,n/a,funct,100000,all tests passed
15
+
tests/main_test.py ADDED
@@ -0,0 +1,793 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ from pathlib import Path
4
+
5
+ from geco_data_generator import (
6
+ attrgenfunct,
7
+ contdepfunct,
8
+ basefunctions,
9
+ generator,
10
+ corruptor,
11
+ )
12
+
13
+ do_large_tests = False # Set to True to run tests to generate large
14
+ # datasets (warning: this is time consuming)
15
+ import os
16
+ import time
17
+ import unittest
18
+
19
+ import random
20
+
21
+ random.seed(42)
22
+
23
+ # =============================================================================
24
+
25
+ # Define test cases, each being a list containing the main parameters required
26
+ # for generating a data set:
27
+ # 1) rec_id_attr_name
28
+ # 2) num_org_rec
29
+ # 3) num_dup_rec
30
+ # 4) max_duplicate_per_record
31
+ # 5) num_duplicates_distribution ('uniform', 'poisson', 'zipf')
32
+ # 6) max_modification_per_attr
33
+ # 7) num_modification_per_record
34
+ #
35
+ test_cases = [
36
+ ['rec_id', 100, 100, 1, 'uniform', 1, 1],
37
+ ['rec_id', 100, 100, 1, 'poisson', 1, 1],
38
+ ['rec_id', 100, 100, 1, 'zipf', 1, 1],
39
+ ['rec_id', 10000, 10000, 1, 'uniform', 1, 1],
40
+ ['rec_id', 10000, 10000, 1, 'poisson', 1, 1],
41
+ ['rec_id', 10000, 10000, 1, 'zipf', 1, 1],
42
+ ]
43
+ if do_large_tests == True:
44
+ test_cases += [
45
+ ['rec_id', 100000, 100000, 1, 'uniform', 1, 1],
46
+ ['rec_id', 100000, 100000, 1, 'poisson', 1, 1],
47
+ ['rec_id', 100000, 100000, 1, 'zipf', 1, 1],
48
+ ]
49
+ #
50
+ test_cases += [
51
+ ['rec_id', 100, 20, 1, 'uniform', 1, 1],
52
+ ['rec_id', 100, 20, 1, 'poisson', 1, 1],
53
+ ['rec_id', 100, 20, 1, 'zipf', 1, 1],
54
+ ['rec_id', 10000, 2000, 1, 'uniform', 1, 1],
55
+ ['rec_id', 10000, 2000, 1, 'poisson', 1, 1],
56
+ ['rec_id', 10000, 2000, 1, 'zipf', 1, 1],
57
+ ]
58
+ if do_large_tests == True:
59
+ test_cases += [
60
+ ['rec_id', 100000, 20000, 1, 'uniform', 1, 1],
61
+ ['rec_id', 100000, 20000, 1, 'poisson', 1, 1],
62
+ ['rec_id', 100000, 20000, 1, 'zipf', 1, 1],
63
+ ]
64
+ #
65
+ test_cases += [
66
+ ['rec_num', 123, 321, 5, 'uniform', 1, 3],
67
+ ['rec_num', 123, 321, 5, 'poisson', 1, 3],
68
+ ['rec_num', 123, 321, 5, 'zipf', 1, 3],
69
+ ['rec_num', 12345, 14321, 5, 'uniform', 1, 3],
70
+ ['rec_num', 12345, 14321, 5, 'poisson', 1, 3],
71
+ ['rec_num', 12345, 14321, 5, 'zipf', 1, 3],
72
+ ]
73
+ if do_large_tests == True:
74
+ test_cases += [
75
+ ['rec_num', 123456, 154321, 5, 'uniform', 1, 3],
76
+ ['rec_num', 123456, 154321, 5, 'poisson', 1, 3],
77
+ ['rec_num', 123456, 154321, 5, 'zipf', 1, 3],
78
+ ]
79
+ #
80
+ test_cases += [
81
+ ['rec_num', 123, 321, 3, 'uniform', 3, 9],
82
+ ['rec_num', 123, 321, 3, 'poisson', 3, 9],
83
+ ['rec_num', 123, 321, 3, 'zipf', 3, 9],
84
+ ['rec_num', 12345, 14321, 3, 'uniform', 3, 9],
85
+ ['rec_num', 12345, 14321, 3, 'poisson', 3, 9],
86
+ ['rec_num', 12345, 14321, 3, 'zipf', 3, 9],
87
+ ]
88
+ if do_large_tests == True:
89
+ test_cases += [
90
+ ['rec_num', 123456, 154321, 3, 'uniform', 3, 9],
91
+ ['rec_num', 123456, 154321, 3, 'poisson', 3, 9],
92
+ ['rec_num', 123456, 154321, 3, 'zipf', 3, 9],
93
+ ]
94
+ #
95
+ test_cases += [
96
+ ['rec_num', 321, 123, 11, 'uniform', 2, 7],
97
+ ['rec_num', 321, 123, 11, 'poisson', 2, 7],
98
+ ['rec_num', 321, 123, 11, 'zipf', 2, 7],
99
+ ['rec_num', 43210, 14321, 11, 'uniform', 2, 7],
100
+ ['rec_num', 43210, 14321, 11, 'poisson', 2, 7],
101
+ ['rec_num', 43210, 14321, 11, 'zipf', 2, 7],
102
+ ]
103
+ if do_large_tests == True:
104
+ test_cases += [
105
+ ['rec_num', 654321, 123456, 11, 'uniform', 2, 7],
106
+ ['rec_num', 654321, 123456, 11, 'poisson', 2, 7],
107
+ ['rec_num', 654321, 123456, 11, 'zipf', 2, 7],
108
+ ]
109
+
110
+ # Set the Unicode encoding for all test data generation
111
+ #
112
+ unicode_encoding_used = 'ascii'
113
+
114
+ # Check the unicode encoding selected is valid
115
+ #
116
+ basefunctions.check_unicode_encoding_exists(unicode_encoding_used)
117
+
118
+ # =============================================================================
119
+
120
+
121
+ class TestCase(unittest.TestCase):
122
+
123
+ # Initialise test case - - - - - - - - - - - - - - - - - - - - - - - - - - -
124
+ #
125
+ def setUp(self):
126
+ pass # Nothing to initialize
127
+
128
+ # Clean up test case - - - - - - - - - - - - - - - - - - - - - - - - - - - -
129
+ #
130
+ def tearDown(self):
131
+ pass # Nothing to clean up
132
+
133
+ # ---------------------------------------------------------------------------
134
+ # Start test cases
135
+
136
+ def testDataGeneration(self, test_case):
137
+ """Test the overall generation of a data set according to the parameters
138
+ given, by checking that the generated data set follows the parameter
139
+ specification given.
140
+ """
141
+
142
+ rec_id_attr_name = test_case[0]
143
+ num_org_rec = test_case[1]
144
+ num_dup_rec = test_case[2]
145
+ max_duplicate_per_record = test_case[3]
146
+ num_duplicates_distribution = test_case[4]
147
+ max_modification_per_attr = test_case[5]
148
+ num_modification_per_record = test_case[6]
149
+
150
+ test_res_list = ['', 'Test case parameters:']
151
+ test_res_list.append(' rec_id_attr_name = %s' % (rec_id_attr_name))
152
+ test_res_list.append(' num_org_rec = %s' % (num_org_rec))
153
+ test_res_list.append(' num_dup_rec = %s' % (num_dup_rec))
154
+ test_res_list.append(
155
+ ' max_duplicate_per_record = %s' % (max_duplicate_per_record)
156
+ )
157
+ test_res_list.append(
158
+ ' num_duplicates_distribution = %s' % (num_duplicates_distribution)
159
+ )
160
+ test_res_list.append(
161
+ ' max_modification_per_attr = %s' % (max_modification_per_attr)
162
+ )
163
+ test_res_list.append(
164
+ ' num_modification_per_record = %s' % (num_modification_per_record)
165
+ )
166
+ test_res_list.append('')
167
+
168
+ # Define the attributes to be generated (based on methods from - - - - -
169
+ # the generator.py module)
170
+
171
+ # Individual attributes
172
+ #
173
+ given_name_attr = generator.GenerateFreqAttribute(
174
+ attribute_name='given-name',
175
+ freq_file_name='../lookup-files/givenname_freq.csv',
176
+ has_header_line=False,
177
+ unicode_encoding=unicode_encoding_used,
178
+ )
179
+
180
+ surnname_attr = generator.GenerateFreqAttribute(
181
+ attribute_name='surname',
182
+ freq_file_name='../lookup-files/surname-freq.csv',
183
+ has_header_line=False,
184
+ unicode_encoding=unicode_encoding_used,
185
+ )
186
+
187
+ postcode_attr = generator.GenerateFreqAttribute(
188
+ attribute_name='postcode',
189
+ freq_file_name='../lookup-files/postcode_act_freq.csv',
190
+ has_header_line=False,
191
+ unicode_encoding=unicode_encoding_used,
192
+ )
193
+
194
+ oz_phone_num_attr = generator.GenerateFuncAttribute(
195
+ attribute_name='oz-phone-number',
196
+ function=attrgenfunct.generate_phone_number_australia,
197
+ )
198
+
199
+ credit_card_attr = generator.GenerateFuncAttribute(
200
+ attribute_name='credit-card-number',
201
+ function=attrgenfunct.generate_credit_card_number,
202
+ )
203
+
204
+ age_uniform_attr = generator.GenerateFuncAttribute(
205
+ attribute_name='age-uniform',
206
+ function=attrgenfunct.generate_uniform_age,
207
+ parameters=[0, 120],
208
+ )
209
+
210
+ age_death_normal_attr = generator.GenerateFuncAttribute(
211
+ attribute_name='age-death-normal',
212
+ function=attrgenfunct.generate_normal_age,
213
+ parameters=[80, 20, 0, 120],
214
+ )
215
+
216
+ income_normal_attr = generator.GenerateFuncAttribute(
217
+ attribute_name='income-normal',
218
+ function=attrgenfunct.generate_normal_value,
219
+ parameters=[75000, 20000, 0, 1000000, 'float2'],
220
+ )
221
+
222
+ rating_normal_attr = generator.GenerateFuncAttribute(
223
+ attribute_name='rating-normal',
224
+ function=attrgenfunct.generate_normal_value,
225
+ parameters=[2.5, 1.0, 0.0, 5.0, 'int'],
226
+ )
227
+
228
+ # Compound (dependent) attributes
229
+ #
230
+ gender_city_comp_attr = generator.GenerateCateCateCompoundAttribute(
231
+ categorical1_attribute_name='gender',
232
+ categorical2_attribute_name='city',
233
+ lookup_file_name='../lookup-files/gender-city.csv',
234
+ has_header_line=True,
235
+ unicode_encoding=unicode_encoding_used,
236
+ )
237
+
238
+ gender_income_comp_attr = generator.GenerateCateContCompoundAttribute(
239
+ categorical_attribute_name='alt-gender',
240
+ continuous_attribute_name='income',
241
+ continuous_value_type='float1',
242
+ lookup_file_name='gender-income.csv',
243
+ has_header_line=False,
244
+ unicode_encoding=unicode_encoding_used,
245
+ )
246
+
247
+ gender_city_salary_comp_attr = generator.GenerateCateCateContCompoundAttribute(
248
+ categorical1_attribute_name='alt-gender-2',
249
+ categorical2_attribute_name='town',
250
+ continuous_attribute_name='salary',
251
+ continuous_value_type='float4',
252
+ lookup_file_name='gender-city-income.csv',
253
+ has_header_line=False,
254
+ unicode_encoding=unicode_encoding_used,
255
+ )
256
+
257
+ age_blood_pressure_comp_attr = generator.GenerateContContCompoundAttribute(
258
+ continuous1_attribute_name='medical-age',
259
+ continuous2_attribute_name='blood-pressure',
260
+ continuous1_funct_name='uniform',
261
+ continuous1_funct_param=[10, 110],
262
+ continuous2_function=contdepfunct.blood_pressure_depending_on_age,
263
+ continuous1_value_type='int',
264
+ continuous2_value_type='float3',
265
+ )
266
+
267
+ age_salary_comp_attr = generator.GenerateContContCompoundAttribute(
268
+ continuous1_attribute_name='medical-age-2',
269
+ continuous2_attribute_name='medical-salary',
270
+ continuous1_funct_name='normal',
271
+ continuous1_funct_param=[45, 20, 25, 130],
272
+ continuous2_function=contdepfunct.salary_depending_on_age,
273
+ continuous1_value_type='int',
274
+ continuous2_value_type='float1',
275
+ )
276
+
277
+ # Define how attribute values are to be modified (corrupted) - - - - - -
278
+ # (based on methods from the corruptor.py module)
279
+ #
280
+ average_edit_corruptor = corruptor.CorruptValueEdit(
281
+ position_function=corruptor.position_mod_normal,
282
+ char_set_funct=basefunctions.char_set_ascii,
283
+ insert_prob=0.25,
284
+ delete_prob=0.25,
285
+ substitute_prob=0.25,
286
+ transpose_prob=0.25,
287
+ )
288
+
289
+ sub_tra_edit_corruptor = corruptor.CorruptValueEdit(
290
+ position_function=corruptor.position_mod_uniform,
291
+ char_set_funct=basefunctions.char_set_ascii,
292
+ insert_prob=0.0,
293
+ delete_prob=0.0,
294
+ substitute_prob=0.5,
295
+ transpose_prob=0.5,
296
+ )
297
+
298
+ ins_del_edit_corruptor = corruptor.CorruptValueEdit(
299
+ position_function=corruptor.position_mod_normal,
300
+ char_set_funct=basefunctions.char_set_ascii,
301
+ insert_prob=0.5,
302
+ delete_prob=0.5,
303
+ substitute_prob=0.0,
304
+ transpose_prob=0.0,
305
+ )
306
+
307
+ surname_misspell_corruptor = corruptor.CorruptCategoricalValue(
308
+ lookup_file_name='surname-misspell.csv',
309
+ has_header_line=False,
310
+ unicode_encoding=unicode_encoding_used,
311
+ )
312
+
313
+ ocr_corruptor = corruptor.CorruptValueOCR(
314
+ position_function=corruptor.position_mod_uniform,
315
+ lookup_file_name='ocr-variations.csv',
316
+ has_header_line=False,
317
+ unicode_encoding=unicode_encoding_used,
318
+ )
319
+
320
+ keyboard_corruptor = corruptor.CorruptValueKeyboard(
321
+ position_function=corruptor.position_mod_normal, row_prob=0.5, col_prob=0.5
322
+ )
323
+
324
+ phonetic_corruptor = corruptor.CorruptValuePhonetic(
325
+ position_function=corruptor.position_mod_uniform,
326
+ lookup_file_name='phonetic-variations.csv',
327
+ has_header_line=False,
328
+ unicode_encoding=unicode_encoding_used,
329
+ )
330
+
331
+ missing_val_empty_corruptor = corruptor.CorruptMissingValue()
332
+ missing_val_miss_corruptor = corruptor.CorruptMissingValue(missing_value='miss')
333
+ missing_val_unkown_corruptor = corruptor.CorruptMissingValue(
334
+ missing_value='unknown'
335
+ )
336
+
337
+ # Define the attributes to be generated for this data set, and the data
338
+ # set itself
339
+ #
340
+ attr_name_list = [
341
+ 'given-name',
342
+ 'surname',
343
+ 'city',
344
+ 'postcode',
345
+ 'oz-phone-number',
346
+ 'credit-card-number',
347
+ 'age-uniform',
348
+ 'age-death-normal',
349
+ 'income-normal',
350
+ 'rating-normal',
351
+ 'gender',
352
+ 'alt-gender',
353
+ 'alt-gender-2',
354
+ 'town',
355
+ 'income',
356
+ 'salary',
357
+ 'medical-age',
358
+ 'blood-pressure',
359
+ 'medical-age-2',
360
+ 'medical-salary',
361
+ ]
362
+
363
+ attr_data_list = [
364
+ given_name_attr,
365
+ surnname_attr,
366
+ postcode_attr,
367
+ oz_phone_num_attr,
368
+ credit_card_attr,
369
+ age_uniform_attr,
370
+ age_death_normal_attr,
371
+ income_normal_attr,
372
+ rating_normal_attr,
373
+ gender_city_comp_attr,
374
+ gender_income_comp_attr,
375
+ gender_city_salary_comp_attr,
376
+ age_blood_pressure_comp_attr,
377
+ age_salary_comp_attr,
378
+ ]
379
+
380
+ # Initialise the main data generator
381
+ #
382
+ test_data_generator = generator.GenerateDataSet(
383
+ output_file_name='no-file-name',
384
+ write_header_line=True,
385
+ rec_id_attr_name=rec_id_attr_name,
386
+ number_of_records=num_org_rec,
387
+ attribute_name_list=attr_name_list,
388
+ attribute_data_list=attr_data_list,
389
+ unicode_encoding=unicode_encoding_used,
390
+ )
391
+
392
+ # Define distribution of how likely an attribute will be selected for
393
+ # modification (sum of probabilities must be 1.0)
394
+ #
395
+ attr_mod_prob_dictionary = {
396
+ 'given-name': 0.1,
397
+ 'surname': 0.1,
398
+ 'city': 0.1,
399
+ 'postcode': 0.1,
400
+ 'oz-phone-number': 0.1,
401
+ 'age-death-normal': 0.1,
402
+ 'income-normal': 0.1,
403
+ 'gender': 0.1,
404
+ 'town': 0.1,
405
+ 'income': 0.1,
406
+ }
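+ # (The ten probabilities above are each 0.1 and therefore sum to 1.0, as the
+ # comment above requires.)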
407
+
408
+ # For each attribute, a distribution of which corruptors to apply needs
409
+ # to be given, with the sum of probabilities being 1.0 for each attribute
410
+ #
411
+ attr_mod_data_dictionary = {
412
+ 'given-name': [
413
+ (0.25, average_edit_corruptor),
414
+ (0.25, ocr_corruptor),
415
+ (0.25, phonetic_corruptor),
416
+ (0.25, missing_val_miss_corruptor),
417
+ ],
418
+ 'surname': [(0.5, surname_misspell_corruptor), (0.5, average_edit_corruptor)],
419
+ 'city': [(0.5, keyboard_corruptor), (0.5, missing_val_empty_corruptor)],
420
+ 'postcode': [
421
+ (0.3, missing_val_unkown_corruptor),
422
+ (0.7, sub_tra_edit_corruptor),
423
+ ],
424
+ 'oz-phone-number': [
425
+ (0.2, missing_val_empty_corruptor),
426
+ (0.4, sub_tra_edit_corruptor),
427
+ (0.4, keyboard_corruptor),
428
+ ],
429
+ 'age-death-normal': [(1.0, missing_val_unkown_corruptor)],
430
+ 'income-normal': [
431
+ (0.3, keyboard_corruptor),
432
+ (0.3, ocr_corruptor),
433
+ (0.4, missing_val_empty_corruptor),
434
+ ],
435
+ 'gender': [(0.5, sub_tra_edit_corruptor), (0.5, ocr_corruptor)],
436
+ 'town': [
437
+ (0.2, average_edit_corruptor),
438
+ (0.3, ocr_corruptor),
439
+ (0.2, keyboard_corruptor),
440
+ (0.3, phonetic_corruptor),
441
+ ],
442
+ 'income': [(1.0, missing_val_miss_corruptor)],
443
+ }
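+ # (For every attribute above the corruptor weights also sum to 1.0, e.g. four
+ # times 0.25 for 'given-name' and 0.3 + 0.7 for 'postcode'.)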
444
+
445
+ # Initialise the main data corruptor
446
+ #
447
+ test_data_corruptor = corruptor.CorruptDataSet(
448
+ number_of_org_records=num_org_rec,
449
+ number_of_mod_records=num_dup_rec,
450
+ attribute_name_list=attr_name_list,
451
+ max_num_dup_per_rec=max_duplicate_per_record,
452
+ num_dup_dist=num_duplicates_distribution,
453
+ max_num_mod_per_attr=max_modification_per_attr,
454
+ num_mod_per_rec=num_modification_per_record,
455
+ attr_mod_prob_dict=attr_mod_prob_dictionary,
456
+ attr_mod_data_dict=attr_mod_data_dictionary,
457
+ )
458
+
459
+ passed = True # Assume the test will pass :-)
460
+
461
+ # Start the generation process
462
+ #
463
+ try:
464
+ rec_dict = test_data_generator.generate()
465
+
466
+ except Exception as exce_value: # Something bad happened
467
+ test_res_list.append(
468
+ ' generator.generate() raised Exception: "%s"' % (str(exce_value))
469
+ )
470
+ return test_res_list # Abandon test
471
+
472
+ num_org_rec_gen = len(rec_dict)
473
+
474
+ if num_org_rec_gen != num_org_rec:
475
+ passed = False
476
+ test_res_list.append(
477
+ ' Wrong number of original records generated:'
478
+ + ' %d, expected %d' % (num_org_rec_gen, num_org_rec)
479
+ )
480
+
481
+ # Corrupt (modify) the original records into duplicate records
482
+ #
483
+ try:
484
+ rec_dict = test_data_corruptor.corrupt_records(rec_dict)
485
+ except Exception as exce_value: # Something bad happened
486
+ test_res_list.append(
487
+ ' corruptor.corrupt_records() raised '
488
+ + 'Exception: "%s"' % (str(exce_value))
489
+ )
490
+ return test_res_list # Abandon test
491
+
492
+ num_dup_rec_gen = len(rec_dict) - num_org_rec_gen
493
+
494
+ if num_dup_rec_gen != num_dup_rec:
495
+ passed = False
496
+ test_res_list.append(
497
+ ' Wrong number of duplicate records generated:'
498
+ + ' %d, expected %d' % (num_dup_rec_gen, num_dup_rec)
499
+ )
500
+
501
+ num_dup_counts = {} # Count how many records have a certain number of
502
+ # duplicates
503
+
504
+ # Do tests on all generated records
505
+ #
506
+ for (rec_id, rec_list) in rec_dict.items(): # dict.iteritems() does not exist in Python 3
507
+ if len(rec_list) != len(attr_name_list):
508
+ passed = False
509
+ test_res_list.append(
510
+ ' Record with identifier "%s" contains wrong' % (rec_id)
511
+ + ' number of attributes: '
512
+ + ' %d, expected %d' % (len(rec_list), len(attr_name_list))
513
+ )
514
+
515
+ if 'org' in rec_id: # An original record
516
+
517
+ # Check the number of duplicates for this record is what is expected
518
+ #
519
+ num_dups = 0
520
+ rec_num = rec_id.split('-')[1]
521
+
522
+ for d in range(max_duplicate_per_record * 2):
523
+ tmp_rec_id = 'rec-%s-dup-%d' % (rec_num, d)
524
+ if tmp_rec_id in rec_dict:
525
+ num_dups += 1
526
+ if num_dups > max_duplicate_per_record:
527
+ passed = False
528
+ test_res_list.append(
529
+ ' Too many duplicate records for original'
530
+ + ' record "%s": %d'
531
+ % (rec_id, num_dups)
532
+ )
533
+
534
+ d_count = num_dup_counts.get(num_dups, 0) + 1
535
+ num_dup_counts[num_dups] = d_count
536
+
537
+ # Check no duplicate number is outside expected range
538
+ #
539
+ for d in range(max_duplicate_per_record, max_duplicate_per_record * 2):
540
+ tmp_rec_id = 'rec-%s-dup-%d' % (rec_num, d)
541
+ if tmp_rec_id in rec_dict:
542
+ passed = False
543
+ test_res_list.append(
544
+ ' Illegal duplicate number: %s' % (tmp_rec_id)
545
+ + ' (larger than max. number '
546
+ + 'of duplicates per record %s)' % (max_duplicate_per_record)
547
+ )
548
+
549
+ # Check values in certain attributes only contain letters
550
+ #
551
+ for i in [0, 1, 2, 10, 11, 12, 13]:
552
+ test_val = rec_list[i].replace(' ', '')
553
+ test_val = test_val.replace('-', '')
554
+ test_val = test_val.replace("'", '')
555
+ if test_val.isalpha() == False:
556
+ passed = False
557
+ test_res_list.append(
558
+ ' Value in attribute "%s" is not only ' % (attr_name_list[i])
559
+ + 'letters:'
560
+ )
561
+ test_res_list.append(' Org: %s' % (str(rec_list)))
562
+
563
+ # Check values in certain attributes only contain digits
564
+ #
565
+ for i in [3, 4, 5, 6, 7, 8, 9, 14, 15, 16, 17, 18, 19]:
566
+ test_val = rec_list[i].replace(' ', '')
567
+ test_val = test_val.replace('.', '')
568
+ if test_val.isdigit() == False:
569
+ passed = False
570
+ test_res_list.append(
571
+ ' Value in attribute "%s" is not only ' % (attr_name_list[i])
572
+ + 'digits:'
573
+ )
574
+ test_res_list.append(' Org: %s' % (str(rec_list)))
575
+
576
+ # Check age values are in range
577
+ #
578
+ for i in [6, 7, 16]:
579
+ test_val = int(rec_list[i].strip())
580
+ if (test_val < 0) or (test_val > 130):
581
+ passed = False
582
+ test_res_list.append(
583
+ ' Age value in attribute "%s" is out of' % (attr_name_list[i])
584
+ + ' range:'
585
+ )
586
+ test_res_list.append(' Org: %s' % (str(rec_list)))
587
+
588
+ # Check length of postcode, telephone and credit card numbers
589
+ #
590
+ if len(rec_list[3]) != 4:
591
+ passed = False
592
+ test_res_list.append(' Postcode does not have 4 digits:')
593
+ test_res_list.append(' Org: %s' % (str(rec_list)))
594
+
595
+ if (len(rec_list[4]) != 12) or (rec_list[4][0] != '0'):
596
+ passed = False
597
+ test_res_list.append(' Australian phone number has wrong format:')
598
+ test_res_list.append(' Org: %s' % (str(rec_list)))
599
+
600
+ # Check 'rating' is between 0 and 5
601
+ #
602
+ test_val = int(rec_list[9].strip())
603
+ if (test_val < 0) or (test_val > 5):
604
+ passed = False
605
+ test_res_list.append(' "rating-normal" value is out of range:')
606
+ test_res_list.append(' Org: %s' % (str(rec_list)))
607
+
608
+ # Check gender values
609
+ #
610
+ test_val = rec_list[10]
611
+ if test_val not in ['male', 'female']:
612
+ passed = False
613
+ test_res_list.append(' "gender" value is out of range:')
614
+ test_res_list.append(' Org: %s' % (str(rec_list)))
615
+
616
+ test_val = rec_list[11]
617
+ if test_val not in ['m', 'f', 'na']:
618
+ passed = False
619
+ test_res_list.append(' "alt-gender" value is out of range:')
620
+ test_res_list.append(' Org: %s' % (str(rec_list)))
621
+
622
+ test_val = rec_list[12]
623
+ if test_val not in ['male', 'female']:
624
+ passed = False
625
+ test_res_list.append(' "alt-gender-2" value is out of range:')
626
+ test_res_list.append(' Org: %s' % (str(rec_list)))
627
+
628
+ if 'dup' in rec_id: # A duplicate record
629
+
630
+ # Get the corresponding original record
631
+ #
632
+ org_rec_id = 'rec-%s-org' % (rec_id.split('-')[1])
633
+ org_rec_list = rec_dict[org_rec_id]
634
+
635
+ # Check the duplicate number
636
+ #
637
+ dup_num = int(rec_id.split('-')[-1])
638
+ if (dup_num < 0) or (dup_num > max_duplicate_per_record - 1):
639
+ passed = False
640
+ test_res_list.append(
641
+ ' Duplicate record with identifier "%s" ' % (rec_id)
642
+ + ' has an illegal duplicate number:'
643
+ + ' %d' % (dup_num)
644
+ )
645
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
646
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
647
+
648
+ # Check that a duplicate record contains the expected - - - - - - - - -
649
+ # number of modifications
650
+
651
+ num_diff_val = 0 # Count how many values are different
652
+
653
+ for i in range(len(rec_list)): # Check all attribute values
654
+ if rec_list[i] != org_rec_list[i]:
655
+ num_diff_val += 1
656
+
657
+ if num_diff_val == 0: # No differences between org and dup record
658
+ passed = False
659
+ test_res_list.append(
660
+ ' Duplicate record with identifier "%s" ' % (rec_id)
661
+ + 'is the same as its original record'
662
+ )
663
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
664
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
665
+
666
+ if num_diff_val < num_modification_per_record:
667
+ passed = False
668
+ test_res_list.append(
669
+ ' Duplicate record with identifier "%s" ' % (rec_id)
670
+ + 'contains fewer modifications '
671
+ + 'than expected (%d instead of %d)'
672
+ % (num_diff_val, num_modification_per_record)
673
+ )
674
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
675
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
676
+
677
+ # Check that certain attributes have not been modified
678
+ #
679
+ for i in [5, 6, 9, 11, 12, 15, 16, 17, 18, 19]:
680
+ if rec_list[i] != org_rec_list[i]:
681
+ passed = False
682
+ test_res_list.append(
683
+ ' Duplicate record with identifier "%s" ' % (rec_id)
684
+ + 'contains modified attribute '
685
+ + 'values that should not be modified'
686
+ )
687
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
688
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
689
+
690
+ # Check the content of certain attribute values, and how they
691
+ # differ between original and duplicate records
692
+ #
693
+ # Due to the possibility that multiple modifications are applied on the
694
+ # same attribute, these tests are limited
695
+
696
+ test_org_val = org_rec_list[2] # City
697
+ test_dup_val = rec_list[2]
698
+ if test_dup_val != '':
699
+ if len(test_org_val) != len(test_dup_val):
700
+ passed = False
701
+ test_res_list.append(' "city" values have different length:')
702
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
703
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
704
+
705
+ test_org_val = org_rec_list[4] # Australian phone number
706
+ test_dup_val = rec_list[4]
707
+ if test_dup_val != '':
708
+ if len(test_org_val) != len(test_dup_val):
709
+ passed = False
710
+ test_res_list.append(
711
+ ' "oz-phone-number" values have different' + ' length:'
712
+ )
713
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
714
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
715
+
716
+ test_org_val = org_rec_list[7] # Age-death-normal
717
+ test_dup_val = rec_list[7]
718
+ if test_dup_val != 'unknown':
719
+ if test_org_val != test_dup_val:
720
+ passed = False
721
+ test_res_list.append(' Wrong value for "age-death-normal":')
722
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
723
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
724
+
725
+ test_org_val = org_rec_list[14] # Income
726
+ test_dup_val = rec_list[14]
727
+ if test_dup_val != 'miss':
728
+ if test_org_val != test_dup_val:
729
+ passed = False
730
+ test_res_list.append(' Wrong value for "income":')
731
+ test_res_list.append(' Org: %s' % (str(org_rec_list)))
732
+ test_res_list.append(' Dup: %s' % (str(rec_list)))
733
+
734
+ test_res_list.append(
735
+ ' Distribution of duplicates: ("%s" expected)' % num_duplicates_distribution
736
+ )
737
+ dup_keys = list(num_dup_counts.keys()) # list() needed in Python 3 before sorting
738
+ dup_keys.sort()
739
+ for d in dup_keys:
740
+ test_res_list.append(' %d: %d records' % (d, num_dup_counts[d]))
741
+ test_res_list.append('')
742
+
743
+ if passed == True:
744
+ test_res_list.append(' All tests passed')
745
+ test_res_list.append('')
746
+
747
+ return test_res_list
748
+
749
+
750
+ # =============================================================================
751
+ # Generate a time string to be used for the log file
752
+ #
753
+ curr_time_tuple = time.localtime()
754
+ curr_time_str = (
755
+ str(curr_time_tuple[0])
756
+ + str(curr_time_tuple[1]).zfill(2)
757
+ + str(curr_time_tuple[2]).zfill(2)
758
+ + '-'
759
+ + str(curr_time_tuple[3]).zfill(2)
760
+ + str(curr_time_tuple[4]).zfill(2)
761
+ )
762
+
763
+ # Write test output header line into the log file
764
+ #
765
+ Path('./logs').mkdir(exist_ok=True)
766
+ out_file_name = './logs/mainTest-%s.txt' % (curr_time_str)
767
+
768
+ out_file = open(out_file_name, 'w')
769
+
770
+ out_file.write('Test results generated by mainTest.py' + os.linesep)
771
+
772
+ out_file.write('Test started: ' + curr_time_str + os.linesep)
773
+
774
+ out_file.write(os.linesep)
775
+
776
+ for test_case in test_cases:
777
+
778
+ # Create instances for the testcase class that calls all tests
779
+ #
780
+ test_case_ins = TestCase('testDataGeneration')
781
+ test_res_list = test_case_ins.testDataGeneration(test_case)
782
+
783
+ # Write test output results into the log file
784
+ #
785
+ for line in test_res_list:
786
+ out_file.write(line + os.linesep)
787
+
788
+ for line in test_res_list:
789
+ print(line)
790
+
791
+ out_file.close()
792
+
793
+ print('Test results are written to', out_file_name)
tests/test.csv ADDED
@@ -0,0 +1,4 @@
1
+ attr1,attr2,attr3
2
+ id1,peter,lyneham
3
+ id2,miller,dickson
4
+ id3,smith,hackett
tests/test.txt ADDED
File without changes
tests/test1.csv ADDED
@@ -0,0 +1,4 @@
1
+ attr1,attr2,attr3
2
+ id1,peter,lyneham
3
+ id2,miller,dickson
4
+ id3,smith,hackett