Dataset schema (per-column dtype and min/max string length or value):

| column | dtype | min | max |
| --- | --- | --- | --- |
| `repo_name` | string (length) | 8 | 38 |
| `pr_number` | int64 | 3 | 47.1k |
| `pr_title` | string (length) | 8 | 175 |
| `pr_description` | string (length) | 2 | 19.8k |
| `author` | null | – | – |
| `date_created` | string (length) | 25 | 25 |
| `date_merged` | string (length) | 25 | 25 |
| `filepath` | string (length) | 6 | 136 |
| `before_content` | string (length) | 54 | 884k |
| `after_content` | string (length) | 56 | 884k |
| `pr_author` | string (length) | 3 | 21 |
| `previous_commit` | string (length) | 40 | 40 |
| `pr_commit` | string (length) | 40 | 40 |
| `comment` | string (length) | 2 | 25.4k |
| `comment_author` | string (length) | 3 | 29 |
| `__index_level_0__` | int64 | 0 | 5.1k |
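For orientation, the following sketch shows how a dump with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset ID below is a hypothetical placeholder, since the dump does not name the dataset.

```python
# Minimal sketch, assuming this dump comes from a Hugging Face dataset.
from datasets import load_dataset

# Hypothetical dataset ID; the dump does not name the actual dataset.
ds = load_dataset("org/pr-review-comments", split="train")
print(ds.features)  # column names and dtypes, matching the schema above

row = ds[0]
print(row["repo_name"], row["pr_number"], row["comment_author"])
```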
scikit-learn-contrib/category_encoders
398
(WIP) Partial fix for getting feature names out
I think this is a partial fix for this open issue: https://github.com/scikit-learn-contrib/category_encoders/issues/395 It remains to check the behaviour of other estimators that are not ONE_TO_ONE. Please let me know if you like the work in progress and I will try to continue.
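For context, issue #395 concerns getting sklearn-style output feature names from the encoders. Below is a minimal sketch of the bookkeeping the tests in this PR rely on, using the `feature_names_out_` attribute that appears in the test file; the example column and values are illustrative.

```python
# Sketch of the feature-name bookkeeping this PR is about.
# Uses category_encoders' RankHotEncoder as in the tests below;
# the example data is illustrative.
import pandas as pd
import category_encoders as encoders

train = pd.DataFrame({"city": ["chicago", "st louis"]})
enc = encoders.RankHotEncoder(use_cat_names=True).fit(train)

# One input column expands to several output columns, so the encoder
# must track the generated names, e.g. ['city_chicago', 'city_st louis'].
print(enc.feature_names_out_)
```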
null
2023-02-23 13:33:41+00:00
2023-03-13 11:48:24+00:00
tests/test_rankhot.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1]-(X.shape[1]-1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape,X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0],[1, 1],], columns=["city_1", "city_2"], index=[1,2]) expected_out_train = pd.DataFrame([[1, 0],[1, 1],[1, 0],[1, 1],], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0],[0, 0],], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): self.assertEqual(m1["col"], m2["col"]) 
pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0], [1, 1], ], columns=["city_1", "city_2"], index=[1, 2]) expected_out_train = pd.DataFrame([[1, 0], [1, 1], [1, 0], [1, 1], ], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0], [0, 0], ], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping, check_dtype=False) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): 
self.assertEqual(m1["col"], m2["col"]) pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
JaimeArboleda
5eb7a2d6359d680bdadd0534bdb983e712a47f9c
570827e6b48737d0c9aece8aca31edd6da02c1b2
Why no longer check the dtype? The test hasn't changed except for spacing, has it? Why relax it?
PaulWestenthanner
36
scikit-learn-contrib/category_encoders
398
(WIP) Partial fix for getting feature names out
I think this is a partial fix for this open issue: https://github.com/scikit-learn-contrib/category_encoders/issues/395 It remains to check the behaviour of other estimators that are not ONE_TO_ONE. Please let me know if you like the work in progress and I will try to continue.
null
2023-02-23 13:33:41+00:00
2023-03-13 11:48:24+00:00
tests/test_rankhot.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1]-(X.shape[1]-1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape,X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0],[1, 1],], columns=["city_1", "city_2"], index=[1,2]) expected_out_train = pd.DataFrame([[1, 0],[1, 1],[1, 0],[1, 1],], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0],[0, 0],], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): self.assertEqual(m1["col"], m2["col"]) 
pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0], [1, 1], ], columns=["city_1", "city_2"], index=[1, 2]) expected_out_train = pd.DataFrame([[1, 0], [1, 1], [1, 0], [1, 1], ], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0], [0, 0], ], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping, check_dtype=False) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): 
self.assertEqual(m1["col"], m2["col"]) pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
JaimeArboleda
5eb7a2d6359d680bdadd0534bdb983e712a47f9c
570827e6b48737d0c9aece8aca31edd6da02c1b2
I checked whether all existing tests still passed, and found that this particular test failed due to a `dtype` issue: one column was `int32` and the other `int64`, or something like that, but the content was the same. So I added this relaxation so that the test passes. Maybe it depends on the pandas version? I don't know...
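For illustration, here is a minimal, self-contained sketch of the failure mode being described: two frames with identical contents but different integer widths fail a strict `assert_frame_equal`, and pass once `check_dtype=False` is given. The column name is illustrative.

```python
# Minimal reproduction of the dtype mismatch described above.
import numpy as np
import pandas as pd

a = pd.DataFrame({"city_1": np.array([1, 0], dtype=np.int32)})
b = pd.DataFrame({"city_1": np.array([1, 0], dtype=np.int64)})

# Strict comparison fails: same values, different dtypes (int32 vs int64).
try:
    pd.testing.assert_frame_equal(a, b)
except AssertionError as err:
    print("strict check failed:", err)

# Relaxed comparison passes, since only the values are compared.
pd.testing.assert_frame_equal(a, b, check_dtype=False)
```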
JaimeArboleda
37
scikit-learn-contrib/category_encoders
398
(WIP) Partial fix for getting feature names out
I think this is a partial fix for this open issue: https://github.com/scikit-learn-contrib/category_encoders/issues/395 It remains to check the behaviour of other estimators that are not ONE_TO_ONE. Please let me know if you like the work in progress and I will try to continue.
null
2023-02-23 13:33:41+00:00
2023-03-13 11:48:24+00:00
tests/test_rankhot.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1]-(X.shape[1]-1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape,X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0],[1, 1],], columns=["city_1", "city_2"], index=[1,2]) expected_out_train = pd.DataFrame([[1, 0],[1, 1],[1, 0],[1, 1],], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0],[0, 0],], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): self.assertEqual(m1["col"], m2["col"]) 
pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) class TestRankHotEncoder(TestCase): def test_handleNaNvalue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['none']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.none.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_handleCategoricalValue(self): enc = encoders.RankHotEncoder(cols=['categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertEqual(t_f.shape[1] - (X.shape[1] - 1), len(X.categorical.unique())) self.assertTupleEqual(inv_tf.shape, X.shape) def test_naCatagoricalValue(self): enc = encoders.RankHotEncoder(handle_unknown='value', cols=['na_categorical']) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) self.assertTupleEqual(inv_tf.shape, X.shape) def test_extraValue(self): train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago', "st louis"]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.RankHotEncoder(handle_unknown='value') train_out = enc.fit_transform(train) expected_mapping = pd.DataFrame([[1, 0], [1, 1], ], columns=["city_1", "city_2"], index=[1, 2]) expected_out_train = pd.DataFrame([[1, 0], [1, 1], [1, 0], [1, 1], ], columns=["city_1", "city_2"]) expected_out_test = pd.DataFrame([[1, 0], [0, 0], ], columns=["city_1", "city_2"]) pd.testing.assert_frame_equal(train_out, expected_out_train) pd.testing.assert_frame_equal(enc.mapping[0]["mapping"], expected_mapping, check_dtype=False) t_f = enc.transform(test) pd.testing.assert_frame_equal(t_f, expected_out_test) inv_tf = enc.inverse_transform(t_f) expected_inverse_test = pd.DataFrame({'city': ['chicago', np.nan]}) th.verify_inverse_transform(expected_inverse_test, inv_tf) def test_invariant(self): enc = encoders.RankHotEncoder(cols=['invariant'], drop_invariant=True) enc.fit(X) self.assertFalse(any([c.startswith("invariant") for c in enc.feature_names_out_])) self.assertTrue(any([c.startswith("invariant") for c in enc.invariant_cols])) def test_categoricalNaming(self): train = pd.DataFrame({'city': ['chicago', 'st louis']}) enc = encoders.RankHotEncoder(use_cat_names=True) enc.fit(train) tf = enc.transform(train) self.assertListEqual(['city_chicago', 'city_st louis'], list(tf.columns)) def test_rankhot(self): enc = encoders.RankHotEncoder(verbose=1) enc.fit(X) t_f = enc.transform(X) inv_tf = enc.inverse_transform(t_f) th.verify_inverse_transform(X, inv_tf) def test_order(self): """ Since RankHotEncoding respects the order in ordinal variables, the mapping should be independent of input order """ train_order_1 = pd.DataFrame({'grade': ['B', 'A', 'C', 'F', 'D', 'C', 'F', 'D'], "ord_var": [1, 3, 2, 2, 2, 1, 3, 1]}) train_order_2 = pd.DataFrame({'grade': ['A', 'D', 'C', 'B', 'C', 'F', 'F', 'D'], "ord_var": [3, 1, 2, 2, 2, 1, 3, 1]}) enc = encoders.RankHotEncoder(cols=["grade", "ord_var"]) enc.fit(train_order_1) mapping_order_1 = enc.ordinal_encoder.mapping enc.fit(train_order_2) mapping_order_2 = enc.ordinal_encoder.mapping for m1, m2 in zip(mapping_order_1, mapping_order_2): 
self.assertEqual(m1["col"], m2["col"]) pd.testing.assert_series_equal(m1["mapping"], m2["mapping"])
JaimeArboleda
5eb7a2d6359d680bdadd0534bdb983e712a47f9c
570827e6b48737d0c9aece8aca31edd6da02c1b2
Since you haven't really touched this, it might just be on your machine, not in the CI pipeline. But I'm fine with the relaxation.
PaulWestenthanner
38
scikit-learn-contrib/category_encoders
396
OneHotEncoder: Adding handle_missing='ignore' option
Closes #386

## Proposed Changes

- added an **ignore** option to the `handle_missing` parameter of the `OneHotEncoder`. This will encode `NaN` values as 0 in every dummy column. However, compared to the **value** option, no additional "_nan" category is created.
- added a simple test for the new option.
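Based on this description, the new option would behave roughly as follows. This is a sketch assuming the PR branch (releases without the **ignore** option will reject it), with data mirroring the test added in this PR:

```python
# Sketch of the proposed handle_missing='ignore' behaviour.
# Assumes the PR branch of category_encoders; data mirrors the new test.
import numpy as np
import pandas as pd
import category_encoders as encoders

train = pd.DataFrame({"city": ["Chicago", np.nan, "Geneva"]})
enc = encoders.OneHotEncoder(handle_missing="ignore")
out = enc.fit_transform(train)

# The NaN row becomes all zeros, and no extra '_nan' column is created:
#    city_1  city_3
# 0       1       0
# 1       0       0
# 2       0       1
print(out)
```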
null
2023-01-23 15:57:00+00:00
2023-01-24 14:38:08+00:00
tests/test_one_hot.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 
'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIgnore(self): train = pd.DataFrame({'x': ['A', 'B', np.nan], 'y': ['A', None, 'A'], 'z': [np.NaN, 'B', 'B']}) train['z'] = train['z'].astype('category') expected_result = pd.DataFrame({'x_A': [1, 0, 0], 'x_B': [0, 1, 0], 'y_A': [1, 0, 1], 'z_B': [0, 1, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal(result, expected_result) def test_HandleMissingIgnore_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', np.NaN,'Geneva']}) expected_result = pd.DataFrame({'city_1': [1, 0, 0], 'city_3': [0, 0, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore') result = encoder.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): 
train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
woodly0
49c62c7b782b04f310a7d48c674b2ee6f3987541
f66949194ee608e532da5ef86c9078c66e40a145
Could you please add two more columns to the train data set:

- a column containing a `None` instead of a `np.nan`
- a column of type `pd.Categorical`

They often turn out to be edge cases and I think it makes sense to have them tested.
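A minimal sketch of the requested fixture; this is essentially the shape the updated test in this PR ends up using:

```python
# Train frame covering the two requested edge cases: a None value
# and a pandas Categorical column (mirrors the updated test).
import numpy as np
import pandas as pd

train = pd.DataFrame({
    "x": ["A", "B", np.nan],  # plain np.nan
    "y": ["A", None, "A"],    # None instead of np.nan
    "z": [np.nan, "B", "B"],  # cast to Categorical below
})
train["z"] = train["z"].astype("category")
print(train.dtypes)  # x, y: object; z: category
```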
PaulWestenthanner
39
scikit-learn-contrib/category_encoders
396
OneHotEncoder: Adding handle_missing='ignore' option
Closes #386

## Proposed Changes

- added an **ignore** option to the `handle_missing` parameter of the `OneHotEncoder`. This will encode `NaN` values as 0 in every dummy column. However, compared to the **value** option, no additional "_nan" category is created.
- added a simple test for the new option.
null
2023-01-23 15:57:00+00:00
2023-01-24 14:38:08+00:00
tests/test_one_hot.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 
'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIgnore(self): train = pd.DataFrame({'x': ['A', 'B', np.nan], 'y': ['A', None, 'A'], 'z': [np.NaN, 'B', 'B']}) train['z'] = train['z'].astype('category') expected_result = pd.DataFrame({'x_A': [1, 0, 0], 'x_B': [0, 1, 0], 'y_A': [1, 0, 1], 'z_B': [0, 1, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal(result, expected_result) def test_HandleMissingIgnore_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', np.NaN,'Geneva']}) expected_result = pd.DataFrame({'city_1': [1, 0, 0], 'city_3': [0, 0, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore') result = encoder.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): 
train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
woodly0
49c62c7b782b04f310a7d48c674b2ee6f3987541
f66949194ee608e532da5ef86c9078c66e40a145
and maybe also an assertion that the mapping is as expected (containing the nan)
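A minimal sketch of the kind of mapping assertion the reviewer is asking for. It assumes the fitted encoder exposes `ordinal_encoder.mapping` as a list of per-column dicts whose `'mapping'` Series is keyed by category; treat that attribute layout as an assumption rather than a documented contract:

```python
import numpy as np
import pandas as pd
import category_encoders as encoders

train = pd.DataFrame({'x': ['A', np.nan, 'B']})
encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True)
encoder.fit_transform(train)

# Hypothetical check: the learned category mapping should still list NaN
# among the seen levels, so the missing value is tracked explicitly.
ordinal_mapping = encoder.ordinal_encoder.mapping[0]['mapping']
assert any(pd.isna(category) for category in ordinal_mapping.index)
```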
PaulWestenthanner
40
scikit-learn-contrib/category_encoders
396
OneHotEncoder: Adding handle_missing='ignore' option
Closes #386 ## Proposed Changes - added an **ignore** option to the `handle_missing` parameter of the `OneHotEncoder`. This encodes `NaN` values as 0 in every dummy column; unlike the **value** option, no additional "_nan" category is created (see the sketch below). - added a simple test for the new option.
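A minimal usage sketch of the option described above; the output shown in comments reflects the behaviour the new test asserts:

```python
import numpy as np
import pandas as pd
import category_encoders as encoders

train = pd.DataFrame({'x': ['A', 'B', np.nan]})
encoder = encoders.OneHotEncoder(handle_missing='ignore', use_cat_names=True)
result = encoder.fit_transform(train)

# The NaN row is encoded as all zeros and no extra 'x_nan' column is added.
print(result)
#    x_A  x_B
# 0    1    0
# 1    0    1
# 2    0    0
```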
null
2023-01-23 15:57:00+00:00
2023-01-24 14:38:08+00:00
tests/test_one_hot.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 
'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import numpy as np import tests.helpers as th import category_encoders as encoders class TestOneHotEncoderTestCase(TestCase): def test_one_hot(self): X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) enc = encoders.OneHotEncoder(verbose=1, return_df=False) enc.fit(X) self.assertEqual(enc.transform(X_t).shape[1], enc.transform(X).shape[1], 'We have to get the same count of columns despite the presence of a new value') enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan') enc.fit(X) out = enc.transform(X_t) self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error') # The exception is already raised in fit() because transform() is called there to get # feature_names right. enc.fit(X) with self.assertRaises(ValueError): enc.transform(X_t) enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True) enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_A', out.columns.values) enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator') enc.fit(X) out = enc.transform(X_t) self.assertIn('extra_-1', out.columns.values) # test inverse_transform X_i = th.create_dataset(n_rows=100, has_missing=False) X_i_t = th.create_dataset(n_rows=50, has_missing=False) cols = ['underscore', 'none', 'extra', 'categorical'] enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols) enc.fit(X_i) obtained = enc.inverse_transform(enc.transform(X_i_t)) th.verify_inverse_transform(X_i_t, obtained) def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self): encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator', return_df=False) result = encoder.fit_transform([[-1]]) self.assertListEqual([[1, 0]], result.tolist()) def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False) value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)}) transformed = encoder.fit_transform(value) inverse_transformed = encoder.inverse_transform(transformed) assert value.equals(inverse_transformed) def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self): encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator') value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')}) result = encoder.fit_transform(value) columns = result.columns.tolist() self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns)) def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self): train = pd.DataFrame({'city': ['Chicago', 
'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1]}, columns=['city_1', 'city_2']) enc = encoders.OneHotEncoder(handle_unknown='value') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 1], 'city_-1': [0, 0]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self): train = pd.DataFrame({'city': ['Chicago', 'Seattle']}) test = pd.DataFrame({'city': ['Chicago', 'Detroit']}) expected_result = pd.DataFrame({'city_1': [1, 0], 'city_2': [0, 0], 'city_-1': [0, 1]}, columns=['city_1', 'city_2', 'city_-1']) enc = encoders.OneHotEncoder(handle_unknown='indicator') result = enc.fit(train).transform(test) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingError(self): data_no_missing = ['A', 'B', 'B'] data_w_missing = [np.nan, 'B', 'B'] encoder = encoders.OneHotEncoder(handle_missing="error") result = encoder.fit_transform(data_no_missing) expected = [[1, 0], [0, 1], [0, 1]] self.assertEqual(result.values.tolist(), expected) self.assertRaisesRegex(ValueError, '.*null.*', encoder.transform, data_w_missing) self.assertRaisesRegex(ValueError, '.*null.*', encoder.fit, data_w_missing) def test_HandleMissingReturnNan(self): train = pd.DataFrame({'x': ['A', np.nan, 'B']}) encoder = encoders.OneHotEncoder(handle_missing='return_nan', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal( result, pd.DataFrame({'x_A': [1, np.nan, 0], 'x_B': [0, np.nan, 1]}), ) def test_HandleMissingIgnore(self): train = pd.DataFrame({'x': ['A', 'B', np.nan], 'y': ['A', None, 'A'], 'z': [np.NaN, 'B', 'B']}) train['z'] = train['z'].astype('category') expected_result = pd.DataFrame({'x_A': [1, 0, 0], 'x_B': [0, 1, 0], 'y_A': [1, 0, 1], 'z_B': [0, 1, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore', use_cat_names=True) result = encoder.fit_transform(train) pd.testing.assert_frame_equal(result, expected_result) def test_HandleMissingIgnore_ExpectMappingUsed(self): train = pd.DataFrame({'city': ['Chicago', np.NaN,'Geneva']}) expected_result = pd.DataFrame({'city_1': [1, 0, 0], 'city_3': [0, 0, 1]}) encoder = encoders.OneHotEncoder(handle_missing='ignore') result = encoder.fit(train).transform(train) pd.testing.assert_frame_equal(expected_result, result) def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self): train = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self): 
train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self): train = ['A', 'B'] test = ['A', 'B', np.nan] encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value') encoded_train = encoder.fit_transform(train) encoded_test = encoder.transform(test) expected_1 = [[1, 0, 0], [0, 1, 0]] self.assertEqual(encoded_train.values.tolist(), expected_1) expected_2 = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(encoded_test.values.tolist(), expected_2) def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self): train = ['A', 'B'] test = ['A', 'B', 'C'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') encoder.fit(train) result = encoder.transform(test) expected = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] self.assertEqual(result.values.tolist(), expected) def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self): train = ['A', 'B'] encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value') result = encoder.fit_transform(train) expected = [[1, 0, 0], [0, 1, 0]] self.assertEqual(result.values.tolist(), expected) def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value') result = enc.fit_transform(train) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) message = 'inverse_transform is not supported because transform impute '\ 'the unknown category nan when encode city' with self.assertWarns(UserWarning, msg=message) as w: enc.inverse_transform(result) def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', 'los angeles']}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(train, original) def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self): train = pd.DataFrame({'city': ['chicago', np.nan]}) test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']}) expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]}) enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan') enc.fit(train) result = enc.transform(test) original = enc.inverse_transform(result) pd.testing.assert_frame_equal(expected, original)
woodly0
49c62c7b782b04f310a7d48c674b2ee6f3987541
f66949194ee608e532da5ef86c9078c66e40a145
Thanks for your feedback. I've extended the existing test and added an additional one.
woodly0
41
scikit-learn-contrib/category_encoders
381
[DOC] Catboost docs reformulation
Connected to #337 and #351
null
2022-10-31 04:47:51+00:00
2022-11-01 20:33:10+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features.     Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.     CatBoostEncoder is the variation of target encoding. It supports     time-aware encoding, regularization, and online learning.     This implementation is time-aware (similar to CatBoost's parameter 'has_time=True'),     so no random permutations are used. It makes this encoder sensitive to     ordering of the data and suitable for time series problems. If your data     does not have time dependency, it should still work just fine, assuming     sorting of the data won't leak any information outside the training scope     (i.e., no data leakage). When data leakage is a possibility, it is wise to     eliminate it first (for example, shuffle or resample the data).     NOTE: behavior of the transformer would differ in transform and fit_transform     methods depending if y values are passed. If no target is passed, then     encoder will map the last value of the running mean to each category. If y is passed     then it will map all values of the running mean to each category's occurrences. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
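To make the running-mean line in `_transform` concrete — `(cumsum - y + mean * a) / (cumcount + a)` — here is a small hand-computed trace for a single category, following the encoder's own groupby trick. This is a sketch with made-up targets, using `a=1` and a prior equal to the global target mean, as in the code:

```python
import pandas as pd

y = pd.Series([1.0, 0.0, 1.0])       # targets of three rows of one category
prior = y.mean()                     # global mean, used as the smoothing prior
a = 1.0                              # additive smoothing, as in the encoder

groups = pd.Series(['x', 'x', 'x'])  # all rows share one category level
temp = y.groupby(groups).agg(['cumsum', 'cumcount'])
encoded = (temp['cumsum'] - y + prior * a) / (temp['cumcount'] + a)

# Row 0 sees only the prior:          (1 - 1 + 2/3) / (0 + 1) = 0.667
# Row 1 sees row 0's target:          (1 - 0 + 2/3) / (1 + 1) = 0.833
# Row 2 sees rows 0 and 1's targets:  (2 - 1 + 2/3) / (2 + 1) = 0.556
print(encoded.tolist())
```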
glevv
c5dd2b7ac414eedda555b74207223beb2c557b05
0fd5d2836366237689bbb89699232e2f6acb40d6
there is a typo here: `CatBoosts`
PaulWestenthanner
42
scikit-learn-contrib/category_encoders
381
[DOC] Catboost docs reformulation
Connected to #337 and #351
null
2022-10-31 04:47:51+00:00
2022-11-01 20:33:10+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features.     Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.     CatBoostEncoder is the variation of target encoding. It supports     time-aware encoding, regularization, and online learning.     This implementation is time-aware (similar to CatBoost's parameter 'has_time=True'),     so no random permutations are used. It makes this encoder sensitive to     ordering of the data and suitable for time series problems. If your data     does not have time dependency, it should still work just fine, assuming     sorting of the data won't leak any information outside the training scope     (i.e., no data leakage). When data leakage is a possibility, it is wise to     eliminate it first (for example, shuffle or resample the data).     NOTE: behavior of the transformer would differ in transform and fit_transform     methods depending if y values are passed. If no target is passed, then     encoder will map the last value of the running mean to each category. If y is passed     then it will map all values of the running mean to each category's occurrences. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
glevv
c5dd2b7ac414eedda555b74207223beb2c557b05
0fd5d2836366237689bbb89699232e2f6acb40d6
Should we add that, if you're not sure whether information leaks, you can do a random permutation first?
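One way to act on this suggestion, reusing the permutation snippet from the pre-change docstring (a sketch; the toy data are made up for illustration):

```python
import numpy as np
import pandas as pd
import category_encoders as encoders

X = pd.DataFrame({'cat': ['a', 'a', 'b', 'b', 'c', 'c']})
y = pd.Series([1.0, 0.0, 1.0, 1.0, 0.0, 0.0])

# Shuffle rows first so any target-based ordering in the raw data cannot
# leak through the time-aware, single-pass encoding.
perm = np.random.permutation(len(X))
X = X.iloc[perm].reset_index(drop=True)
y = y.iloc[perm].reset_index(drop=True)

enc = encoders.CatBoostEncoder(cols=['cat'])
X_encoded = enc.fit_transform(X, y)
```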
PaulWestenthanner
43
scikit-learn-contrib/category_encoders
381
[DOC] Catboost docs reformulation
Connected to #337 and #351
null
2022-10-31 04:47:51+00:00
2022-11-01 20:33:10+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features.     Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.     CatBoostEncoder is the variation of target encoding. It supports     time-aware encoding, regularization, and online learning.     This implementation is time-aware (similar to CatBoost's parameter 'has_time=True'),     so no random permutations are used. It makes this encoder sensitive to     ordering of the data and suitable for time series problems. If your data     does not have time dependency, it should still work just fine, assuming     sorting of the data won't leak any information outside the training scope     (i.e., no data leakage). When data leakage is a possibility, it is wise to     eliminate it first (for example, shuffle or resample the data).     NOTE: behavior of the transformer would differ in transform and fit_transform     methods depending if y values are passed. If no target is passed, then     encoder will map the last value of the running mean to each category. If y is passed     then it will map all values of the running mean to each category's occurrences. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
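To make the running-mean formula in `_transform` concrete, here is a small hand-checkable sketch of the ordered target statistic `(cumsum - y + prior * a) / (cumcount + a)`, mirroring the groupby trick in the code above. The data values are invented, and exact `agg` dispatch behaviour may vary by pandas version.

```python
import pandas as pd

X = pd.DataFrame({'city': ['a', 'a', 'b', 'a']})
y = pd.Series([1.0, 0.0, 1.0, 1.0])
prior, a = y.mean(), 1  # prior = 0.75

# For each row, only *preceding* rows of the same category contribute,
# plus the global prior smoothed by a.
temp = y.groupby(X['city'].astype(str)).agg(['cumsum', 'cumcount'])
encoded = (temp['cumsum'] - y + prior * a) / (temp['cumcount'] + a)
# Row 0 ('a', no history):      (0 + 0.75) / (0 + 1) = 0.75
# Row 1 ('a', saw y=1 once):    (1 + 0.75) / (1 + 1) = 0.875
# Row 3 ('a', saw y=1 and y=0): (1 + 0.75) / (2 + 1) ≈ 0.583
print(encoded)
```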
glevv
c5dd2b7ac414eedda555b74207223beb2c557b05
0fd5d2836366237689bbb89699232e2f6acb40d6
I don't like the word `continue` here. The encoder is newly fitted according to the target information. If I do something like

```python
enc = CatBoostEncoder()
enc.fit_transform(X1, y1)
enc.fit_transform(X2, y2)
```

the previous fit will be ignored. So `continue` might be misleading and `refit` might be a better word. Do you agree?
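A hedged sketch expanding the reviewer's snippet with invented batches, to show that the second `fit_transform` replaces the fitted state rather than continuing from it (inspecting the fitted `mapping` attribute, which `_fit` above rebuilds from scratch):

```python
import pandas as pd
from category_encoders import CatBoostEncoder

# Two invented batches with the same column but different targets.
X1 = pd.DataFrame({'city': ['a', 'a', 'b', 'b']})
y1 = pd.Series([1.0, 1.0, 0.0, 0.0])
X2 = pd.DataFrame({'city': ['a', 'a', 'b', 'b']})
y2 = pd.Series([0.0, 0.0, 1.0, 1.0])

enc = CatBoostEncoder(cols=['city'])
enc.fit_transform(X1, y1)
print(enc.mapping['city'].loc['a', 'sum'])  # 2.0: both y1 rows for 'a'

enc.fit_transform(X2, y2)  # refits from scratch; y1 statistics are gone
print(enc.mapping['city'].loc['a', 'sum'])  # 0.0: only y2 counts now
```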
PaulWestenthanner
44
scikit-learn-contrib/category_encoders
381
[DOC] CatBoost docs reformulation
Connected to #337 and #351
null
2022-10-31 04:47:51+00:00
2022-11-01 20:33:10+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features.     Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.     CatBoostEncoder is the variation of target encoding. It supports     time-aware encoding, regularization, and online learning.     This implementation is time-aware (similar to CatBoost's parameter 'has_time=True'),     so no random permutations are used. It makes this encoder sensitive to     ordering of the data and suitable for time series problems. If your data     does not have time dependency, it should still work just fine, assuming     sorting of the data won't leak any information outside the training scope     (i.e., no data leakage). When data leakage is a possibility, it is wise to     eliminate it first (for example, shuffle or resample the data).     NOTE: behavior of the transformer would differ in transform and fit_transform     methods depending if y values are passed. If no target is passed, then     encoder will map the last value of the running mean to each category. If y is passed     then it will map all values of the running mean to each category's occurrences. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
glevv
c5dd2b7ac414eedda555b74207223beb2c557b05
0fd5d2836366237689bbb89699232e2f6acb40d6
I reworded it; I think it should be clearer now.
glevv
45
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1).
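An illustrative sketch of the column-naming convention described above. Only the `HIER_<col>_<level>` naming (level 1 = top of the tree) is taken from the PR; the postcode-style values and parameter choices are invented, so treat the encoded values as indicative only.

```python
import pandas as pd
from category_encoders import TargetEncoder

X = pd.DataFrame({'postcode': ['SW1A', 'SW1B', 'SW2A', 'M1A', 'M1B', 'M2A']})
y = pd.Series([1, 1, 0, 0, 1, 0])

# One block of HIER_<col>_<level> columns per encoded column.
hierarchy = pd.DataFrame({
    'HIER_postcode_1': ['SW', 'SW', 'SW', 'M', 'M', 'M'],        # top level
    'HIER_postcode_2': ['SW1', 'SW1', 'SW2', 'M1', 'M1', 'M2'],  # next level
}, index=X.index)

enc = TargetEncoder(cols=['postcode'], smoothing=2, min_samples_leaf=2,
                    hierarchy=hierarchy)
encoded = enc.fit_transform(X, y)
```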
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
maybe add the corresponding import statement to make it a self-contained example:

```python
>>> from category_encoders.datasets import load_compass
```
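For reference, the self-contained form of the docstring example the reviewer asks for, with the import added; `load_compass` is the bundled demo dataset referenced in the diff above.

```python
from category_encoders import TargetEncoder
from category_encoders.datasets import load_compass

X, y = load_compass()
hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}
enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                    hierarchy=hierarchical_map, cols=['compass'])
hierarchy_dataset = enc.fit_transform(X[['compass']], y)
print(hierarchy_dataset['compass'].values)
```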
PaulWestenthanner
46
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1).
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
Why do we need this error? The current behaviour is that, if you do not specify `cols`, all categorical columns will be determined and encoded. So if I rely on this default and do not specify columns, this will break now, right? (See the sketch after this record.)
PaulWestenthanner
47
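To make the backward-compatibility concern in the comment above concrete, here is a minimal sketch, assuming the version of TargetEncoder shown in this record's diff. The data and the 'compass' column are illustrative, not taken from the PR:

```python
import pandas as pd
from category_encoders import TargetEncoder

X = pd.DataFrame({'compass': ['N', 'NE', 'S', 'SE', 'W', 'W']})
y = pd.Series([1, 0, 1, 0, 1, 0])
hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}

# Before this diff, omitting `cols` meant "detect and encode every
# categorical column". With the new check, the same call now fails
# already at construction time:
try:
    enc = TargetEncoder(hierarchy=hierarchical_map)  # cols=None
except ValueError as err:
    print(err)  # Hierarchy is defined but no columns are named for encoding

# Naming the columns explicitly restores a working encoder:
enc = TargetEncoder(hierarchy=hierarchical_map, cols=['compass'])
enc.fit(X, y)
```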
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1)
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
Ah, OK: in line 138 you have to know at init time what the columns will look like; determining them on the fly at fit time will not be enough. (See the sketch after this record.)
PaulWestenthanner
48
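As a minimal sketch of the init-time dependency mentioned in the comment above, this reproduces just the depth-derivation logic from the diff's __init__. The hierarchy dataframe and the 'postcode' column name are illustrative:

```python
import numpy as np
import pandas as pd

# Illustrative hierarchy dataframe following the HIER_<col>_<level> convention.
hierarchy = pd.DataFrame({
    'HIER_postcode_1': ['SW', 'SW', 'NE'],
    'HIER_postcode_2': ['SW1', 'SW2', 'NE1'],
})
cols = ['postcode']

# Mirrors the __init__ logic in the diff: the depth of each hierarchy is
# parsed out of the HIER_<col>_<level> column names, so `cols` has to be
# known when the encoder is constructed and cannot be inferred later in fit().
hierarchy_depth = {}
for col in cols:
    hier_cols = hierarchy.columns[hierarchy.columns.str.startswith(f'HIER_{col}')].values
    hier_levels = [int(c.replace(f'HIER_{col}_', '')) for c in hier_cols]
    if np.array_equal(sorted(hier_levels), np.arange(1, max(hier_levels) + 1)):
        hierarchy_depth[col] = max(hier_levels)
    else:
        raise ValueError(f'Hierarchy columns are not complete for column {col}')

print(hierarchy_depth)  # {'postcode': 2}
```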
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1)
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
I think we should document somewhere that, when supplying a dataframe as `hierarchy`, the columns have to be called HIER_col_1, ..., HIER_col_N. Also, I'm not sure whether 1 or N is the coarsest level, so that is probably worth documenting too. (See the sketch after this record.)
PaulWestenthanner
49
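On the documentation request above: the docstring added in this diff states that level 1 is the top of the tree, i.e. the coarsest level. A minimal sketch of the expected dataframe layout follows; the column names use the HIER_<col>_<level> convention and the values are illustrative, not from the PR's dataset:

```python
import pandas as pd

# Level 1 is the coarsest grouping (top of the tree); higher numbers refine it.
hierarchy = pd.DataFrame({
    'HIER_postcode_1': ['SW',   'SW',   'NE'],    # region (coarsest, level 1)
    'HIER_postcode_2': ['SW1',  'SW2',  'NE1'],   # district
    'HIER_postcode_3': ['SW1A', 'SW2B', 'NE1C'],  # sector (finest, level N)
})

# It would be passed alongside the column it refines, e.g.:
#   TargetEncoder(hierarchy=hierarchy, cols=['postcode'])
print(hierarchy)
```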
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1)
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
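A minimal standalone sketch of the logistic weighting used by `_weighting` above (same formula, re-typed outside the class so it can run on its own; the sample parameter values are arbitrary):

import numpy as np

def weighting(n, min_samples_leaf=1, smoothing=1.0):
    # Same curve as TargetEncoder._weighting: near 0 for rare categories,
    # near 1 for frequent ones, and exactly 0.5 when n == min_samples_leaf.
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

for n in (1, 2, 5, 20):
    print(n, round(weighting(n, min_samples_leaf=2, smoothing=2.0), 3))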
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
This check is not specific to hierarchy, right? If so, it should be moved to the base encoder. Strange that we do not have such a check implemented already...
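A minimal sketch of what such a shared check might look like if hoisted into the base encoder (the helper name and placement are assumptions for illustration, not the actual implementation):

# Hypothetical helper; in practice this would live on util.BaseEncoder so
# every encoder that accepts extra per-column configuration could reuse it.
def _check_requires_cols(param_name, param_value, cols):
    # Fail fast: a hierarchy (or similar per-column mapping) without named
    # columns cannot be matched to anything during fit.
    if param_value is not None and cols is None:
        raise ValueError(f'{param_name} is defined but no columns are named for encoding')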
PaulWestenthanner
50
scikit-learn-contrib/category_encoders
373
Target encoding hierarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1)
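As a sketch of the proposed convention, a columnwise hierarchy frame might look like this (hypothetical postcode data; only the HIER_<col>_<level> naming is prescribed by the pull request):

import pandas as pd

# Level 1 is the top of the tree; deeper levels refine it.
hierarchy_frame = pd.DataFrame({
    'HIER_postcode_1': ['S', 'S', 'N'],
    'HIER_postcode_2': ['SE', 'SW', 'NE'],
})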
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
In the postcode example, cols should be just `postcode`, right? So specifying `cols=None` would not make any sense anyway, since the HIER_postcode_N cols would be encoded as columns on their own instead of being recognized in the hierarchy. Then it is just fine to throw an error here.
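For reference, the docstring usage above names the encoded column explicitly, so the HIER_ columns feed only the hierarchy (this assumes load_postcodes is importable from category_encoders.datasets alongside load_compass):

from category_encoders import TargetEncoder
from category_encoders.datasets import load_postcodes

X, y = load_postcodes('binary')
hier_cols = ['HIER_postcode_1', 'HIER_postcode_2', 'HIER_postcode_3', 'HIER_postcode_4']
# cols=['postcode'] marks the feature to encode; passing cols=None would
# leave the HIER_ columns to be encoded on their own, hence the error.
enc = TargetEncoder(smoothing=2, min_samples_leaf=2, hierarchy=X[hier_cols], cols=['postcode'])
encoded = enc.fit(X['postcode'], y).transform(X['postcode'])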
PaulWestenthanner
51
scikit-learn-contrib/category_encoders
373
Target encoding heirarchical columnwise
This pull request enhances hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress ## Proposed Changes Allows a user to submit a hierarchy within a dataframe (i.e. columnwise), not just a mapping dictionary. Columns must take the names HIER_colA_1, HIER_colA_2, HIER_colA_3, HIER_colB_1, HIER_colB_2, HIER_colC_1, etc., where the last digit represents the level of hierarchy (top = 1)
null
2022-10-04 08:10:13+00:00
2022-10-05 13:32:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) 
return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict or dataframe A dictionary or a dataframe to define the hierarchy for mapping. If a dictionary, this contains a dict of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. If dataframe: a dataframe defining columns to be used for the hierarchies. Column names must take the form: HIER_colA_1, ... HIER_colA_N, HIER_colB_1, ... HIER_colB_M, ... where [colA, colB, ...] are given columns in cols list. 1:N and 1:M define the hierarchy for each column where 1 is the highest hierarchy (top of the tree). A single column or multiple can be used, as relevant. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> from category_encoders.datasets import load_compass >>> X, y = load_compass() >>> hierarchical_map = {'compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['compass']).fit(X.loc[:,['compass']], y) >>> hierarchy_dataset = enc.transform(X.loc[:,['compass']]) >>> print(hierarchy_dataset['compass'].values) [0.62263617 0.62263617 0.90382995 0.90382995 0.90382995 0.17660024 0.17660024 0.46051953 0.46051953 0.46051953 0.46051953 0.40332791 0.40332791 0.40332791 0.40332791 0.40332791] >>> X, y = load_postcodes('binary') >>> cols = ['postcode'] >>> HIER_cols = ['HIER_postcode_1','HIER_postcode_2','HIER_postcode_3','HIER_postcode_4'] >>> enc = TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode']).fit(X['postcode'], y) >>> hierarchy_dataset = enc.transform(X['postcode']) >>> print(hierarchy_dataset.loc[0:10, 'postcode'].values) [0.75063473 0.90208756 0.88328833 0.77041254 0.68891504 0.85012847 0.76772574 0.88742357 0.7933824 0.63776756 0.9019973 ] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if isinstance(hierarchy, (dict, pd.DataFrame)) and cols is None: raise ValueError('Hierarchy is defined but no columns are named for encoding') if isinstance(hierarchy, dict): self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} elif isinstance(hierarchy, pd.DataFrame): self.hierarchy = hierarchy self.hierarchy_depth = {} for col in self.cols: HIER_cols = self.hierarchy.columns[self.hierarchy.columns.str.startswith(f'HIER_{col}')].values HIER_levels = [int(i.replace(f'HIER_{col}_', '')) for i in HIER_cols] if np.array_equal(sorted(HIER_levels), np.arange(1, max(HIER_levels)+1)): self.hierarchy_depth[col] = max(HIER_levels) else: raise ValueError(f'Hierarchy columns are not complete for column {col}') elif hierarchy is None: self.hierarchy = hierarchy else: raise ValueError('Given hierarchy mapping is neither a dictionary nor a dataframe') self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) return min_tuple_size == max_tuple_size, min_tuple_size def _fit(self, X, y, **kwargs): if isinstance(self.hierarchy, dict): X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) elif isinstance(self.hierarchy, pd.DataFrame): X_hier = self.hierarchy if isinstance(self.hierarchy, (dict, pd.DataFrame)): enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy is not None: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if (isinstance(self.hierarchy, dict) and col in self.hierarchy) or \ (isinstance(self.hierarchy, pd.DataFrame)): for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, 
col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
nercisla
81bb01d99a44624f117cf13bb7ef64ef55ee7f9d
a0d4748d1ecb6b343db079a42133a1d47263fa49
Agreed. We have moved this, hopefully to the right place.
nercisla
52
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
there seems to be a typo throughout the whole pull request: "hierarchy" is spelled with the e and i switched, right?
PaulWestenthanner
53
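The `_weighting` helper introduced in the after-content above is a logistic S-curve in the category count. A minimal standalone sketch of the same curve (NumPy only; the concrete parameter values below are illustrative assumptions, not taken from the PR):

import numpy as np

def weighting(n, min_samples_leaf=20, smoothing=10.0):
    # Monotonically increasing in n and bounded between 0 and 1:
    # near 0 for rare categories, exactly 0.5 at n == min_samples_leaf,
    # and approaching 1 for frequent categories.
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

counts = np.array([1, 5, 20, 100])
print(weighting(counts))  # weights rise from ~0.13 towards ~1.0

The encoder then blends `scalar * (1 - w) + stats['mean'] * w`, i.e. it interpolates between the prior (or the estimate from the parent hierarchy level) and the per-category mean.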
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
please also add the parameter to the docstring with a little explanation and its type (I guess it is a dict mapping cols to the hierarchy levels, right?); a sketch follows this record
PaulWestenthanner
54
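To make the request above concrete, here is one possible shape for both the docstring entry and the argument itself; the `Compass` map is copied from the doctest in the after-content, and the wording is a sketch rather than the final text:

from category_encoders import TargetEncoder

# hierarchy: dict
#     a dictionary of columns to map into hierarchies. Dictionary key(s)
#     should be the column name from X which requires mapping. For multiple
#     hierarchical maps, this should be a dictionary of dictionaries.
hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}
enc = TargetEncoder(cols=['Compass'], hierarchy=hierarchical_map)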
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I don't think copying is needed here (see the sketch after this record)
PaulWestenthanner
55
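One reading of the comment above: `_transform` already hands `target_encode` the fresh frame returned by `ordinal_encoder.transform`, so the deep copy duplicates work that was just done. A sketch under that assumption (not the code that was merged):

def target_encode(self, X):
    # X is the frame freshly produced by self.ordinal_encoder.transform
    # inside _transform, so mutating it in place cannot touch caller data.
    for col in self.cols:
        X[col] = X[col].map(self.mapping[col])
    return X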
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
`self.cols` gets declared in the `fit` method of the `BaseEncoder` before `_fit` is called, so it will already be declared at this point. I'm not sure whether the additional columns that are created on the fly need to be added here as well (see the sketch after this record).
PaulWestenthanner
56
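For context on the ordering described above, a schematic of the call flow; this is a simplification, not the actual `BaseEncoder` source, and the helper names are assumptions:

class BaseEncoder:
    def fit(self, X, y=None, **kwargs):
        X = convert_input(X)              # assumed input-coercion helper
        if self.cols is None:
            self.cols = get_obj_cols(X)   # assumed: default to string columns
        self._fit(X, y, **kwargs)         # subclasses can rely on self.cols here
        return self

The `HIER_*` columns are only created later, inside `_fit`, which is why it is a separate question whether they should be appended to `self.cols`.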
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
does this work for a multi-level hierarchy as well? e.g. imagine that in the animal example you also have some insects, plus another super-category `mammal` which both `Feline` and `Canine` belong to but the insects do not. I think the result would at least depend on the order of the dictionary (which we don't want, since dicts are generally unordered). Maybe we should just add a test case for that as well. I'm also happy if multi-level hierarchies are not supported in this first stage, but then an appropriate exception should be raised
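For concreteness, a nested map for the scenario above might look like the sketch below; the nesting format is an assumption, since the PR so far only demonstrates single-level maps such as the `Compass` example in the docstring.

```python
# Hypothetical two-level hierarchy for the animal example in this comment.
# Whether the encoder accepts nested dicts like this is exactly the open
# question -- this is an illustration, not a supported format.
hierarchical_map = {
    'Animal': {
        'Mammal': {
            'Feline': ('Cat', 'Lion'),
            'Canine': ('Dog', 'Wolf'),
        },
        'Insect': ('Ant', 'Beetle'),
    }
}
```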
PaulWestenthanner
57
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
better to use `col.startswith('HIER_')` here, as a prefix check is more precise than a substring check
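To illustrate the point: a substring test matches `HIER_` anywhere in the column name, while a prefix test only matches the generated helper columns. A minimal sketch, assuming a hypothetical user column whose name happens to contain `HIER_`:

```python
col = 'MY_HIER_FLAG'  # hypothetical user column, not a generated helper

'HIER_' in str(col)            # True  -> column would wrongly be skipped
str(col).startswith('HIER_')   # False -> column is correctly encoded
```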
PaulWestenthanner
58
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
what do you mean by "third dimension"? The dimension of the ordinal encoder is just the number of columns being encoded, so subtracting 1 here should be fine
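For context, the fitted ordinal encoder holds one mapping entry per encoded column, so its "dimension" is simply that count. A minimal runnable sketch (the toy data is made up):

```python
import pandas as pd
from category_encoders import OrdinalEncoder

X = pd.DataFrame({'a': ['x', 'y'], 'b': ['u', 'v']})
enc = OrdinalEncoder(cols=['a', 'b']).fit(X)

# one entry in category_mapping per encoded column
assert len(enc.category_mapping) == 2
```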
PaulWestenthanner
59
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I would find this more readable if you wrote `prior` instead of `scalar`. This should be the same, right?
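A quick check of the claim: for a column without a hierarchy, `scalar` is initialized to `prior` and never reassigned, so the two spellings produce the same blend. A self-contained sketch with made-up numbers:

```python
prior = 0.5       # illustrative global target mean
cat_mean = 0.75   # illustrative per-category mean
smoove = 0.4      # illustrative shrinkage weight

scalar = prior    # non-hierarchical column: scalar is never reassigned

assert scalar * (1 - smoove) + cat_mean * smoove == \
       prior * (1 - smoove) + cat_mean * smoove
```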
PaulWestenthanner
60
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
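For intuition, here is a standalone toy recomputation of the two-level shrinkage that fit_target_encoding performs above, on the compass data from the docstring example. It mirrors the blending formulas under the default min_samples_leaf=1, smoothing=1.0, but it is a sketch, not the encoder's exact code path:

import numpy as np
import pandas as pd

y = pd.Series([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1])
leaf = pd.Series(['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S',
                  'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'])
parent = leaf.map({'N': 'N', 'NE': 'N', 'SE': 'S', 'S': 'S', 'W': 'W'})

def weight(n, k=1, f=1.0):
    # same logistic weighting as TargetEncoder._weighting
    return 1 / (1 + np.exp(-(n - k) / f))

prior = y.mean()

# level 1: shrink each parent-level mean towards the global prior
parent_stats = y.groupby(parent).agg(['count', 'mean'])
w_p = weight(parent_stats['count'])
parent_enc = prior * (1 - w_p) + parent_stats['mean'] * w_p

# level 2: shrink each leaf mean towards its parent's encoding, not the prior
leaf_stats = y.groupby(leaf).agg(['count', 'mean'])
leaf_to_parent = dict(zip(leaf, parent))
scalar = pd.Series({c: parent_enc[leaf_to_parent[c]] for c in leaf_stats.index})
w_l = weight(leaf_stats['count'])
leaf_enc = scalar * (1 - w_l) + leaf_stats['mean'] * w_l
print(leaf_enc)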
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
This also seems to work for single-level hierarchies only, right? So at some point we'd need to raise an exception (probably when parsing the hierarchy map).
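A minimal sketch of the guard this comment asks for, assuming a nested dict is how a deeper hierarchy level would be expressed; the function name _check_hierarchy_depth is hypothetical, not part of the PR:

# Hypothetical guard: reject hierarchy maps deeper than one level
# while only single-level hierarchies are supported.
def _check_hierarchy_depth(hierarchy_map):
    for col, mapping in hierarchy_map.items():
        for parent, children in mapping.items():
            if isinstance(children, dict):  # a nested dict would be a second level
                raise ValueError(f"Multi-level hierarchy for column '{col}' is not supported yet")

# single-level map as in the docstring example: passes silently
_check_hierarchy_depth({'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}})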
PaulWestenthanner
61
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
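For reference, a minimal self-contained run of the encoder as it stood before this PR (toy data; the default parameters shown above apply):

import pandas as pd
from category_encoders import TargetEncoder

X = pd.DataFrame({'city': ['chicago', 'chicago', 'st louis', 'st louis', 'st louis']})
y = pd.Series([1, 0, 1, 1, 0])

# each category is replaced by its shrunken target mean; unknown and
# missing values fall back to the prior under handle_*='value'
enc = TargetEncoder(cols=['city'])
print(enc.fit_transform(X, y))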
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
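For intuition about _weighting above: it is a logistic curve in the category count that crosses 0.5 at min_samples_leaf. A standalone sketch (the constants 20 and 10 are arbitrary examples, not defaults):

import numpy as np

def weighting(n, min_samples_leaf=20, smoothing=10.0):
    # logistic weight: 0.5 at n == min_samples_leaf; larger smoothing flattens the curve
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

# small counts lean on the prior (weight near 0),
# large counts lean on the per-category mean (weight near 1)
for n in [1, 10, 20, 40, 100]:
    print(n, round(weighting(n), 3))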
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
We had a discussion in this project recently where a pandas guy discouraged using `inplace` and actually removed all (or at least most) of the `inplace` usages from this repo. I don't remember the exact reason, but it made sense to me, and we should probably follow suit here as well. So just use `X = X.drop(...)`.
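For illustration, the reassignment pattern the comment recommends, shown on a generic frame (not code from this PR):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

# discouraged: df.drop(columns=['b'], inplace=True)
# preferred: reassign the result, keeping the data flow explicit
df = df.drop(columns=['b'])
print(df.columns.tolist())  # ['a']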
PaulWestenthanner
62
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
This is basically cleaning the ordinal encoder so that it never knew there was hierarchy involved? I like the idea that all the additional columns should be hidden from the user.
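A sketch of what hiding those helper columns could look like in isolation; the HIER_ naming convention follows the PR, everything else is illustrative:

import pandas as pd

X_ordinal = pd.DataFrame({'Compass': [1, 2, 3], 'HIER_Compass_1': [1, 1, 2]})

# the HIER_* columns are an internal fitting detail: drop them
# before anything is handed back to the user
hier_cols = [c for c in X_ordinal.columns if str(c).startswith('HIER_')]
X_ordinal = X_ordinal.drop(columns=hier_cols)
print(X_ordinal.columns.tolist())  # ['Compass']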
PaulWestenthanner
63
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Yes, you are correct. Though the word "heir" is spelt with i and e the opposite way around. ;)
nercisla
64
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
No, it's not needed. It was a remnant of an old solution that still required checking.
nercisla
65
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
This is also a remnant of an old solution.
nercisla
66
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I have added this now.
nercisla
67
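For context, a quick numeric check of the S-shaped weight that the patch factors out into `_weighting`: the curve equals 0.5 exactly at n == min_samples_leaf and approaches 1 for well-populated categories. This is a minimal sketch; min_samples_leaf=20 and smoothing=10 are arbitrary illustration values, not defaults from the PR.

import numpy as np

def weighting(n, min_samples_leaf=20, smoothing=10.0):
    # same sigmoid as TargetEncoder._weighting in the diff above
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

for n in (1, 20, 100):
    print(n, round(weighting(n), 3))  # 1 -> 0.13, 20 -> 0.5, 100 -> 1.0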
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
While working on fixing the tests, Joe and I found a problem here, so the method is a little different now: we separate the OrdinalEncoders for the base column(s) and the hierarchy (see the sketch after this record).
nercisla
68
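A minimal sketch of the separation described in the comment above, assuming a toy Compass column and a one-level grouping; the column name follows the HIER_ prefix used in the diff, and the data are made up for illustration.

import pandas as pd
from category_encoders import OrdinalEncoder

X = pd.DataFrame({'Compass': ['N', 'NE', 'SE', 'S', 'W']})
flat_map = {'N': 'N', 'NE': 'N', 'SE': 'S', 'S': 'S', 'W': 'W'}

# derived hierarchy column kept apart from the base column
X_hier = pd.DataFrame({'HIER_Compass_1': X['Compass'].map(flat_map)})

# two independent encoders: one for the base column, one for the hierarchy
enc_base = OrdinalEncoder(cols=['Compass'], handle_unknown='value', handle_missing='value').fit(X)
enc_hier = OrdinalEncoder(cols=['HIER_Compass_1'], handle_unknown='value', handle_missing='value').fit(X_hier)

print(enc_base.transform(X).join(enc_hier.transform(X_hier)))

Because each encoder owns its own category mapping, codes assigned to hierarchy levels cannot collide with or drift from codes assigned to the raw categories, which appears to be the kind of interaction the fix addresses.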
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
At this stage we only consider one level of hierarchy, but we know how to change that, so we can do it soon, either in this release or a later one.
nercisla
69
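
To make the single-level hierarchy discussed in the comment above concrete, here is a minimal sketch of the usage this stage of the PR supports. The map itself mirrors the docstring example in the diff; the DataFrame wrapper, the `cols` argument, and the commented deeper map are illustrative assumptions, not part of the PR.

import pandas as pd
from category_encoders import TargetEncoder

X = pd.DataFrame({'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S',
                              'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W']})
y = pd.Series([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1])

# One level: each parent key maps to a tuple (or single value) of raw categories.
hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}

enc = TargetEncoder(cols=['Compass'], hierarchy=hierarchical_map).fit(X, y)
print(enc.transform(X))

# A deeper map would presumably nest dictionaries; hypothetical, not supported
# at this stage of the PR:
# {'Compass': {'N-ish': {'N': ('N', 'NE')}, 'S-ish': {'S': ('S', 'SE'), 'W': 'W'}}}
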
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Agreed.
nercisla
70
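
As a side note on the level bookkeeping agreed on above, here is a hand-rolled sketch of the depth check. The `flattened` dict is an assumption about the shape `util.flatten_reverse_dict` produces (raw-category groups mapped to ancestor paths), not its actual implementation, and `check_dict_key_tuples` merely mirrors `TargetEncoder._check_dict_key_tuples`.

# Hypothetical flattened form of {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}:
flattened = {('N', 'NE'): ('N',), ('S', 'SE'): ('S',), 'W': ('W',)}

def check_dict_key_tuples(d):
    # mirrors TargetEncoder._check_dict_key_tuples: are all ancestor paths
    # the same length, and what is that (minimum) length?
    sizes = {len(v) for v in d.values()}
    return len(sizes) == 1, min(sizes)

ok, depth = check_dict_key_tuples(flattened)
print(ok, depth)  # True 1 -> a consistent single-level hierarchy
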
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
No, I don't believe that is true. The prior is taken over all the data, but the scalar is an estimate of the probability. The paper clearly defines it separately, so I feel the language should follow.
nercisla
71
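
To make the prior-versus-scalar distinction above concrete, here is a hand-rolled numeric sketch of the blending done in fit_target_encoding, using the same S-curve as _weighting. All numbers are made up for illustration.

import numpy as np

def weighting(n, min_samples_leaf=1, smoothing=1.0):
    # S-shaped weight in (0, 1), as in TargetEncoder._weighting
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

prior = 0.5                          # global target mean over all training rows
parent_mean, parent_count = 0.8, 10  # estimate at the hierarchy level above the leaf
w = weighting(parent_count)
scalar = prior * (1 - w) + parent_mean * w  # hierarchy-informed fallback, not the prior

child_mean, child_count = 0.9, 3     # the leaf category itself
w = weighting(child_count)
encoded = scalar * (1 - w) + child_mean * w
print(round(scalar, 4), round(encoded, 4))

With enough parent samples the scalar sits close to the parent mean, so sparse leaf categories shrink toward their hierarchy level rather than all the way back to the global prior.
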
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Agreed.
nercisla
72
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
No problem. Though, nicely, this line has been dropped thanks to the cleaned-up methodology :)
nercisla
73
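For readers skimming these records: the core of fit_target_encoding above is a sigmoid-weighted blend between each category's mean and a fallback value. Below is a minimal standalone sketch of that blend; the data and the names blend/fallback are illustrative, not taken from the library.

import numpy as np
import pandas as pd

def blend(stats, fallback, min_samples_leaf=1, smoothing=1.0):
    # S-shaped weight in (0, 1); reaches ~0.5 once a category has min_samples_leaf rows
    w = 1 / (1 + np.exp(-(stats['count'] - min_samples_leaf) / smoothing))
    # weighted average of the category mean and the fallback value
    return fallback * (1 - w) + stats['mean'] * w

y = pd.Series([1, 0, 1, 1, 0, 1, 1, 0])
cat = pd.Series(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c'])
stats = y.groupby(cat).agg(['count', 'mean'])
print(blend(stats, fallback=y.mean(), min_samples_leaf=2, smoothing=1.0))

In the non-hierarchical encoder the fallback is always the global target mean; the PR under review generalizes exactly this fallback.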
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
As above, this line has been removed in the new method.
nercisla
74
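The docstring above already carries a compass example. As a rough usage sketch of the hierarchy argument on this PR branch (a work-in-progress API at the time of these comments, not a released one; the DataFrame input with a 'Compass' column matching the map key is my assumption, since the doctest feeds a plain list and input handling was still in flux):

import pandas as pd
from category_encoders import TargetEncoder

X = pd.DataFrame({'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S',
                              'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W']})
y = pd.Series([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1])

# one hierarchy level: each raw value rolls up to a parent group
hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}

enc = TargetEncoder(cols=['Compass'], hierarchy=hierarchical_map)
print(enc.fit_transform(X, y)['Compass'].values)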
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I'm fine with having it in another release. Let's get the first working version merged soon and then iterate from there.
PaulWestenthanner
75
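The __init__ shown in these records flattens each nested hierarchy dict into a child-to-parent map (via util.flatten_reverse_dict) and _fit then expands it into HIER_<col>_<level> helper columns. A rough standalone illustration of that shape; the flattening below is an illustrative re-implementation, not the library helper:

import pandas as pd

nested = {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}

# child -> parent, expanding tuple keys the same way __init__ does
child_to_parent = {child: parent
                   for parent, children in nested.items()
                   for child in (children if isinstance(children, tuple) else (children,))}

X = pd.DataFrame({'Compass': ['N', 'NE', 'SE', 'W']})
X['HIER_Compass_1'] = X['Compass'].map(child_to_parent)
print(X)  # NE -> N, SE -> S; N and W map to themselves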
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
The variable called `scalar_hier` is defined separately and should keep its name. My comment was referring to the variable called `scalar` on the same line. This is even set equal to `prior` in line 155.
PaulWestenthanner
76
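On the default values touched on in this thread: whatever the per-category values shrink toward, the sentinel rows of each fitted mapping for unknown (-1) and missing (-2) categories are filled with the global prior under handle_unknown='value' / handle_missing='value'. A tiny sketch with made-up numbers; the -1/-2 sentinels follow the OrdinalEncoder convention used in the code above:

import pandas as pd

# per-category encoded values for one column, keyed by ordinal codes (values invented)
mapping = pd.Series({1: 0.81, 2: 0.33, 3: 0.52})

prior = 0.5
mapping.loc[-1] = prior  # unknown categories (handle_unknown='value')
mapping.loc[-2] = prior  # missing values (handle_missing='value')

codes = pd.Series([1, 3, -1, -2])
print(codes.map(mapping))  # unseen and missing rows fall back to the prior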
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
So, the scalar is normally set to the prior, but for multiple hierarchies the scalar is a function. This matters more once we have multi-level hierarchies, where the "scalar" will definitely move away from the prior. In that sense, I think it matters that we don't call it the prior. The prior and the scalar will be different, and that also matters for the default values (-1 and -2) in the mapping (see the sketch below).
nercisla
77
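To make the scalar-vs-prior point above concrete, here is a minimal, self-contained sketch of the two-level blending. The toy data and variable names are illustrative assumptions, not the encoder's exact code:

```
# toy illustration of hierarchical target encoding: the per-category fallback
# ("scalar") is itself a blend and drifts away from the global prior
import numpy as np
import pandas as pd

min_samples_leaf, smoothing = 1, 1.0

def weighting(n):
    # monotonically increasing S-curve between 0 and 1, as in the encoder
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

X = pd.Series(['N', 'NE', 'NE', 'SE', 'S', 'S'], name='compass')
parent = X.map({'N': 'N', 'NE': 'N', 'SE': 'S', 'S': 'S'})  # one hierarchy level
y = pd.Series([1, 1, 0, 0, 1, 0])

prior = y.mean()  # global prior, 0.5 here

# level-1 blend: parent means shrunk towards the prior
stats_p = y.groupby(parent).agg(['count', 'mean'])
w_p = weighting(stats_p['count'])
fallback = prior * (1 - w_p) + stats_p['mean'] * w_p  # per parent, no longer one scalar

# leaf blend: leaf means shrunk towards their parent's blended value, not the prior
stats = y.groupby(X).agg(['count', 'mean'])
w = weighting(stats['count'])
leaf_fallback = fallback[parent.groupby(X).first()].values
encoding = leaf_fallback * (1 - w) + stats['mean'] * w
print(encoding)  # leaf encodings are pulled towards parent means rather than 0.5
```

With a deeper hierarchy the fallback is blended level by level and moves further from the prior, while the -1/-2 entries for unknown and missing categories keep mapping to the true prior, which is the distinction being made above.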
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
This library needs to be added to setup.py as well. Do you think it's possible to flatten the dictionary without depending on another library? I've had a quick look and it does not seem to be very actively maintained (the latest release is over a year old, and it officially does not support Python 3.9 onwards). See the sketch below.
PaulWestenthanner
78
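Since the nested hierarchy maps are small, the flattening can be done in plain Python without a third-party dependency. A sketch, assuming the goal is to map each leaf (or tuple of leaves) to the tuple of keys above it; the name mirrors the `util.flatten_reverse_dict` helper the final code calls, but the body here is illustrative:

```
def flatten_reverse_dict(d, path=()):
    """Map each leaf value of a nested dict to the tuple of keys leading to it."""
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            # recurse one hierarchy level deeper, extending the key path
            flat.update(flatten_reverse_dict(value, path + (key,)))
        else:
            flat[value] = path + (key,)
    return flat

hierarchical_map = {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}
print(flatten_reverse_dict(hierarchical_map))
# {('N', 'NE'): ('N',), ('S', 'SE'): ('S',), 'W': ('W',)}
```

The tuple-valued paths are what `_check_dict_key_tuples` later inspects to derive a consistent hierarchy depth.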
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I don't like a capital `D` as a variable name; something like `flattened_hierarchy` would be more telling.
PaulWestenthanner
79
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Also, in the `True` case you want to return the size as well, right? Otherwise `hierarchy_check[1]` might be undefined (cf. line 113). See the sketch below.
PaulWestenthanner
80
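What the suggestion amounts to, sketched with illustrative names: compute the tuple sizes once and return the size in both branches, so `(is_consistent, depth)` always unpacks safely.

```
def _check_dict_key_tuples(d):
    # all values are expected to be tuples; the check passes only if they
    # share a single length, and that length is returned either way
    tuple_sizes = {len(v) for v in d.values()}
    return len(tuple_sizes) == 1, min(tuple_sizes)

assert _check_dict_key_tuples({'a': (1, 2), 'b': (3, 4)}) == (True, 2)
assert _check_dict_key_tuples({'a': (1,), 'b': (3, 4)}) == (False, 1)
```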
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
also the one-line statement seems bulky and I'd find a multiline statement more readable (but this might just be personal taste) ``` if min_tuple_size == max_tuple_size: return ... else: return .... ```
PaulWestenthanner
81
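For context on the record above: the reviewer's preferred multiline form is what ended up in `_check_dict_key_tuples` in the merged diff. A minimal standalone sketch follows; the one-line variant shown in the comment is elided in the source, and the sample dicts here are purely illustrative (the real input shape comes from `util.flatten_reverse_dict`):

```python
def _check_dict_key_tuples(d):
    """Return (all values same length?, min length) over the dict's tuple values."""
    min_tuple_size = min(len(v) for v in d.values())
    max_tuple_size = max(len(v) for v in d.values())
    # Multiline form preferred in the review; behaviour is unchanged.
    if min_tuple_size == max_tuple_size:
        return True, min_tuple_size
    else:
        return False, min_tuple_size

# Illustrative inputs only:
print(_check_dict_key_tuples({'a': (1, 2), 'b': (3, 4)}))  # (True, 2)
print(_check_dict_key_tuples({'a': (1, 2), 'b': (3,)}))    # (False, 1)
```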
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
checking for types should be done using `isinstance(t, tuple)`
PaulWestenthanner
82
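The `isinstance` suggestion above refers to the dict comprehension in `__init__` that distinguishes tuple keys from scalar keys. A short sketch of why `isinstance(t, tuple)` is preferred over comparing `type(t)` directly (the values are illustrative, taken from the Compass example in the docstring):

```python
t_tuple = ('N', 'NE')  # a group of child categories
t_plain = 'W'          # a category that maps to itself

# Direct type comparison only matches the exact type:
print(type(t_tuple) == tuple)      # True, but rejects tuple subclasses

# isinstance() also accepts subclasses such as namedtuple instances:
print(isinstance(t_tuple, tuple))  # True
print(isinstance(t_plain, tuple))  # False
```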
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
an f-string would be more readable here
PaulWestenthanner
83
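The f-string remark above targets the string concatenation that builds hierarchy column names in `fit_target_encoding`. A minimal before/after sketch, with illustrative values:

```python
col, i = 'Compass', 0

# Concatenation as written in the diff:
col_hier = 'HIER_' + str(col) + '_' + str(i + 1)

# Equivalent, more readable f-string as the review suggests:
col_hier_f = f'HIER_{col}_{i + 1}'

assert col_hier == col_hier_f == 'HIER_Compass_1'
```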
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
this is also the reason why the tests are failing at the moment
PaulWestenthanner
84
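For reference, the `_weighting` helper factored out in this diff implements the S-curve described in the docstring's `min_samples_leaf`/`smoothing` parameters. A standalone sketch of how that weight blends the per-category mean with the global prior; the parameter values and numbers below are illustrative, not taken from the PR:

```python
import numpy as np

def weighting(n, min_samples_leaf=20, smoothing=10.0):
    # Monotonically increasing in n, bounded between 0 and 1;
    # equals 0.5 exactly at n == min_samples_leaf.
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

prior = 0.5      # global target mean
cat_mean = 0.9   # target mean within one category
for count in (5, 20, 200):
    w = weighting(count)
    encoded = prior * (1 - w) + cat_mean * w
    print(count, round(encoded, 3))  # rare categories stay near the prior
```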
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
you're right! thanks for that explanation
PaulWestenthanner
85
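The blend in `fit_target_encoding` above — `scalar * (1 - smoove) + stats['mean'] * smoove`, with `smoove` coming from `_weighting` — is the heart of the encoder. A minimal numeric sketch of that blend, with all values invented for illustration:

```python
import numpy as np

def weighting(n, min_samples_leaf=20, smoothing=10.0):
    # S-shaped weight in (0, 1); equals 0.5 exactly when n == min_samples_leaf
    return 1 / (1 + np.exp(-(n - min_samples_leaf) / smoothing))

prior = 0.5                       # global target mean (assumed)
category_mean, n_obs = 0.8, 35    # per-category mean and count (assumed)
w = weighting(n_obs)              # ~0.82: well past the threshold of 20
encoded = prior * (1 - w) + category_mean * w
print(round(encoded, 3))          # ~0.745, pulled toward the category mean
```

With few observations `w` drops toward 0 and the encoding falls back to the prior; the hierarchy levels in this PR refine that fallback by blending toward the parent category instead of straight to the global mean.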
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I think the flatten-dict function is complete and possibly too extensive to re-write. We could ask the author whether we may copy the required function into our utils, but I'd hesitate to re-write it. We tried alternatives and found flatten-dict to be better. I'll add it to setup.py for the moment, until we reach consensus.
nercisla
86
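For context, here is a minimal sketch of what a reverse-flattening helper could look like if it were ever copied into `utils` instead of taken from flatten-dict. The name mirrors `util.flatten_reverse_dict` used in the diff above, but the body is a hypothetical re-implementation, not the library's code:

```python
def flatten_reverse_dict(nested, path=()):
    """Hypothetical sketch: invert a nested hierarchy so each leaf value
    (a child category, or a tuple of them) maps to its tuple of ancestor keys."""
    flat = {}
    for key, value in nested.items():
        if isinstance(value, dict):
            # recurse one level deeper, extending the ancestor path
            flat.update(flatten_reverse_dict(value, path + (key,)))
        else:
            flat[value] = path + (key,)
    return flat

hierarchical_map = {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}
print(flatten_reverse_dict(hierarchical_map))
# {('N', 'NE'): ('N',), ('S', 'SE'): ('S',), 'W': ('W',)}
```

The length of each value tuple then gives the hierarchy depth that `_check_dict_key_tuples` validates.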
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Will change this!
nercisla
87
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
> also in the `True` case you want to return the size right? otherwise `hierarchy_check[1]` might be undefined (c.f. line 113)

This actually does the job despite how it looks at first glance: the if/else comes first, and the min_size is appended to either outcome afterwards, so it still returns the min_size in both cases.
nercisla
88
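A standalone illustration of that point (both maps below are made up): the size is computed regardless of which branch runs, so `hierarchy_check[1]` is always defined.

```python
def check_dict_key_tuples(d):
    # same logic as the method in the diff, as a free function for illustration
    min_tuple_size = min(len(v) for v in d.values())
    max_tuple_size = max(len(v) for v in d.values())
    if min_tuple_size == max_tuple_size:
        return True, min_tuple_size
    return False, min_tuple_size

consistent   = {('N', 'NE'): ('N',), ('S', 'SE'): ('S',)}   # all values depth 1
inconsistent = {('N', 'NE'): ('N',), 'SE': ('S', 'SE')}     # depths 1 and 2
print(check_dict_key_tuples(consistent))    # (True, 1)
print(check_dict_key_tuples(inconsistent))  # (False, 1)
```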
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
But I agree it's not easily readable (even for me the second time round), so I will change it.
nercisla
89
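The `fit_target_encoding` logic quoted in these records reduces to a prior/posterior blend weighted by an S-curve over category counts. Below is a minimal standalone sketch of that blend on a toy column; `k_min` and `f_smooth` are illustrative stand-ins for `min_samples_leaf` and `smoothing`, not library identifiers.

```python
import numpy as np
import pandas as pd

# Toy data: one categorical column and a binary target.
x = pd.Series(['a', 'a', 'a', 'b', 'b', 'c'])
y = pd.Series([1, 1, 0, 0, 1, 1])

k_min = 2       # min_samples_leaf: count at which the S-curve reaches 0.5
f_smooth = 1.0  # smoothing: larger values flatten the S-curve

prior = y.mean()
stats = y.groupby(x).agg(['count', 'mean'])

# S-shaped weight in (0, 1), matching _weighting in the encoder above.
smoove = 1 / (1 + np.exp(-(stats['count'] - k_min) / f_smooth))

# Blend of per-category mean and global prior; rare categories lean on the prior.
encoding = prior * (1 - smoove) + stats['mean'] * smoove
encoding[stats['count'] == 1] = prior  # singletons carry no usable signal

print(encoding)
```

With these toy values, the singleton category 'c' maps straight to the prior, while 'a' and 'b' land between their own means and the prior.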
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Agreed.
nercisla
90
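The hierarchical `_fit` logic repeated in the next record turns the flattened hierarchy into extra `HIER_` columns, which is the step easiest to miss. A minimal sketch of just that step, assuming a one-level flattened map (leaf value mapped to its tuple of ancestors, the shape the encoder builds in `__init__`); the data here is illustrative:

```python
import pandas as pd

# Illustrative flattened hierarchy: leaf value -> tuple of ancestors.
flat = {'N': ('N',), 'NE': ('N',), 'S': ('S',), 'SE': ('S',), 'W': ('W',)}
depth = 1  # hierarchy_depth for this column

X = pd.DataFrame({'Compass': ['N', 'NE', 'SE', 'W']})
colnames = [f'HIER_Compass_{i + 1}' for i in range(depth)]

# Same construction as in _fit: map each value to its ancestor tuple,
# then spread the tuple across one column per hierarchy level.
X_hier = pd.DataFrame(X['Compass'].map(flat).tolist(),
                      index=X.index, columns=colnames)
print(X_hier)
#   HIER_Compass_1
# 0              N
# 1              N
# 2              S
# 3              W
```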
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Aaah, that's how it works. Thanks for changing it anyway.
PaulWestenthanner
91
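Another detail the quoted `fit_target_encoding` relies on: the prefit ordinal encoder reserves -1 for unknown categories and -2 for missing values, and the fitted mapping gains rows for both sentinels. A minimal sketch with hypothetical fitted values:

```python
import pandas as pd

prior = 0.5
# Hypothetical fitted per-category encodings, keyed by ordinal codes 1..3.
smoothing = pd.Series({1: 0.58, 2: 0.41, 3: 0.50})

# handle_unknown='value' and handle_missing='value' both fall back to the prior.
smoothing.loc[-1] = prior  # unknown category
smoothing.loc[-2] = prior  # missing value

# Ordinal-transformed column with one unknown (-1) and one missing (-2) slot.
X_ordinal = pd.Series([1, 3, -1, -2])
print(X_ordinal.map(smoothing).tolist())
# [0.58, 0.5, 0.5, 0.5]
```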
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I think I just found a neat way: pandas implements a function `json_normalize` that can be used to flatten dicts as well, and it is very well maintained. For reference, it's approach number 3 here: https://www.freecodecamp.org/news/how-to-flatten-a-dictionary-in-python-in-4-different-ways/ I just tried it out and it seems to work. With this function ```python def flatten_reverse_dict(d): sep = "___" [flat_dict] = pd.json_normalize(d, sep=sep).to_dict(orient='records') reversed_flat_dict = {v: tuple(k.split(sep)) for k, v in flat_dict.items()} return reversed_flat_dict ``` we get the same results as with the existing implementation ```python hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': {'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} flattened = flatten(hierarchy_multi_level["Animal"], inverse=True) flattened_pd = flatten_reverse_dict(hierarchy_multi_level["Animal"]) print(flattened_pd == flattened) # prints True ``` This would be just 4 lines. What do you think? Is this an acceptable solution for you?
PaulWestenthanner
92
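The `json_normalize` helper in the comment above produces a map from leaf tuples to path tuples; the encoder's `__init__` then expands each tuple key into one entry per leaf value. A minimal sketch of both steps together, using the one-level Compass map from the docstring example:

```python
import pandas as pd

def flatten_reverse_dict(d):
    # Helper from the review comment above: flatten with json_normalize,
    # then invert so each group of leaves maps to its path in the hierarchy.
    sep = "___"
    [flat_dict] = pd.json_normalize(d, sep=sep).to_dict(orient='records')
    return {v: tuple(k.split(sep)) for k, v in flat_dict.items()}

compass_map = {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}
flattened = flatten_reverse_dict(compass_map)
# {('N', 'NE'): ('N',), ('S', 'SE'): ('S',), 'W': ('W',)}

# __init__ then expands tuple keys so every leaf value gets its own entry;
# a plain string key iterates over itself, which the isinstance check handles.
expanded = {(k if isinstance(t, tuple) else t): v
            for t, v in flattened.items() for k in t}
print(expanded)
# {'N': ('N',), 'NE': ('N',), 'S': ('S',), 'SE': ('S',), 'W': ('W',)}
```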
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136 ## Proposed Changes This pull request implements feature hierarchies in Target Encoders. Author: @nercisla Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. 
[1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None def _fit(self, X, y, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X
"""Target Encoder""" import numpy as np import pandas as pd from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util import warnings __author__ = 'chappers' class TargetEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). hierarchy: dict a dictionary of columns to map into hierarchies. Dictionary key(s) should be the column name from X which requires mapping. For multiple hierarchical maps, this should be a dictionary of dictionaries. 
Examples ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None >>> X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'] >>> hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}} >>> y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] >>> enc = TargetEncoder(hierarchy=hierarchical_map).fit(X, y) >>> hierarchy_dataset = enc.transform(X) >>> print(hierarchy_dataset[0].values) [0.5 0.5 0.94039854 0.94039854 0.94039854 0.13447071 0.13447071 0.5 0.5 0.5 0.5 0.40179862 0.40179862 0.40179862 0.40179862 0.40179862] References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0, hierarchy=None): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if smoothing == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.mapping = None self._mean = None if hierarchy: self.hierarchy = {} self.hierarchy_depth = {} for switch in hierarchy: flattened_hierarchy = util.flatten_reverse_dict(hierarchy[switch]) hierarchy_check = self._check_dict_key_tuples(flattened_hierarchy) self.hierarchy_depth[switch] = hierarchy_check[1] if not hierarchy_check[0]: raise ValueError('Hierarchy mapping contains different levels for key "' + switch + '"') self.hierarchy[switch] = {(k if isinstance(t, tuple) else t): v for t, v in flattened_hierarchy.items() for k in t} else: self.hierarchy = hierarchy self.cols_hier = [] def _check_dict_key_tuples(self, d): min_tuple_size = min(len(v) for v in d.values()) max_tuple_size = max(len(v) for v in d.values()) if min_tuple_size == max_tuple_size: return True, min_tuple_size else: return False, min_tuple_size def _fit(self, X, y, **kwargs): if self.hierarchy: X_hier = pd.DataFrame() for switch in self.hierarchy: if switch in self.cols: colnames = [f'HIER_{str(switch)}_{str(i + 1)}' for i in range(self.hierarchy_depth[switch])] df = pd.DataFrame(X[str(switch)].map(self.hierarchy[str(switch)]).tolist(), index=X.index, columns=colnames) X_hier = pd.concat([X_hier, df], axis=1) enc_hier = OrdinalEncoder( verbose=self.verbose, cols=X_hier.columns, handle_unknown='value', handle_missing='value' ) enc_hier = enc_hier.fit(X_hier) X_hier_ordinal = enc_hier.transform(X_hier) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) if self.hierarchy: self.mapping = self.fit_target_encoding(pd.concat([X_ordinal, X_hier_ordinal], axis=1), y) else: self.mapping = self.fit_target_encoding(X_ordinal, y) def fit_target_encoding(self, X, y): mapping = {} prior = self._mean = y.mean() for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') if 'HIER_' not in str(col): values = switch.get('mapping') scalar = prior if self.hierarchy and col in self.hierarchy: for i in range(self.hierarchy_depth[col]): col_hier = 'HIER_'+str(col)+'_'+str(i+1) col_hier_m1 = col if i == self.hierarchy_depth[col]-1 else 'HIER_'+str(col)+'_'+str(i+2) if not X[col].equals(X[col_hier]) and len(X[col_hier].unique())>1: stats_hier = y.groupby(X[col_hier]).agg(['count', 'mean']) smoove_hier = self._weighting(stats_hier['count']) scalar_hier = scalar * (1 - smoove_hier) + stats_hier['mean'] * smoove_hier scalar_hier_long = X[[col_hier_m1, col_hier]].drop_duplicates() scalar_hier_long.index = np.arange(1, scalar_hier_long.shape[0]+1) scalar = scalar_hier_long[col_hier].map(scalar_hier.to_dict()) stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = self._weighting(stats['count']) smoothing = scalar * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = scalar if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def _transform(self, X, y=None): # Now X is the correct dimensions it works with pre fitted ordinal encoder X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found 
in dataframe') X = self.target_encode(X) return X def target_encode(self, X_in): X = X_in.copy(deep=True) # Was not mapping extra columns as self.cols did not include new column for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def _weighting(self, n): # monotonically increasing function on n bounded between 0 and 1 return 1 / (1 + np.exp(-(n - self.min_samples_leaf) / self.smoothing))
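For orientation, a minimal usage sketch of the hierarchy feature implemented above; the toy data, column name, and parameter values are invented for illustration, and it assumes a build that includes this PR:

import pandas as pd
import category_encoders as encoders

# Toy data: compass directions, with a hierarchy grouping the leaf
# categories into coarser top-level directions.
X = pd.DataFrame({'Compass': ['N', 'N', 'NE', 'SE', 'S', 'W']})
y = pd.Series([1, 0, 1, 0, 1, 1])
hierarchy = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}

# With a hierarchy, sparse leaf categories are shrunk toward their parent
# group's smoothed mean rather than straight to the global prior.
enc = encoders.TargetEncoder(cols=['Compass'], hierarchy=hierarchy,
                             smoothing=2, min_samples_leaf=2)
X_encoded = enc.fit_transform(X, y)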
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
A very nice solution! I have tested it, so I will push it to the branch now. Nice work!
nercisla
93
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd
from unittest import TestCase  # or `from unittest import ...` if on Python 3.4+

import tests.helpers as th
import numpy as np

import category_encoders as encoders


class TestTargetEncoder(TestCase):

    def test_target_encoder(self):
        np_X = th.create_array(n_rows=100)
        np_X_t = th.create_array(n_rows=50, extras=True)
        np_y = np.random.randn(np_X.shape[0]) > 0.5
        np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
        X = th.create_dataset(n_rows=100)
        X_t = th.create_dataset(n_rows=50, extras=True)
        y = pd.DataFrame(np_y)
        y_t = pd.DataFrame(np_y_t)
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2)
        enc.fit(X, y)
        th.verify_numeric(enc.transform(X_t))
        th.verify_numeric(enc.transform(X_t, y_t))

    def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        encoder.fit(binary_cat_example, binary_cat_example['target'])
        trend_mapping = encoder.mapping['Trend']
        ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping']
        self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4)
        self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']])
        self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4)

    def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
                                     categories=['UP', 'FLAT', 'DOWN']),
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']),
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_noncontiguous_index(self):
        data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna()
        result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y'])
        self.assertTrue(np.allclose(result, 2.0))

    def test_HandleMissingIsValueAndNanInTest_ExpectMean(self):
        df = pd.DataFrame({
            'color': ["a", "a", "a", "b", "b", "b"],
            'outcome': [1.6, 0, 0, 1, 0, 1]})
        train = df.drop('outcome', axis=1)
        target = df.drop('color', axis=1)
        test = pd.Series([np.nan, 'b'], name='color')
        test_target = pd.Series([0, 0])
        enc = encoders.TargetEncoder(cols=['color'], handle_missing='value')
        enc.fit(train, target['outcome'])
        obtained = enc.transform(test, test_target)
        self.assertEqual(.6, list(obtained['color'])[0])

    def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self):
        train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color')
        target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target')
        test = pd.Series(['c', 'b'], name='color')
        test_target = pd.Series([0, 0])
        enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value')
        enc.fit(train, target)
        obtained = enc.transform(test, test_target)
        self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd
from unittest import TestCase  # or `from unittest import ...` if on Python 3.4+

import tests.helpers as th
import numpy as np

import category_encoders as encoders


class TestTargetEncoder(TestCase):

    def setUp(self):
        self.hierarchical_cat_example = pd.DataFrame(
            {
                'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'],
                'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast',
                          'fast', 'fast', 'fast', 'fast', 'fast', 'fast'],
                'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog',
                           'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'],
                'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy',
                          'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'],
                'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]
            }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target'])
        self.hierarchical_map = {
            'Compass': {
                'N': ('N', 'NE'),
                'S': ('S', 'SE'),
                'W': 'W'
            },
            'Animal': {
                'Feline': ('Cat', 'Tiger', 'Cougar'),
                'Canine': ('Dog', 'Wolf')
            },
            'Plant': {
                'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'),
                'Tree': ('Ash', 'Birch')
            },
        }

    def test_target_encoder(self):
        np_X = th.create_array(n_rows=100)
        np_X_t = th.create_array(n_rows=50, extras=True)
        np_y = np.random.randn(np_X.shape[0]) > 0.5
        np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
        X = th.create_dataset(n_rows=100)
        X_t = th.create_dataset(n_rows=50, extras=True)
        y = pd.DataFrame(np_y)
        y_t = pd.DataFrame(np_y_t)
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2)
        enc.fit(X, y)
        th.verify_numeric(enc.transform(X_t))
        th.verify_numeric(enc.transform(X_t, y_t))

    def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        encoder.fit(binary_cat_example, binary_cat_example['target'])
        trend_mapping = encoder.mapping['Trend']
        ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping']
        self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4)
        self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']])
        self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4)

    def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
                                     categories=['UP', 'FLAT', 'DOWN']),
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self):
        k = 2
        f = 10
        binary_cat_example = pd.DataFrame(
            {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']),
             'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
        encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
        result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
        values = result['Trend'].values
        self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
        self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
        self.assertEqual(0.5, values[3])

    def test_target_encoder_noncontiguous_index(self):
        data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna()
        result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y'])
        self.assertTrue(np.allclose(result, 2.0))

    def test_HandleMissingIsValueAndNanInTest_ExpectMean(self):
        df = pd.DataFrame({
            'color': ["a", "a", "a", "b", "b", "b"],
            'outcome': [1.6, 0, 0, 1, 0, 1]})
        train = df.drop('outcome', axis=1)
        target = df.drop('color', axis=1)
        test = pd.Series([np.nan, 'b'], name='color')
        test_target = pd.Series([0, 0])
        enc = encoders.TargetEncoder(cols=['color'], handle_missing='value')
        enc.fit(train, target['outcome'])
        obtained = enc.transform(test, test_target)
        self.assertEqual(.6, list(obtained['color'])[0])

    def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self):
        train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color')
        target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target')
        test = pd.Series(['c', 'b'], name='color')
        test_target = pd.Series([0, 0])
        enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value')
        enc.fit(train, target)
        obtained = enc.transform(test, test_target)
        self.assertEqual(.6, list(obtained['color'])[0])

    def test_hierarchical_smoothing(self):
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=self.hierarchical_map, cols=['Compass'])
        result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
        values = result['Compass'].values
        self.assertAlmostEqual(0.6226, values[0], delta=1e-4)
        self.assertAlmostEqual(0.9038, values[2], delta=1e-4)
        self.assertAlmostEqual(0.1766, values[5], delta=1e-4)
        self.assertAlmostEqual(0.4605, values[7], delta=1e-4)
        self.assertAlmostEqual(0.4033, values[11], delta=1e-4)

    def test_hierarchical_smoothing_multi(self):
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal'])
        result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
        values = result['Compass'].values
        self.assertAlmostEqual(0.6226, values[0], delta=1e-4)
        self.assertAlmostEqual(0.9038, values[2], delta=1e-4)
        self.assertAlmostEqual(0.1766, values[5], delta=1e-4)
        self.assertAlmostEqual(0.4605, values[7], delta=1e-4)
        self.assertAlmostEqual(0.4033, values[11], delta=1e-4)

        values = result['Speed'].values
        self.assertAlmostEqual(0.6827, values[0], delta=1e-4)
        self.assertAlmostEqual(0.3962, values[4], delta=1e-4)
        self.assertAlmostEqual(0.4460, values[7], delta=1e-4)

        values = result['Animal'].values
        self.assertAlmostEqual(0.7887, values[0], delta=1e-4)
        self.assertAlmostEqual(0.3248, values[5], delta=1e-4)
        self.assertAlmostEqual(0.6190, values[11], delta=1e-4)
        self.assertAlmostEqual(0.1309, values[13], delta=1e-4)
        self.assertAlmostEqual(0.7381, values[15], delta=1e-4)

    def test_hierarchical_part_named_cols(self):
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=self.hierarchical_map, cols=['Compass'])
        result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
        values = result['Compass'].values
        self.assertAlmostEqual(0.6226, values[0], delta=1e-4)
        self.assertAlmostEqual(0.9038, values[2], delta=1e-4)
        self.assertAlmostEqual(0.1766, values[5], delta=1e-4)
        self.assertAlmostEqual(0.4605, values[7], delta=1e-4)
        self.assertAlmostEqual(0.4033, values[11], delta=1e-4)

        values = result['Speed'].values
        self.assertEqual('slow', values[0])

    def test_hierarchy_pandas_index(self):
        df = pd.DataFrame({
            'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'],
            'world': [0, 1, 0, 0, 1, 0, 0, 1, 1]
        }, columns=pd.Index(['hello', 'world']))
        cols = df.select_dtypes(include='object').columns
        self.hierarchical_map = {
            'hello': {
                'A': ('a', 'b'),
                'B': ('c', 'd')
            },
        }
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map)
        result = enc.fit_transform(df, df['world'])
        values = result['hello'].values
        self.assertAlmostEqual(0.3616, values[0], delta=1e-4)
        self.assertAlmostEqual(0.4541, values[1], delta=1e-4)
        self.assertAlmostEqual(0.2425, values[2], delta=1e-4)
        self.assertAlmostEqual(0.7425, values[7], delta=1e-4)

    def test_hierarchy_single_mapping(self):
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=self.hierarchical_map, cols=['Plant'])
        result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
        values = result['Plant'].values
        self.assertAlmostEqual(0.6828, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5, values[4], delta=1e-4)
        self.assertAlmostEqual(0.5, values[8], delta=1e-4)
        self.assertAlmostEqual(0.3172, values[12], delta=1e-4)

    def test_hierarchy_no_mapping(self):
        hierarchical_map = {
            'Plant': {
                'Rose': 'Rose',
                'Daisy': 'Daisy',
                'Daffodil': 'Daffodil',
                'Bluebell': 'Bluebell'
            }
        }
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=hierarchical_map, cols=['Plant'])
        result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
        values = result['Plant'].values
        self.assertAlmostEqual(0.6828, values[0], delta=1e-4)
        self.assertAlmostEqual(0.5, values[4], delta=1e-4)
        self.assertAlmostEqual(0.5, values[8], delta=1e-4)
        self.assertAlmostEqual(0.3172, values[12], delta=1e-4)

    def test_hierarchy_error(self):
        hierarchical_map = {
            'Plant': {
                'Flower': {'Rose': ('Pink', 'Yellow', 'Red')},
                'Tree': 'Ash'
            }
        }
        with self.assertRaises(ValueError):
            encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                   hierarchy=hierarchical_map, cols=['Plant'])

    def test_hierarchy_multi_level(self):
        hierarchy_multi_level_df = pd.DataFrame(
            {
                'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp',
                           'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'],
                'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]
            }, columns=['Animal', 'target'])
        hierarchy_multi_level = {
            'Animal': {
                'Warm-Blooded': {'Mammals': ('Cat', 'Dog'),
                                 'Birds': ('Osprey', 'Kite'),
                                 'Fish': ('Carp', 'Clownfish')
                                 },
                'Cold-Blooded': {'Reptiles': ('Lizard'),
                                 'Amphibians': ('Snake', 'Frog')
                                 }
            }}
        enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                     hierarchy=hierarchy_multi_level, cols=['Animal'])
        result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target'])
        values = result['Animal'].values
        self.assertAlmostEqual(0.6261, values[0], delta=1e-4)
        self.assertAlmostEqual(0.9065, values[2], delta=1e-4)
        self.assertAlmostEqual(0.4107, values[5], delta=1e-4)
        self.assertAlmostEqual(0.3680, values[8], delta=1e-4)
        self.assertAlmostEqual(0.4626, values[11], delta=1e-4)
        self.assertAlmostEqual(0.2466, values[13], delta=1e-4)
        self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
This will just overwrite lines 117-125.
PaulWestenthanner
94
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Another nice test would be to check a "trivial hierarchy": mapping everything to a single top-level group should do the same thing as target encoding without any hierarchy. A possible sketch is below.
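A sketch of what that test could look like, reusing the setUp fixtures from the diff above; the test name and the trivial map are made up, and the expected equivalence rests on the guard len(X[col_hier].unique()) > 1 in fit_target_encoding:

def test_hierarchy_trivial(self):
    # Hypothetical test: mapping every 'Plant' category to one top-level
    # group should reproduce plain (non-hierarchical) target encoding,
    # because a single-valued parent column never replaces the prior.
    trivial_map = {'Plant': {'All': ('Rose', 'Daisy', 'Daffodil', 'Bluebell')}}
    enc_hier = encoders.TargetEncoder(smoothing=2, min_samples_leaf=2,
                                      hierarchy=trivial_map, cols=['Plant'])
    enc_plain = encoders.TargetEncoder(smoothing=2, min_samples_leaf=2, cols=['Plant'])
    result_hier = enc_hier.fit_transform(self.hierarchical_cat_example,
                                         self.hierarchical_cat_example['target'])
    result_plain = enc_plain.fit_transform(self.hierarchical_cat_example,
                                           self.hierarchical_cat_example['target'])
    pd.testing.assert_series_equal(result_hier['Plant'], result_plain['Plant'])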
PaulWestenthanner
95
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
And maybe another test for multi-level hierarchies where we'd expect an error; a possible shape is sketched below.
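One possible shape for that test; the uneven map is invented, and it assumes (per _check_dict_key_tuples in the implementation) that hierarchy branches of different depth are rejected in the constructor:

def test_hierarchy_uneven_levels_error(self):
    # Hypothetical test: 'Warm-Blooded' has two levels below it while
    # 'Cold-Blooded' has only one, so the flattened hierarchy contains
    # keys at different depths and construction should raise ValueError.
    uneven_map = {
        'Animal': {
            'Warm-Blooded': {'Mammals': ('Cat', 'Dog')},
            'Cold-Blooded': ('Lizard', 'Snake')
        }
    }
    with self.assertRaises(ValueError):
        encoders.TargetEncoder(hierarchy=uneven_map, cols=['Animal'])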
PaulWestenthanner
96
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes
This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Correct.
nercisla
97
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
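For context, the new hierarchy is supplied as a nested dict keyed by column name, so that child categories are grouped under parent levels used when smoothing. A minimal usage sketch, assuming the `hierarchy=` keyword and map format exercised in the updated tests below; the toy data here is illustrative, not taken from the PR:

import pandas as pd
import category_encoders as encoders

# Toy frame; the 'Compass' levels mirror the fixture used in the tests.
df = pd.DataFrame({'Compass': ['N', 'NE', 'SE', 'S', 'W', 'NE'],
                   'target': [1, 0, 1, 0, 1, 1]})
# Each child category maps to a parent level ('N' and 'NE' share one parent).
hierarchy = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}
enc = encoders.TargetEncoder(cols=['Compass'], hierarchy=hierarchy,
                             smoothing=2, min_samples_leaf=2)
encoded = enc.fit_transform(df, df['target'])  # 'Compass' becomes one numeric column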
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Yup, agreed, as this currently breaks, I think.
nercisla
98
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Do you mean a test to check that it errors elegantly?
nercisla
99
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
yes, just a `self.assertRaises` statement that checks for the error of multi-level hierarchies
PaulWestenthanner
100
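For context, the guard the comment above describes is already visible in the record's `test_hierarchy_error`. A minimal sketch of that pattern follows; it assumes, as the test implies, that `TargetEncoder` validates the `hierarchy` argument at construction time, and my reading that the map is rejected because its branches have inconsistent depth ('Flower' nests a further dict while 'Tree' maps straight to a leaf) is an assumption — the test only pins the exception type:

```python
import unittest
import category_encoders as encoders

class HierarchyValidation(unittest.TestCase):
    def test_rejects_inconsistent_map(self):
        # Mirrors test_hierarchy_error from the record above: mixing a nested
        # dict branch with a bare-string leaf should raise ValueError.
        bad_map = {'Plant': {'Flower': {'Rose': ('Pink', 'Yellow', 'Red')},
                             'Tree': 'Ash'}}
        with self.assertRaises(ValueError):
            encoders.TargetEncoder(smoothing=2, min_samples_leaf=2,
                                   hierarchy=bad_map, cols=['Plant'])
```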
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I just saw you've already implemented that. Very nice!
PaulWestenthanner
101
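The implementation the comment praises is exercised by `test_hierarchy_multi_level` in the record above. A condensed usage sketch, with the encoder call and map taken from that test; the shorter data frame is mine, for brevity. Note the tests also show that a bare string is accepted as a leaf (e.g. `'W': 'W'`), so the original `('Lizard')` — which is just the string `'Lizard'`, not a one-element tuple — still works:

```python
import pandas as pd
import category_encoders as encoders

# Two-level map: species roll up to classes, classes roll up to blood type.
hierarchy = {'Animal': {
    'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite')},
    'Cold-Blooded': {'Reptiles': ('Lizard',), 'Amphibians': ('Snake', 'Frog')},
}}
df = pd.DataFrame({'Animal': ['Cat', 'Dog', 'Kite', 'Lizard', 'Snake'],
                   'target': [1, 1, 0, 0, 1]})
enc = encoders.TargetEncoder(smoothing=2, min_samples_leaf=2,
                             hierarchy=hierarchy, cols=['Animal'])
print(enc.fit_transform(df, df['target']))
```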
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
Why do you think this breaks? According to the paper this should be fine, right?
PaulWestenthanner
102
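The blending the tests pin down matches Micci-Barreca-style empirical-Bayes smoothing — my reading of "the paper" the comment refers to. It also suggests why a trivial one-parent hierarchy should be harmless: the single parent covers every row, so its mean is just the global prior. A sketch reproducing the Trend values asserted throughout the records (k and f are the tests' `min_samples_leaf=2` and `smoothing=10`):

```python
import math

def blend(cat_mean, n, prior, k=2, f=10):
    # weight = 1 / (1 + exp(-(n - k) / f)); encoded = prior + weight * (mean - prior)
    w = 1.0 / (1.0 + math.exp(-(n - k) / f))
    return prior + w * (cat_mean - prior)

prior = 0.5                            # 5 positives in the 10-row Trend example
print(round(blend(2/3, 3, prior), 4))  # UP   -> 0.5875 (tests assert ~0.5874)
print(round(blend(1/3, 3, prior), 4))  # DOWN -> 0.4125
print(round(blend(1/2, 4, prior), 4))  # FLAT -> 0.5
```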
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
I just tried it out and it works fine. Here's a complete test. I'd be happy if you include it

```python
def test_trivial_hierarchy(self):
    trivial_hierarchical_map = {
        'Plant': {
            'Plant': ('Rose', 'Daisy', 'Daffodil', 'Bluebell')
        }
    }
    enc_hier = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                      hierarchy=trivial_hierarchical_map, cols=['Plant'])
    result_hier = enc_hier.fit_transform(self.hierarchical_cat_example,
                                         self.hierarchical_cat_example['target'])
    enc_no_hier = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2,
                                         cols=['Plant'])
    result_no_hier = enc_no_hier.fit_transform(self.hierarchical_cat_example,
                                               self.hierarchical_cat_example['target'])
    pd.testing.assert_series_equal(result_hier["Plant"], result_no_hier["Plant"])
```
PaulWestenthanner
103
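A possible follow-up to the test above: building the trivial map from the data itself, so the equivalence can be checked on any column. The helper name is hypothetical, not part of the library:

```python
import pandas as pd

def trivial_hierarchy(df: pd.DataFrame, col: str) -> dict:
    # Every observed category rolls up to one synthetic parent; hierarchical
    # target encoding should then coincide with plain target encoding.
    return {col: {col: tuple(df[col].dropna().unique())}}
```

Usage would look like `encoders.TargetEncoder(hierarchy=trivial_hierarchy(df, 'Plant'), cols=['Plant'])`.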
scikit-learn-contrib/category_encoders
366
Target encoding hierarchical
Fixes #136

## Proposed Changes

This pull request implements feature hierarchies in Target Encoders.

Author: @nercisla
Current status: Work in Progress
null
2022-08-07 07:41:06+00:00
2022-09-06 16:18:16+00:00
tests/test_target_encoder.py
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, 
values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0])
import pandas as pd from unittest import TestCase # or `from unittest import ...` if on Python 3.4+ import tests.helpers as th import numpy as np import category_encoders as encoders class TestTargetEncoder(TestCase): def setUp(self): self.hierarchical_cat_example = pd.DataFrame( { 'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target']) self.hierarchical_map = { 'Compass': { 'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W' }, 'Animal': { 'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf') }, 'Plant': { 'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch') }, } def test_target_encoder(self): np_X = th.create_array(n_rows=100) np_X_t = th.create_array(n_rows=50, extras=True) np_y = np.random.randn(np_X.shape[0]) > 0.5 np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5 X = th.create_dataset(n_rows=100) X_t = th.create_dataset(n_rows=50, extras=True) y = pd.DataFrame(np_y) y_t = pd.DataFrame(np_y_t) enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2) enc.fit(X, y) th.verify_numeric(enc.transform(X_t)) th.verify_numeric(enc.transform(X_t, y_t)) def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) encoder.fit(binary_cat_example, binary_cat_example['target']) trend_mapping = encoder.mapping['Trend'] ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping'] self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=1e-4) self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']]) self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=1e-4) def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], 
min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self): k = 2 f = 10 binary_cat_example = pd.DataFrame( {'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]}) encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f) result = encoder.fit_transform(binary_cat_example, binary_cat_example['target']) values = result['Trend'].values self.assertAlmostEqual(0.5874, values[0], delta=1e-4) self.assertAlmostEqual(0.5874, values[1], delta=1e-4) self.assertAlmostEqual(0.4125, values[2], delta=1e-4) self.assertEqual(0.5, values[3]) def test_target_encoder_noncontiguous_index(self): data = pd.DataFrame({'x': ['a', 'b', np.nan, 'd', 'e'], 'y': range(5)}).dropna() result = encoders.TargetEncoder(cols=['x']).fit_transform(data[['x']], data['y']) self.assertTrue(np.allclose(result, 2.0)) def test_HandleMissingIsValueAndNanInTest_ExpectMean(self): df = pd.DataFrame({ 'color': ["a", "a", "a", "b", "b", "b"], 'outcome': [1.6, 0, 0, 1, 0, 1]}) train = df.drop('outcome', axis=1) target = df.drop('color', axis=1) test = pd.Series([np.nan, 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_missing='value') enc.fit(train, target['outcome']) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self): train = pd.Series(["a", "a", "a", "b", "b", "b"], name='color') target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target') test = pd.Series(['c', 'b'], name='color') test_target = pd.Series([0, 0]) enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value') enc.fit(train, target) obtained = enc.transform(test, test_target) self.assertEqual(.6, list(obtained['color'])[0]) def test_hierarchical_smoothing(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) def test_hierarchical_smoothing_multi(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertAlmostEqual(0.6827, values[0], delta=1e-4) self.assertAlmostEqual(0.3962, values[4], delta=1e-4) self.assertAlmostEqual(0.4460, values[7], 
delta=1e-4) values = result['Animal'].values self.assertAlmostEqual(0.7887, values[0], delta=1e-4) self.assertAlmostEqual(0.3248, values[5], delta=1e-4) self.assertAlmostEqual(0.6190, values[11], delta=1e-4) self.assertAlmostEqual(0.1309, values[13], delta=1e-4) self.assertAlmostEqual(0.7381, values[15], delta=1e-4) def test_hierarchical_part_named_cols(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Compass'].values self.assertAlmostEqual(0.6226, values[0], delta=1e-4) self.assertAlmostEqual(0.9038, values[2], delta=1e-4) self.assertAlmostEqual(0.1766, values[5], delta=1e-4) self.assertAlmostEqual(0.4605, values[7], delta=1e-4) self.assertAlmostEqual(0.4033, values[11], delta=1e-4) values = result['Speed'].values self.assertEqual('slow', values[0]) def test_hierarchy_pandas_index(self): df = pd.DataFrame({ 'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1] }, columns=pd.Index(['hello', 'world'])) cols = df.select_dtypes(include='object').columns self.hierarchical_map = { 'hello': { 'A': ('a', 'b'), 'B': ('c', 'd') }, } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map) result = enc.fit_transform(df, df['world']) values = result['hello'].values self.assertAlmostEqual(0.3616, values[0], delta=1e-4) self.assertAlmostEqual(0.4541, values[1], delta=1e-4) self.assertAlmostEqual(0.2425, values[2], delta=1e-4) self.assertAlmostEqual(0.7425, values[7], delta=1e-4) def test_hierarchy_single_mapping(self): enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_no_mapping(self): hierarchical_map = { 'Plant': { 'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell' } } enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target']) values = result['Plant'].values self.assertAlmostEqual(0.6828, values[0], delta=1e-4) self.assertAlmostEqual(0.5, values[4], delta=1e-4) self.assertAlmostEqual(0.5, values[8], delta=1e-4) self.assertAlmostEqual(0.3172, values[12], delta=1e-4) def test_hierarchy_error(self): hierarchical_map = { 'Plant': { 'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash' } } with self.assertRaises(ValueError): encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant']) def test_hierarchy_multi_level(self): hierarchy_multi_level_df = pd.DataFrame( { 'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1] }, columns=['Animal', 'target']) hierarchy_multi_level = { 'Animal': { 'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish') }, 'Cold-Blooded': 
{'Reptiles': ('Lizard'), 'Amphibians': ('Snake', 'Frog') } }} enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal']) result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target']) values = result['Animal'].values self.assertAlmostEqual(0.6261, values[0], delta=1e-4) self.assertAlmostEqual(0.9065, values[2], delta=1e-4) self.assertAlmostEqual(0.4107, values[5], delta=1e-4) self.assertAlmostEqual(0.3680, values[8], delta=1e-4) self.assertAlmostEqual(0.4626, values[11], delta=1e-4) self.assertAlmostEqual(0.2466, values[13], delta=1e-4) self.assertAlmostEqual(0.4741, values[14], delta=1e-4)
PaulWestenthanner
6a13c14919d56fed8177a173d4b3b82c5ea2fef5
30f05c0b584b453332a97389494f95b218495d32
@nercisla have you seen this comment?
PaulWestenthanner
104
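The diff in the row above adds tests for the `hierarchy` feature of `TargetEncoder`. A minimal usage sketch, assuming only the public API exercised by those tests (the data values here are illustrative, not the fixture itself):

```python
import pandas as pd
import category_encoders as encoders

# Intermediate 'Compass' levels are grouped so that rare directions can
# borrow statistical strength from a parent group, as in the test fixture.
df = pd.DataFrame({
    'Compass': ['N', 'N', 'NE', 'SE', 'S', 'S', 'W', 'W'],
    'target':  [1, 0, 1, 0, 1, 0, 1, 0],
})
hierarchy = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}

enc = encoders.TargetEncoder(cols=['Compass'], min_samples_leaf=2, smoothing=2,
                             hierarchy=hierarchy)
encoded = enc.fit_transform(df[['Compass']], df['target'])
```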
scikit-learn-contrib/category_encoders
360
Upgrade versions
Fixes #359

## Proposed Changes
- drop support for python 3.5 and python 3.6
- drop support for pandas <1.0.5
- introduce f strings
null
2022-05-30 16:24:17+00:00
2022-05-31 12:57:43+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'chappers' class TargetEncoder(BaseEstimator, util.TransformerWithTargetMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int minimum samples to take category average into account. smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf self.smoothing = float(smoothing) # Make smoothing a float so that python 2 does not treat as integer division self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.feature_names = None def fit(self, X, y, **kwargs): """Fit encoder according to X and y. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target info (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,)) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""Target Encoder""" import warnings import numpy as np import pandas as pd from sklearn.base import BaseEstimator from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'chappers' class TargetEncoder(BaseEstimator, util.TransformerWithTargetMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if min_samples_leaf == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.feature_names = None def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove # @ToDo delete this in version 2.6 smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target info (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
PaulWestenthanner
3ae37104c2a5884f701d518fab234a3cb8859dd0
a18cb64a81310a5e515c7b21255597b4dfb29b86
Isn't this a typo? Should be `if smoothing == 1.0` right?
freddyaboulton
105
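A sketch of the guard logic the review comment above points at, with each deprecation warning gated on its own parameter (`_warn_changing_defaults` is a hypothetical helper for illustration, not code from the PR):

```python
import warnings

def _warn_changing_defaults(min_samples_leaf=1, smoothing=1.0):
    # Each warning checks its *own* parameter's default. The committed diff
    # gates both warnings on min_samples_leaf, which is the suspected typo.
    if min_samples_leaf == 1:
        warnings.warn("Default parameter min_samples_leaf will change in version 2.6.",
                      category=FutureWarning)
    if smoothing == 1.0:
        warnings.warn("Default parameter smoothing will change in version 2.6.",
                      category=FutureWarning)
```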
scikit-learn-contrib/category_encoders
360
Upgrade versions
Fixes #359

## Proposed Changes
- drop support for python 3.5 and python 3.6
- drop support for pandas <1.0.5
- introduce f strings
null
2022-05-30 16:24:17+00:00
2022-05-31 12:57:43+00:00
category_encoders/target_encoder.py
"""Target Encoder""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'chappers' class TargetEncoder(BaseEstimator, util.TransformerWithTargetMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int minimum samples to take category average into account. smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf self.smoothing = float(smoothing) # Make smoothing a float so that python 2 does not treat as integer division self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.feature_names = None def fit(self, X, y, **kwargs): """Fit encoder according to X and y. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target info (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,)) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""Target Encoder""" import warnings import numpy as np import pandas as pd from sklearn.base import BaseEstimator from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'chappers' class TargetEncoder(BaseEstimator, util.TransformerWithTargetMixin): """Target encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. For the case of categorical target: features are replaced with a blend of posterior probability of the target given particular categorical value and the prior probability of the target over all the training data. For the case of continuous target: features are replaced with a blend of the expected value of the target given particular categorical value and the expected value of the target over all the training data. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. min_samples_leaf: int For regularization the weighted average between category mean and global mean is taken. The weight is an S-shaped curve between 0 and 1 with the number of samples for a category on the x-axis. The curve reaches 0.5 at min_samples_leaf. (parameter k in the original paper) smoothing: float smoothing effect to balance categorical average vs prior. Higher value means stronger regularization. The value must be strictly bigger than 0. Higher values mean a flatter S-curve (see min_samples_leaf). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = TargetEncoder(cols=['CHAS', 'RAD'], min_samples_leaf=20, smoothing=10).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, from https://dl.acm.org/citation.cfm?id=507538 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', min_samples_leaf=1, smoothing=1.0): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self.ordinal_encoder = None self.min_samples_leaf = min_samples_leaf if min_samples_leaf == 1: warnings.warn("Default parameter min_samples_leaf will change in version 2.6." 
"See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self.smoothing = smoothing if min_samples_leaf == 1.0: warnings.warn("Default parameter smoothing will change in version 2.6." "See https://github.com/scikit-learn-contrib/category_encoders/issues/327", category=FutureWarning) self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.feature_names = None def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self.mapping = self.fit_target_encoding(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] X_temp = self.transform(X) generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def fit_target_encoding(self, X, y): mapping = {} for switch in self.ordinal_encoder.category_mapping: col = switch.get('col') values = switch.get('mapping') prior = self._mean = y.mean() stats = y.groupby(X[col]).agg(['count', 'mean']) smoove = 1 / (1 + np.exp(-(stats['count'] - self.min_samples_leaf) / self.smoothing)) smoothing = prior * (1 - smoove) + stats['mean'] * smoove # @ToDo delete this in version 2.6 smoothing[stats['count'] == 1] = prior if self.handle_unknown == 'return_nan': smoothing.loc[-1] = np.nan elif self.handle_unknown == 'value': smoothing.loc[-1] = prior if self.handle_missing == 'return_nan': smoothing.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': smoothing.loc[-2] = prior mapping[col] = smoothing return mapping def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target info (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Unexpected categories found in dataframe') X = self.target_encode(X) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def target_encode(self, X_in): X = X_in.copy(deep=True) for col in self.cols: X[col] = X[col].map(self.mapping[col]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
PaulWestenthanner
3ae37104c2a5884f701d518fab234a3cb8859dd0
a18cb64a81310a5e515c7b21255597b4dfb29b86
yes this is a typo. You're right. I noticed it myself and already fixed it in this commit https://github.com/scikit-learn-contrib/category_encoders/commit/aed0fb2ce5a7ed2b11d04e891e523411c5f0e5fa.
PaulWestenthanner
106
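For reference, the regularization that `min_samples_leaf` and `smoothing` control in `fit_target_encoding` (shown in the diffs above) is a logistic blend of the per-category mean with the global prior. A small worked example of that formula, with illustrative numbers:

```python
import numpy as np

# Blending formula from fit_target_encoding, with made-up inputs.
prior, cat_mean = 0.5, 0.8          # global target mean, per-category mean
n, k, f = 20, 20, 10                # category count, min_samples_leaf, smoothing
smoove = 1 / (1 + np.exp(-(n - k) / f))   # hits 0.5 exactly when n == k
blended = prior * (1 - smoove) + cat_mean * smoove
print(round(float(smoove), 3), round(float(blended), 3))  # 0.5 0.65
```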
scikit-learn-contrib/category_encoders
334
Fix pandas future warning for dropping invariants
When I use `drop_invariant=True`, I get a `FutureWarning` because `df.drop` is switching to named kwargs only. This fixes it. It should also be substantially more performant as the DF is only modified once.
null
2022-01-18 14:07:13+00:00
2022-02-14 07:40:26+00:00
category_encoders/backward_difference.py
"""Backward difference contrast encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from patsy.contrasts import Diff import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class BackwardDifferenceEncoder(BaseEstimator, TransformerMixin): """Backward difference contrast coding for encoding categorical variables. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BackwardDifferenceEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 21 columns): intercept 506 non-null int64 CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null float64 RAD_1 506 non-null float64 RAD_2 506 non-null float64 RAD_3 506 non-null float64 RAD_4 506 non-null float64 RAD_5 506 non-null float64 RAD_6 506 non-null float64 RAD_7 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(20), int64(1) memory usage: 83.1 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.mapping = mapping self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.cols = cols self.ordinal_encoder = None self._dim = None self.feature_names = None def fit(self, X, y=None, **kwargs): """Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds generally invariant columns to drop consistently. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # if the input dataset isn't already a dataframe, convert it to one (using default column names) # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # train an ordinal pre-encoder self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) ordinal_mapping = self.ordinal_encoder.category_mapping mappings_out = [] for switch in ordinal_mapping: values = switch.get('mapping') col = switch.get('col') column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown) mappings_out.append({'col': col, 'mapping': column_mapping, }) self.mapping = mappings_out X_temp = self.transform(X, override_return_df=True) self.feature_names = X_temp.columns.tolist() # drop all output columns with 0 variance. if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim, )) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.backward_difference_coding(X, mapping=self.mapping) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values @staticmethod def fit_backward_difference_coding(col, values, handle_missing, handle_unknown): if handle_missing == 'value': values = values[values > 0] values_to_encode = values.values if len(values) < 2: return pd.DataFrame(index=values_to_encode) if handle_unknown == 'indicator': values_to_encode = np.append(values_to_encode, -1) backwards_difference_matrix = Diff().code_without_intercept(values_to_encode) df = pd.DataFrame(data=backwards_difference_matrix.matrix, index=values_to_encode, columns=[str(col) + '_%d' % (i, ) for i in range(len(backwards_difference_matrix.column_suffixes))]) if handle_unknown == 'return_nan': df.loc[-1] = np.nan elif handle_unknown == 'value': df.loc[-1] = np.zeros(len(values_to_encode) - 1) if handle_missing == 'return_nan': df.loc[values.loc[np.nan]] = np.nan elif handle_missing == 'value': df.loc[-2] = np.zeros(len(values_to_encode) - 1) return df @staticmethod def backward_difference_coding(X_in, mapping): """ """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() X['intercept'] = pd.Series([1] * X.shape[0], index=X.index) for switch in mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df.set_index(X.index, inplace=True) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns cols = ['intercept'] + cols return X.reindex(columns=cols) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""Backward difference contrast encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from patsy.contrasts import Diff import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class BackwardDifferenceEncoder(BaseEstimator, TransformerMixin): """Backward difference contrast coding for encoding categorical variables. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BackwardDifferenceEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 21 columns): intercept 506 non-null int64 CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null float64 RAD_1 506 non-null float64 RAD_2 506 non-null float64 RAD_3 506 non-null float64 RAD_4 506 non-null float64 RAD_5 506 non-null float64 RAD_6 506 non-null float64 RAD_7 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(20), int64(1) memory usage: 83.1 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.mapping = mapping self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.cols = cols self.ordinal_encoder = None self._dim = None self.feature_names = None def fit(self, X, y=None, **kwargs): """Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds generally invariant columns to drop consistently. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # if the input dataset isn't already a dataframe, convert it to one (using default column names) # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # train an ordinal pre-encoder self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) ordinal_mapping = self.ordinal_encoder.category_mapping mappings_out = [] for switch in ordinal_mapping: values = switch.get('mapping') col = switch.get('col') column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown) mappings_out.append({'col': col, 'mapping': column_mapping, }) self.mapping = mappings_out X_temp = self.transform(X, override_return_df=True) self.feature_names = X_temp.columns.tolist() # drop all output columns with 0 variance. if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim, )) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.backward_difference_coding(X, mapping=self.mapping) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values @staticmethod def fit_backward_difference_coding(col, values, handle_missing, handle_unknown): if handle_missing == 'value': values = values[values > 0] values_to_encode = values.values if len(values) < 2: return pd.DataFrame(index=values_to_encode) if handle_unknown == 'indicator': values_to_encode = np.append(values_to_encode, -1) backwards_difference_matrix = Diff().code_without_intercept(values_to_encode) df = pd.DataFrame(data=backwards_difference_matrix.matrix, index=values_to_encode, columns=[str(col) + '_%d' % (i, ) for i in range(len(backwards_difference_matrix.column_suffixes))]) if handle_unknown == 'return_nan': df.loc[-1] = np.nan elif handle_unknown == 'value': df.loc[-1] = np.zeros(len(values_to_encode) - 1) if handle_missing == 'return_nan': df.loc[values.loc[np.nan]] = np.nan elif handle_missing == 'value': df.loc[-2] = np.zeros(len(values_to_encode) - 1) return df @staticmethod def backward_difference_coding(X_in, mapping): """ """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() X['intercept'] = pd.Series([1] * X.shape[0], index=X.index) for switch in mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df.set_index(X.index, inplace=True) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns cols = ['intercept'] + cols return X.reindex(columns=cols) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
jona-sassenhagen
531a271424fcea26df380709ac5700fb2d88c33c
d737e1758b5b206286251cf79cf35f1496154e56
Why'd you remove `inplace=True` here?
zachmayer
107
scikit-learn-contrib/category_encoders
334
Fix pandas future warning for dropping invariants
When I use `drop_invariant=True`, I get a `FutureWarning` because `df.drop` is moving to keyword-only arguments. This PR fixes that. It should also be substantially more performant, since the DataFrame is only modified once instead of once per column.
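For context, a minimal sketch of the deprecated positional call versus the keyword-only replacement. The DataFrame and column names here are made up, the snippet assumes pandas 1.x (where the positional form still runs), and the exact warning text depends on the pandas version:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
drop_cols = ["b", "c"]

# Deprecated pattern: axis passed positionally, one drop per column.
# Recent 1.x versions emit a FutureWarning about keyword-only arguments.
for col in drop_cols:
    df.drop(col, 1, inplace=True)

# Replacement: keyword arguments, all columns dropped in a single call.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
df = df.drop(columns=drop_cols)
```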
null
2022-01-18 14:07:13+00:00
2022-02-14 07:40:26+00:00
category_encoders/backward_difference.py
"""Backward difference contrast encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from patsy.contrasts import Diff import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class BackwardDifferenceEncoder(BaseEstimator, TransformerMixin): """Backward difference contrast coding for encoding categorical variables. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BackwardDifferenceEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 21 columns): intercept 506 non-null int64 CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null float64 RAD_1 506 non-null float64 RAD_2 506 non-null float64 RAD_3 506 non-null float64 RAD_4 506 non-null float64 RAD_5 506 non-null float64 RAD_6 506 non-null float64 RAD_7 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(20), int64(1) memory usage: 83.1 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.mapping = mapping self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.cols = cols self.ordinal_encoder = None self._dim = None self.feature_names = None def fit(self, X, y=None, **kwargs): """Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds generally invariant columns to drop consistently. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # if the input dataset isn't already a dataframe, convert it to one (using default column names) # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # train an ordinal pre-encoder self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) ordinal_mapping = self.ordinal_encoder.category_mapping mappings_out = [] for switch in ordinal_mapping: values = switch.get('mapping') col = switch.get('col') column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown) mappings_out.append({'col': col, 'mapping': column_mapping, }) self.mapping = mappings_out X_temp = self.transform(X, override_return_df=True) self.feature_names = X_temp.columns.tolist() # drop all output columns with 0 variance. if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim, )) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.backward_difference_coding(X, mapping=self.mapping) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values @staticmethod def fit_backward_difference_coding(col, values, handle_missing, handle_unknown): if handle_missing == 'value': values = values[values > 0] values_to_encode = values.values if len(values) < 2: return pd.DataFrame(index=values_to_encode) if handle_unknown == 'indicator': values_to_encode = np.append(values_to_encode, -1) backwards_difference_matrix = Diff().code_without_intercept(values_to_encode) df = pd.DataFrame(data=backwards_difference_matrix.matrix, index=values_to_encode, columns=[str(col) + '_%d' % (i, ) for i in range(len(backwards_difference_matrix.column_suffixes))]) if handle_unknown == 'return_nan': df.loc[-1] = np.nan elif handle_unknown == 'value': df.loc[-1] = np.zeros(len(values_to_encode) - 1) if handle_missing == 'return_nan': df.loc[values.loc[np.nan]] = np.nan elif handle_missing == 'value': df.loc[-2] = np.zeros(len(values_to_encode) - 1) return df @staticmethod def backward_difference_coding(X_in, mapping): """ """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() X['intercept'] = pd.Series([1] * X.shape[0], index=X.index) for switch in mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df.set_index(X.index, inplace=True) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns cols = ['intercept'] + cols return X.reindex(columns=cols) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""Backward difference contrast encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from patsy.contrasts import Diff import numpy as np from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class BackwardDifferenceEncoder(BaseEstimator, TransformerMixin): """Backward difference contrast coding for encoding categorical variables. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BackwardDifferenceEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 21 columns): intercept 506 non-null int64 CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null float64 RAD_1 506 non-null float64 RAD_2 506 non-null float64 RAD_3 506 non-null float64 RAD_4 506 non-null float64 RAD_5 506 non-null float64 RAD_6 506 non-null float64 RAD_7 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(20), int64(1) memory usage: 83.1 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.mapping = mapping self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.cols = cols self.ordinal_encoder = None self._dim = None self.feature_names = None def fit(self, X, y=None, **kwargs): """Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds generally invariant columns to drop consistently. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # if the input dataset isn't already a dataframe, convert it to one (using default column names) # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # train an ordinal pre-encoder self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) ordinal_mapping = self.ordinal_encoder.category_mapping mappings_out = [] for switch in ordinal_mapping: values = switch.get('mapping') col = switch.get('col') column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown) mappings_out.append({'col': col, 'mapping': column_mapping, }) self.mapping = mappings_out X_temp = self.transform(X, override_return_df=True) self.feature_names = X_temp.columns.tolist() # drop all output columns with 0 variance. if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim, )) if not list(self.cols): return X X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.backward_difference_coding(X, mapping=self.mapping) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values @staticmethod def fit_backward_difference_coding(col, values, handle_missing, handle_unknown): if handle_missing == 'value': values = values[values > 0] values_to_encode = values.values if len(values) < 2: return pd.DataFrame(index=values_to_encode) if handle_unknown == 'indicator': values_to_encode = np.append(values_to_encode, -1) backwards_difference_matrix = Diff().code_without_intercept(values_to_encode) df = pd.DataFrame(data=backwards_difference_matrix.matrix, index=values_to_encode, columns=[str(col) + '_%d' % (i, ) for i in range(len(backwards_difference_matrix.column_suffixes))]) if handle_unknown == 'return_nan': df.loc[-1] = np.nan elif handle_unknown == 'value': df.loc[-1] = np.zeros(len(values_to_encode) - 1) if handle_missing == 'return_nan': df.loc[values.loc[np.nan]] = np.nan elif handle_missing == 'value': df.loc[-2] = np.zeros(len(values_to_encode) - 1) return df @staticmethod def backward_difference_coding(X_in, mapping): """ """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() X['intercept'] = pd.Series([1] * X.shape[0], index=X.index) for switch in mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df.set_index(X.index, inplace=True) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns cols = ['intercept'] + cols return X.reindex(columns=cols) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
jona-sassenhagen
531a271424fcea26df380709ac5700fb2d88c33c
d737e1758b5b206286251cf79cf35f1496154e56
I think the pandas dev team is not very happy with `inplace`, and there are semi-regular attempts to get rid of it entirely. I personally find `inplace` neither the most readable nor the most transparent way to write pandas code, so I try to avoid it. There's an extensive discussion here: https://github.com/pandas-dev/pandas/issues/16529
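To illustrate the readability argument, a small hypothetical example (not from the repo): the in-place form mutates the frame and returns `None`, which breaks method chaining, while the non-in-place form composes naturally:

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3], "drop_me": [0, 0, 0]})

# inplace=True mutates df and returns None, so chaining fails:
# df.drop(columns=["drop_me"], inplace=True).head()  # AttributeError

# The non-inplace form returns a new DataFrame and chains cleanly:
result = df.drop(columns=["drop_me"]).head()
```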
jona-sassenhagen
108
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking the correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (while keeping hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to lay out my ideas to the community and enter the discussion as soon as possible. While going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest fix is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()` - Docstrings (II): The transform functions often state the output shape; I'm not sure this is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
whether one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing it on my own, and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
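As a rough illustration of the proposed structure, a minimal sketch (simplified, with assumed names; the actual `BaseEncoder` in the PR handles much more, e.g. invariant columns and `handle_missing`):

```python
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin


class BaseEncoderSketch(BaseEstimator, TransformerMixin):
    """Owns the shared plumbing; subclasses implement _fit/_transform."""

    def fit(self, X, y=None):
        X = pd.DataFrame(X)              # shared input conversion
        self._fit(X, y)                  # encoder-specific fitting
        self.feature_names = list(X.columns)
        return self

    def transform(self, X):
        X = pd.DataFrame(X)              # shared input conversion
        return self._transform(X)        # encoder-specific transform

    def _fit(self, X, y):                # overridden by each encoder
        raise NotImplementedError

    def _transform(self, X):             # overridden by each encoder
        raise NotImplementedError
```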
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/binary.py
"""Binary encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin import category_encoders as ce __author__ = 'willmcginnis' class BinaryEncoder(BaseEstimator, TransformerMixin): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.verbose = verbose self.cols = cols self.mapping = mapping self.drop_invariant = drop_invariant self.return_df = return_df self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.base_n_encoder = ce.BaseNEncoder(base=2, verbose=self.verbose, cols=self.cols, mapping=self.mapping, drop_invariant=self.drop_invariant, return_df=self.return_df, handle_unknown=self.handle_unknown, handle_missing=self.handle_missing) def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self.base_n_encoder.fit(X, y, **kwargs) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" return self.base_n_encoder.transform(X) def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ return self.base_n_encoder.inverse_transform(X_in) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ return self.base_n_encoder.get_feature_names()
"""Binary encoding""" from functools import partialmethod from category_encoders import utils from category_encoders.basen import BaseNEncoder __author__ = 'willmcginnis' class BinaryEncoder(BaseNEncoder): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ encoding_relation = utils.EncodingRelation.ONE_TO_M __init__ = partialmethod(BaseNEncoder.__init__, base=2)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
I think it should be fine, but have you checked that cloning and get/set params work?
bmreiniger
109
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking the correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (while keeping hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to lay out my ideas to the community and enter the discussion as soon as possible. While going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest fix is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()` - Docstrings (II): The transform functions often state the output shape; I'm not sure this is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
whether one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing it on my own, and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/binary.py
"""Binary encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin import category_encoders as ce __author__ = 'willmcginnis' class BinaryEncoder(BaseEstimator, TransformerMixin): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.verbose = verbose self.cols = cols self.mapping = mapping self.drop_invariant = drop_invariant self.return_df = return_df self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.base_n_encoder = ce.BaseNEncoder(base=2, verbose=self.verbose, cols=self.cols, mapping=self.mapping, drop_invariant=self.drop_invariant, return_df=self.return_df, handle_unknown=self.handle_unknown, handle_missing=self.handle_missing) def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self.base_n_encoder.fit(X, y, **kwargs) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" return self.base_n_encoder.transform(X) def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ return self.base_n_encoder.inverse_transform(X_in) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ return self.base_n_encoder.get_feature_names()
"""Binary encoding""" from functools import partialmethod from category_encoders import utils from category_encoders.basen import BaseNEncoder __author__ = 'willmcginnis' class BinaryEncoder(BaseNEncoder): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ encoding_relation = utils.EncodingRelation.ONE_TO_M __init__ = partialmethod(BaseNEncoder.__init__, base=2)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
What do you mean by cloning? I just checked that get/set params work. The tests still pass as well.
PaulWestenthanner
110
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking the correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (while keeping hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to lay out my ideas to the community and enter the discussion as soon as possible. While going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest fix is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()` - Docstrings (II): The transform functions often state the output shape; I'm not sure this is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
whether one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing it on my own, and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/binary.py
"""Binary encoding""" import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin import category_encoders as ce __author__ = 'willmcginnis' class BinaryEncoder(BaseEstimator, TransformerMixin): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'): self.verbose = verbose self.cols = cols self.mapping = mapping self.drop_invariant = drop_invariant self.return_df = return_df self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.base_n_encoder = ce.BaseNEncoder(base=2, verbose=self.verbose, cols=self.cols, mapping=self.mapping, drop_invariant=self.drop_invariant, return_df=self.return_df, handle_unknown=self.handle_unknown, handle_missing=self.handle_missing) def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self.base_n_encoder.fit(X, y, **kwargs) return self def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" return self.base_n_encoder.transform(X) def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ return self.base_n_encoder.inverse_transform(X_in) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ return self.base_n_encoder.get_feature_names()
"""Binary encoding""" from functools import partialmethod from category_encoders import utils from category_encoders.basen import BaseNEncoder __author__ = 'willmcginnis' class BinaryEncoder(BaseNEncoder): """Binary encoding for categorical variables, similar to onehot, but stores categories as binary bitstrings. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = BinaryEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 18 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_0 506 non-null int64 CHAS_1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_0 506 non-null int64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(7) memory usage: 71.3 KB None """ encoding_relation = utils.EncodingRelation.ONE_TO_M __init__ = partialmethod(BaseNEncoder.__init__, base=2)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
`sklearn.base.clone` gets used to copy an estimator, keeping hyperparameters but dropping fitted parameters. It uses `get_params` and then instantiates a new object of the same class from the retrieved parameters, so I expect this is fine as long as `get_params` works.
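A quick check along these lines (hypothetical snippet, not part of the PR) that cloning works for the `partialmethod`-based encoder, assuming `get_params` behaves as reported above:

```python
from sklearn.base import clone
from category_encoders import BinaryEncoder

enc = BinaryEncoder(cols=["CHAS", "RAD"], drop_invariant=True)
enc2 = clone(enc)                  # re-instantiates from get_params()
assert enc2.get_params() == enc.get_params()
assert enc2 is not enc             # a fresh, unfitted copy
```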
bmreiniger
111
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking the correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (while keeping hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to lay out my ideas to the community and enter the discussion as soon as possible. While going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest fix is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()` - Docstrings (II): The transform functions often state the output shape; I'm not sure this is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
whether one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing it on my own, and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
```suggestion
nan_cond = is_nan & unseen_values.isnull().any()
if nan_cond.any():
    X.loc[nan_cond, col] = self._mean
```
jona-sassenhagen
112
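The suggestion in the record above guards the missing-value fill behind `nan_cond.any()`. The point, per the comment added in the after-code, is pandas' categorical dtype: writing a value that is not among a column's declared categories can raise. A minimal sketch of the failure mode the guard avoids — the column name and fill value here are made up for illustration, not taken from the PR:

```python
import pandas as pd

# A categorical column with no missing values, mimicking a pd.Categorical
# input column inside CatBoostEncoder._transform.
X = pd.DataFrame({"color": pd.Categorical(["red", "blue", "red"])})

is_nan = X["color"].isnull()   # all False: nothing is missing
nan_cond = is_nan              # stand-in for `is_nan & unseen_values.isnull().any()`

# Unguarded: depending on the pandas version, even an all-False mask can
# raise "Cannot setitem on a Categorical with a new category", because
# 0.5 is not one of the declared categories.
# X.loc[nan_cond, "color"] = 0.5

# Guarded, as in the suggestion: only write when there is something to fill.
if nan_cond.any():
    X.loc[nan_cond, "color"] = 0.5
```

With the guard in place, the assignment only runs when the mask actually selects rows, so categorical columns without missing values are never written to.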
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and a `Un/SupervisedTransformerMixin` that almost (c.f. below) all encoders inherit from the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributor. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determine and handle invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply refit with different columns from LOO encoder to all others. - Introduce a metadata flag if an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing an `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for LOO-Encoder is deployed to all other encoders. However this only ensures that the `cols` parameter is treated correctly when calling fit twice. Not the other params. The solution would be probably to define a fitting-params object that cleared on every call to fit (but keeping some hyperparmeters like regularisation etc). ## Further ToDo's This PR is work in progress. I just want to line out my ideas to the community as soon as possible and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and / or bugs related as well as unrelated to the restructuring: - The `_score` function that applies the category mapping and applies random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be re-wirtten to use `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overwrites the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions often the output shape is given, I'm not sure if this is correct for all encoders since there are some copy-paste error. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
Isn't this just
```suggestion
return_map = pd.Series(dict(enumerate(categories)))
```
jona-sassenhagen
113
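The second review comment is a pure readability change: `enumerate(categories)` already yields the `(code, category)` pairs that the dict comprehension rebuilds, so `dict(enumerate(categories))` produces the identical mapping. A quick sketch confirming the equivalence — the category values are invented for the example:

```python
import numpy as np
import pandas as pd

categories = np.append(pd.Index(["a", "b"]), np.nan)  # as built in _fit_column_map

# Spelling from the before-code:
verbose = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))

# Spelling from the suggestion:
concise = pd.Series(dict(enumerate(categories)))

assert verbose.equals(concise)  # same {0: 'a', 1: 'b', 2: nan} mapping
```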
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and a `Un/SupervisedTransformerMixin` that almost (c.f. below) all encoders inherit from the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributor. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determine and handle invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply refit with different columns from LOO encoder to all others. - Introduce a metadata flag if an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing an `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for LOO-Encoder is deployed to all other encoders. However this only ensures that the `cols` parameter is treated correctly when calling fit twice. Not the other params. The solution would be probably to define a fitting-params object that cleared on every call to fit (but keeping some hyperparmeters like regularisation etc). ## Further ToDo's This PR is work in progress. I just want to line out my ideas to the community as soon as possible and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and / or bugs related as well as unrelated to the restructuring: - The `_score` function that applies the category mapping and applies random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be re-wirtten to use `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overwrites the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions often the output shape is given, I'm not sure if this is correct for all encoders since there are some copy-paste error. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
Thanks for that suggestion. I've implemented it.
PaulWestenthanner
114
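All three records carry the docstring's warning that CatBoost encoding runs a single ordered pass over the training data, so target-sorted data must be shuffled before fitting. A minimal usage sketch against the public API — the data here are synthetic and the column name is illustrative:

```python
import numpy as np
import pandas as pd
from category_encoders import CatBoostEncoder

rng = np.random.default_rng(42)
X = pd.DataFrame({"city": rng.choice(["chicago", "st louis"], size=100)})
y = pd.Series((rng.random(100) > 0.5).astype(float))

# Shuffle first: the encoder accumulates target statistics row by row,
# so data sorted by target would leak the target into the encoding.
perm = rng.permutation(len(X))
X = X.iloc[perm].reset_index(drop=True)
y = y.iloc[perm].reset_index(drop=True)

enc = CatBoostEncoder(cols=["city"], a=1)
X_train_enc = enc.fit_transform(X, y)  # training: on-the-fly, varies by row
X_new_enc = enc.transform(X)           # inference: smoothed per-level means
```

Calling `transform` without `y` returns the smoothed per-level target means, matching the `y is None` branch of `_transform` in the code above.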
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` from which almost all encoders (cf. below) inherit, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to outline my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
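To make the proposed pattern concrete, here is a minimal illustrative sketch. Only the names `BaseEncoder`, `SupervisedTransformerMixin`, `_fit` and `_transform` come from this PR; the bodies (and the toy `MeanEncoder`) are simplified assumptions, not the actual implementation:

```python
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin


class BaseEncoder(BaseEstimator, TransformerMixin):
    """Shared boilerplate: input conversion, column selection, bookkeeping."""

    def __init__(self, cols=None):
        self.cols = cols

    def fit(self, X, y=None, **kwargs):
        X = pd.DataFrame(X).copy()                   # unified input conversion
        if self.cols is None:                        # default: all string columns
            self.cols = X.select_dtypes(include="object").columns.tolist()
        self._fit(X, y, **kwargs)                    # encoder-specific logic only
        self.feature_names = X.columns.tolist()      # bookkeeping done once, here
        return self

    def _fit(self, X, y, **kwargs):                  # implemented by each encoder
        raise NotImplementedError


class SupervisedTransformerMixin:
    """Transform path shared by encoders fitted against a target."""

    def transform(self, X, y=None):
        X = pd.DataFrame(X).copy()                   # shared conversion and checks
        return self._transform(X, y)                 # encoder-specific logic only


class MeanEncoder(BaseEncoder, SupervisedTransformerMixin):
    """Toy target-mean encoder: only _fit/_transform need to be written."""

    def _fit(self, X, y, **kwargs):
        self.mapping_ = {c: y.groupby(X[c]).mean() for c in self.cols}

    def _transform(self, X, y=None):
        for c, m in self.mapping_.items():
            X[c] = X[c].map(m)
        return X
```

The point of the split is that a new encoder only supplies `_fit`/`_transform`, while all input handling lives in one place.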
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
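Spelled out, the on-the-fly ("cumsum") encoding in the transform above computes, for the i-th occurrence of a category level in training order (pandas' `cumsum` includes the current row, hence the subtraction of `y`):

```latex
\hat{x}_i \;=\; \frac{\mathrm{cumsum}_i - y_i + a\,\bar{y}}{\mathrm{cumcount}_i + a}
          \;=\; \frac{\sum_{j<i} y_j + a\,\bar{y}}{(i-1) + a}
```

where \(\bar{y}\) is the global target mean (`self._mean`) and \(a\) is the additive smoothing parameter; for the first occurrence of a level this reduces to \(\bar{y}\), the global mean.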
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
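As the docstring warns, the single-pass, order-dependent fit makes shuffling essential in practice. A minimal usage sketch (the toy data is made up; `CatBoostEncoder` and its `cols`/`a` parameters are as above):

```python
import numpy as np
import pandas as pd
from category_encoders import CatBoostEncoder

X = pd.DataFrame({"city": ["a", "b", "a", "b", "a", "c"]})
y = pd.Series([1, 0, 1, 1, 0, 1])

# Shuffle first: the encoder consumes the target in one ordered pass,
# so data sorted by target would leak the target into the encoding.
perm = np.random.permutation(len(X))
X = X.iloc[perm].reset_index(drop=True)
y = y.iloc[perm].reset_index(drop=True)

enc = CatBoostEncoder(cols=["city"], a=1)
X_train = enc.fit_transform(X, y)  # with y: encoded values vary by row position
X_test = enc.transform(X)          # without y: plain smoothed target means
```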
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
thanks!
PaulWestenthanner
115
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` from which almost all encoders (cf. below) inherit, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to outline my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
You've added a tag `predict_depends_on_y`, but don't seem to use it anywhere; is there a later purpose this serves? Shouldn't it be `transform_depends_on_y` anyway?
bmreiniger
116
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` from which almost all encoders (cf. below) inherit, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to outline my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/cat_boost.py
"""CatBoost coding""" import numpy as np import pandas as pd from sklearn.base import BaseEstimator import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self._mean = None self.random_state = random_state self.sigma = sigma self.feature_names = None self.a = a def fit(self, X, y, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # unite the input into pandas types X, y = util.convert_inputs(X, y) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') categories = self._fit( X, y, cols=self.cols ) self.mapping = categories X_temp = self.transform(X, y, override_return_df=True) self.feature_names = X_temp.columns.tolist() if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] when transform by leave one out None, when transform without target information (such as transform test set) Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. 
""" if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError('Must train encoder before it can be used to transform data.') # unite the input into pandas types X, y = util.convert_inputs(X, y) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') if not list(self.cols): return X X = self._transform( X, y, mapping=self.mapping ) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit(self, X_in, y, cols=None): X = X_in.copy(deep=True) if cols is None: cols = X.columns.values self._mean = y.mean() return {col: self._fit_column_map(X[col], y) for col in cols} def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)])) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map) def _transform(self, X_in, y, mapping=None): """ The model uses a single column of floats to represent the means of the target variables. """ X = X_in.copy(deep=True) random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X_in[col].isnull() is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. 
# See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError('Must fit data first. Affected feature names are not known before.') else: return self.feature_names
"""CatBoost coding""" import numpy as np import pandas as pd import category_encoders.utils as util from sklearn.utils.random import check_random_state __author__ = 'Jan Motl' class CatBoostEncoder(util.BaseEncoder, util.SupervisedTransformerMixin): """CatBoost Encoding for categorical features. Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper. This is very similar to leave-one-out encoding, but calculates the values "on-the-fly". Consequently, the values naturally vary during the training phase and it is not necessary to add random noise. Beware, the training data have to be randomly permutated. E.g.: # Random permutation perm = np.random.permutation(len(X)) X = X.iloc[perm].reset_index(drop=True) y = y.iloc[perm].reset_index(drop=True) This is necessary because some data sets are sorted based on the target value and this coder encodes the features on-the-fly in a single pass. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. handle_unknown: str options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean. sigma: float adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched). sigma gives the standard deviation (spread or "width") of the normal distribution. a: float additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null float64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null float64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(13) memory usage: 51.5 KB None References ---------- .. [1] Transforming categorical features to numerical features, from https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ .. 
[2] CatBoost: unbiased boosting with categorical features, from https://arxiv.org/abs/1706.09516 """ prefit_ordinal = False encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1): super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self._mean = None self.random_state = random_state self.sigma = sigma self.a = a def _fit(self, X, y, **kwargs): X = X.copy(deep=True) self._mean = y.mean() self.mapping = {col: self._fit_column_map(X[col], y) for col in self.cols} def _transform(self, X, y=None): random_state_ = check_random_state(self.random_state) # Prepare the data if y is not None: # Convert bools to numbers (the target must be summable) y = y.astype('double') for col, colmap in self.mapping.items(): level_notunique = colmap['count'] > 1 unique_train = colmap.index unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype) is_nan = X[col].isnull() is_unknown_value = X[col].isin(unseen_values.dropna().astype(object)) if self.handle_unknown == 'error' and is_unknown_value.any(): raise ValueError('Columns to be encoded can not contain new values') if y is None: # Replace level with its mean target; if level occurs only once, use global mean level_means = ((colmap['sum'] + self._mean * self.a) / (colmap['count'] + self.a)).where(level_notunique, self._mean) X[col] = X[col].map(level_means) else: # Simulation of CatBoost implementation, which calculates leave-one-out on the fly. # The nice thing about this is that it helps to prevent overfitting. The bad thing # is that CatBoost uses many iterations over the data. But we run just one iteration. # Still, it works better than leave-one-out without any noise. # See: # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/ # Cumsum does not work nicely with None (while cumcount does). # As a workaround, we cast the grouping column as string. # See: issue #209 temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount']) X[col] = (temp['cumsum'] - y + self._mean * self.a) / (temp['cumcount'] + self.a) if self.handle_unknown == 'value': if X[col].dtype.name == 'category': X[col] = X[col].astype(float) X.loc[is_unknown_value, col] = self._mean elif self.handle_unknown == 'return_nan': X.loc[is_unknown_value, col] = np.nan if self.handle_missing == 'value': # only set value if there are actually missing values. # In case of pd.Categorical columns setting values that are not seen in pd.Categorical gives an error. nan_cond = is_nan & unseen_values.isnull().any() if nan_cond.any(): X.loc[nan_cond, col] = self._mean elif self.handle_missing == 'return_nan': X.loc[is_nan, col] = np.nan if self.sigma is not None and y is not None: X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0]) return X def _more_tags(self): tags = super()._more_tags() tags["predict_depends_on_y"] = True return tags def _fit_column_map(self, series, y): category = pd.Categorical(series) categories = category.categories codes = category.codes.copy() codes[codes == -1] = len(categories) categories = np.append(categories, np.nan) return_map = pd.Series(dict(enumerate(categories))) result = y.groupby(codes).agg(['sum', 'count']) return result.rename(return_map)
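The new `nan_cond.any()` guard in the `handle_missing='value'` branch above exists because, as the in-code comment notes, assigning an unseen value into a `pd.Categorical` column raises an error; a small reproduction sketch of that kind of failure (exact behaviour depends on the pandas version):

```python
import pandas as pd

df = pd.DataFrame({"col": pd.Series(["a", "b", "a"], dtype="category")})
mask = pd.Series([False, False, False])

try:
    # 0.5 is not an existing category; on some pandas versions this raises
    # "Cannot setitem on a Categorical with a new category" even when the
    # mask selects no rows - hence the guard before the assignment.
    df.loc[mask, "col"] = 0.5
except (TypeError, ValueError) as err:
    print("raised:", err)
```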
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
This was also introduced for "dynamic docstrings" and kept since it carries useful information. (Here too you could imagine a test, e.g. one checking that encoders whose transform does not depend on y actually give the same result with and without y.) I agree that `transform_depends_on_y` is a more suitable name.
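A minimal sketch of such a test might look as follows; the tag name `predict_depends_on_y` comes from the diff above, while the encoder choice and the `_more_tags()` access pattern are illustrative assumptions:

```python
# Sketch: an encoder whose transform does not depend on y should produce
# the same output whether or not y is passed at transform time.
import pandas as pd
import category_encoders as encoders

X = pd.DataFrame({'cat': ['a', 'b', 'a', 'c']})
y = pd.Series([1, 0, 1, 0])

enc = encoders.TargetEncoder(cols=['cat']).fit(X, y)
if not enc._more_tags().get('predict_depends_on_y', False):
    pd.testing.assert_frame_equal(enc.transform(X), enc.transform(X, y))
```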
PaulWestenthanner
117
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost (cf. below) all encoders inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to outline my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and / or bugs, related as well as unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure if it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g. if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
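As a rough illustration of the streamlining described in this PR, a new encoder now only implements `_fit` and `_transform`. The toy `LengthEncoder` below is hypothetical and assumes the `BaseEncoder`/`UnsupervisedTransformerMixin` API introduced here:

```python
# Hypothetical toy encoder on top of the new base classes: input conversion,
# column selection and invariant handling all live in BaseEncoder.
import category_encoders.utils as util

class LengthEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin):
    prefit_ordinal = False
    encoding_relation = util.EncodingRelation.ONE_TO_ONE

    def _fit(self, X, y=None, **kwargs):
        return self  # nothing to learn for this toy encoder

    def _transform(self, X):
        for col in self.cols:
            X[col] = X[col].astype(str).str.len()  # category -> its string length
        return X
```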
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/count.py
"""Count Encoder""" from __future__ import division import numpy as np import pandas as pd import category_encoders.utils as util from category_encoders.ordinal import OrdinalEncoder from copy import copy from sklearn.base import BaseEstimator, TransformerMixin __author__ = 'joshua t. dunn' class CountEncoder(BaseEstimator, TransformerMixin): def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', min_group_size=None, combine_min_nan_groups=None, min_group_name=None, normalize=False): """Count encoding for categorical features. For a given categorical feature, replace the names of the groups with the group counts. Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. normalize: bool or dict of {column : bool, ...}. whether to normalize the counts to the range (0, 1). See Pandas `value_counts` for more details. min_group_size: int, float or dict of {column : option, ...}. the minimal count threshold of a group needed to ensure it is not combined into a "leftovers" group. Default value is 0.01. If float in the range (0, 1), `min_group_size` is calculated as int(X.shape[0] * min_group_size). Note: This value may change type based on the `normalize` variable. If True this will become a float. If False, it will be an int. min_group_name: None, str or dict of {column : option, ...}. Set the name of the combined minimum groups when the defaults become too long. Default None. In this case the category names will be joined alphabetically with a `_` delimiter. Note: The default name can be long and may keep changing, for example, in cross-validation. combine_min_nan_groups: bool or dict of {column : bool, ...}. whether to combine the leftovers group with NaN group. Default True. Can also be forced to combine with 'force' meaning small groups are effectively counted as NaNs. Force can only used when 'handle_missing' is 'value' or 'error'. Note: Will not force if it creates an binary or invariant column. 
Example ------- >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> from category_encoders import CountEncoder >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(2) memory usage: 51.5 KB None References ---------- """ self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self.feature_names = None self.ordinal_encoder = None self._check_set_create_attrs() self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {} def fit(self, X, y=None, **kwargs): """Fit encoder according to X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self._check_set_create_dict_attrs() self._fit_count_encode(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [ x for x in generated_cols if X_temp[x].var() <= 10e-5 ] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.' 
) # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError( 'Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,) ) if not list(self.cols): return X X, _ = self._transform_count_encode(X, y) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit_count_encode(self, X_in, y): """Perform the count encoding.""" X = X_in.copy(deep=True) if self.cols is None: self.cols = X.columns.values self.mapping = {} for col in self.cols: mapping_values = X[col].value_counts(normalize=self._normalize[col]) ordinal_encoding = [m["mapping"] for m in self.ordinal_encoder.mapping if m["col"] == col][0] reversed_ordinal_enc = {v: k for k, v in ordinal_encoding.to_dict().items()} mapping_values.index = mapping_values.index.map(reversed_ordinal_enc) self.mapping[col] = mapping_values if self._handle_missing[col] == 'return_nan': self.mapping[col][np.NaN] = np.NaN # elif self._handle_missing[col] == 'value': #test_count.py failing self.mapping[col].loc[-2] = 0 if any([val is not None for val in self._min_group_size.values()]): self.combine_min_categories(X) def _transform_count_encode(self, X_in, y): """Perform the transform count encoding.""" X = X_in.copy(deep=True) for col in self.cols: # Treat None as np.nan X[col] = pd.Series([el if el is not None else np.NaN for el in X[col]], index=X[col].index) if self.handle_missing == "value": if not util.is_category(X[col].dtype): X[col] = X[col].fillna(np.nan) if self._min_group_size is not None: if col in self._min_group_categories.keys(): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].astype(object).map(self.mapping[col]) if isinstance(self._handle_unknown[col], (int, np.integer)): X[col] = X[col].fillna(self._handle_unknown[col]) elif (self._handle_unknown[col] == 'value' and X[col].isna().any() and self._handle_missing[col] != 'return_nan' ): X[col].replace(np.nan, 0, inplace=True) elif ( self._handle_unknown[col] == 'error' and X[col].isnull().any() ): raise ValueError(f'Missing data found in column {col} at transform time.') return X, self.mapping def combine_min_categories(self, X): """Combine small categories into a single category.""" for col, mapper in self.mapping.items(): if self._normalize[col] and isinstance(self._min_group_size[col], int): self._min_group_size[col] = self._min_group_size[col] / X.shape[0] elif not self._normalize and isinstance(self._min_group_size[col], float): self._min_group_size[col] = self._min_group_size[col] * X.shape[0] if self._combine_min_nan_groups[col] is True: min_groups_idx = mapper < self._min_group_size[col] elif self._combine_min_nan_groups[col] == 'force': min_groups_idx = ( (mapper < self._min_group_size[col]) | (mapper.index.isnull()) ) else: min_groups_idx = ( (mapper < self._min_group_size[col]) & (~mapper.index.isnull()) ) min_groups_sum = mapper.loc[min_groups_idx].sum() if ( min_groups_sum > 0 and min_groups_idx.sum() > 1 and not min_groups_idx.loc[~min_groups_idx.index.isnull()].all() ): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name[col] else: min_group_mapper_name = '_'.join([ str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values() ]) self._min_group_categories[col] = { cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist() } if not min_groups_idx.all(): mapper = mapper.loc[~min_groups_idx] 
mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper def _check_set_create_attrs(self): """Check attributes setting that don't play nicely `self.cols`.""" if not ( (self.combine_min_nan_groups in ['force', True, False, None]) or isinstance(self.combine_min_nan_groups, dict) ): raise ValueError( "'combine_min_nan_groups' should be one of: " "['force', True, False, None] or type dict." ) if ( self.handle_missing == 'return_nan' and self.combine_min_nan_groups == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for all columns." ) if ( self.combine_min_nan_groups is not None and self.min_group_size is None ): pass # raise ValueError( # "`combine_min_nan_groups` only works when `min_group_size` " # "is set for all columns." # ) if ( self.min_group_name is not None and self.min_group_size is None ): raise ValueError( "`min_group_name` only works when `min_group_size` is set " "for all columns." ) if self.combine_min_nan_groups is None: self.combine_min_nan_groups = True def _check_set_create_dict_attrs(self): """Check attributes that can be dicts and format for all `self.cols`.""" dict_attrs = { 'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value', } for attr_name, attr_default in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if col not in attr: attr[col] = attr_default setattr(self, '_' + attr_name, attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, '_' + attr_name, attr_dict) for col in self.cols: if ( self._handle_missing[col] == 'return_nan' and self._combine_min_nan_groups[col] == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for columns `%s`." % (col,) ) if ( self._combine_min_nan_groups[col] is not True and self._min_group_size[col] is None ): raise ValueError( "`combine_min_nan_groups` only works when `min_group_size`" "is set for column %s." % (col,) ) if ( self._min_group_name[col] is not None and self._min_group_size[col] is None ): raise ValueError( "`min_group_name` only works when `min_group_size`" "is set for column %s." % (col,) ) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError("CountEncoder has to be fitted to return feature names.") else: return self.feature_names
"""Count Encoder""" import numpy as np import pandas as pd import category_encoders.utils as util from category_encoders.ordinal import OrdinalEncoder from copy import copy __author__ = 'joshua t. dunn' class CountEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin): prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', min_group_size=None, combine_min_nan_groups=None, min_group_name=None, normalize=False): """Count encoding for categorical features. For a given categorical feature, replace the names of the groups with the group counts. Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. normalize: bool or dict of {column : bool, ...}. whether to normalize the counts to the range (0, 1). See Pandas `value_counts` for more details. min_group_size: int, float or dict of {column : option, ...}. the minimal count threshold of a group needed to ensure it is not combined into a "leftovers" group. Default value is 0.01. If float in the range (0, 1), `min_group_size` is calculated as int(X.shape[0] * min_group_size). Note: This value may change type based on the `normalize` variable. If True this will become a float. If False, it will be an int. min_group_name: None, str or dict of {column : option, ...}. Set the name of the combined minimum groups when the defaults become too long. Default None. In this case the category names will be joined alphabetically with a `_` delimiter. Note: The default name can be long and may keep changing, for example, in cross-validation. combine_min_nan_groups: bool or dict of {column : bool, ...}. whether to combine the leftovers group with NaN group. Default True. Can also be forced to combine with 'force' meaning small groups are effectively counted as NaNs. Force can only used when 'handle_missing' is 'value' or 'error'. Note: Will not force if it creates an binary or invariant column. 
Example ------- >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> from category_encoders import CountEncoder >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(2) memory usage: 51.5 KB None References ---------- """ super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self.ordinal_encoder = None self._check_set_create_attrs() self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {} def _fit(self, X, y=None, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self._check_set_create_dict_attrs() self._fit_count_encode(X_ordinal, y) return self def _transform(self, X): for col in self.cols: # Treat None as np.nan X[col] = pd.Series([el if el is not None else np.NaN for el in X[col]], index=X[col].index) if self.handle_missing == "value": if not util.is_category(X[col].dtype): X[col] = X[col].fillna(np.nan) if self._min_group_size is not None: if col in self._min_group_categories.keys(): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].astype(object).map(self.mapping[col]) if isinstance(self._handle_unknown[col], (int, np.integer)): X[col] = X[col].fillna(self._handle_unknown[col]) elif (self._handle_unknown[col] == 'value' and X[col].isna().any() and self._handle_missing[col] != 'return_nan' ): X[col].replace(np.nan, 0, inplace=True) elif ( self._handle_unknown[col] == 'error' and X[col].isnull().any() ): raise ValueError(f'Missing data found in column {col} at transform time.') return X def _fit_count_encode(self, X_in, y): """Perform the count encoding.""" X = X_in.copy(deep=True) if self.cols is None: self.cols = X.columns.values self.mapping = {} for col in self.cols: mapping_values = X[col].value_counts(normalize=self._normalize[col]) ordinal_encoding = [m["mapping"] for m in self.ordinal_encoder.mapping if m["col"] == col][0] reversed_ordinal_enc = {v: k for k, v in ordinal_encoding.to_dict().items()} mapping_values.index = mapping_values.index.map(reversed_ordinal_enc) self.mapping[col] = mapping_values if self._handle_missing[col] == 'return_nan': self.mapping[col][np.NaN] = np.NaN # elif self._handle_missing[col] == 'value': #test_count.py failing self.mapping[col].loc[-2] = 0 if any([val is not None for val in self._min_group_size.values()]): self.combine_min_categories(X) def combine_min_categories(self, X): 
"""Combine small categories into a single category.""" for col, mapper in self.mapping.items(): if self._normalize[col] and isinstance(self._min_group_size[col], int): self._min_group_size[col] = self._min_group_size[col] / X.shape[0] elif not self._normalize and isinstance(self._min_group_size[col], float): self._min_group_size[col] = self._min_group_size[col] * X.shape[0] if self._combine_min_nan_groups[col] is True: min_groups_idx = mapper < self._min_group_size[col] elif self._combine_min_nan_groups[col] == 'force': min_groups_idx = ( (mapper < self._min_group_size[col]) | (mapper.index.isnull()) ) else: min_groups_idx = ( (mapper < self._min_group_size[col]) & (~mapper.index.isnull()) ) min_groups_sum = mapper.loc[min_groups_idx].sum() if ( min_groups_sum > 0 and min_groups_idx.sum() > 1 and not min_groups_idx.loc[~min_groups_idx.index.isnull()].all() ): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name[col] else: min_group_mapper_name = '_'.join([ str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values() ]) self._min_group_categories[col] = { cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist() } if not min_groups_idx.all(): mapper = mapper.loc[~min_groups_idx] mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper def _check_set_create_attrs(self): """Check attributes setting that don't play nicely `self.cols`.""" if not ( (self.combine_min_nan_groups in ['force', True, False, None]) or isinstance(self.combine_min_nan_groups, dict) ): raise ValueError( "'combine_min_nan_groups' should be one of: " "['force', True, False, None] or type dict." ) if ( self.handle_missing == 'return_nan' and self.combine_min_nan_groups == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for all columns." ) if ( self.combine_min_nan_groups is not None and self.min_group_size is None ): pass # raise ValueError( # "`combine_min_nan_groups` only works when `min_group_size` " # "is set for all columns." # ) if ( self.min_group_name is not None and self.min_group_size is None ): raise ValueError( "`min_group_name` only works when `min_group_size` is set " "for all columns." ) if self.combine_min_nan_groups is None: self.combine_min_nan_groups = True def _check_set_create_dict_attrs(self): """Check attributes that can be dicts and format for all `self.cols`.""" dict_attrs = { 'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value', } for attr_name, attr_default in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if col not in attr: attr[col] = attr_default setattr(self, '_' + attr_name, attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, '_' + attr_name, attr_dict) for col in self.cols: if ( self._handle_missing[col] == 'return_nan' and self._combine_min_nan_groups[col] == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " f"'combine_min_nan_groups' == 'force' for columns `{col}`." 
) if ( self._combine_min_nan_groups[col] is not True and self._min_group_size[col] is None ): raise ValueError(f"`combine_min_nan_groups` only works when `min_group_size` is set for column {col}.") if ( self._min_group_name[col] is not None and self._min_group_size[col] is None ): raise ValueError(f"`min_group_name` only works when `min_group_size` is set for column {col}.")
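For reference, a short usage sketch of the rare-level grouping documented above (toy data; printed values are approximate):

```python
# Sketch: normalised counts with rare levels ('c' and 'd', 1/9 of rows each)
# folded into a combined leftovers group.
import pandas as pd
from category_encoders import CountEncoder

X = pd.DataFrame({'city': ['a'] * 4 + ['b'] * 3 + ['c', 'd']})
enc = CountEncoder(cols=['city'], normalize=True, min_group_size=0.2)
print(enc.fit_transform(X))  # roughly: a -> 0.44, b -> 0.33, c/d -> 0.22
```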
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
```suggestion
X[col] = X[col].fillna(value=np.nan)
```
should be much more efficient, no?
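A quick equivalence check (a sketch with made-up data) of the suggestion against the list comprehension it replaces:

```python
# None in an object column already counts as missing, so fillna(value=np.nan)
# normalises it to np.nan without a Python-level loop.
import numpy as np
import pandas as pd

s = pd.Series(['a', None, 'b'], dtype=object)
via_loop = pd.Series([el if el is not None else np.nan for el in s], index=s.index)
assert via_loop.equals(s.fillna(value=np.nan))
```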
jona-sassenhagen
118
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost (cf. below) all encoders inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is a work in progress. I just want to outline my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and / or bugs, related as well as unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure if it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g. if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/count.py
"""Count Encoder""" from __future__ import division import numpy as np import pandas as pd import category_encoders.utils as util from category_encoders.ordinal import OrdinalEncoder from copy import copy from sklearn.base import BaseEstimator, TransformerMixin __author__ = 'joshua t. dunn' class CountEncoder(BaseEstimator, TransformerMixin): def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', min_group_size=None, combine_min_nan_groups=None, min_group_name=None, normalize=False): """Count encoding for categorical features. For a given categorical feature, replace the names of the groups with the group counts. Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. normalize: bool or dict of {column : bool, ...}. whether to normalize the counts to the range (0, 1). See Pandas `value_counts` for more details. min_group_size: int, float or dict of {column : option, ...}. the minimal count threshold of a group needed to ensure it is not combined into a "leftovers" group. Default value is 0.01. If float in the range (0, 1), `min_group_size` is calculated as int(X.shape[0] * min_group_size). Note: This value may change type based on the `normalize` variable. If True this will become a float. If False, it will be an int. min_group_name: None, str or dict of {column : option, ...}. Set the name of the combined minimum groups when the defaults become too long. Default None. In this case the category names will be joined alphabetically with a `_` delimiter. Note: The default name can be long and may keep changing, for example, in cross-validation. combine_min_nan_groups: bool or dict of {column : bool, ...}. whether to combine the leftovers group with NaN group. Default True. Can also be forced to combine with 'force' meaning small groups are effectively counted as NaNs. Force can only used when 'handle_missing' is 'value' or 'error'. Note: Will not force if it creates an binary or invariant column. 
Example ------- >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> from category_encoders import CountEncoder >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(2) memory usage: 51.5 KB None References ---------- """ self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.verbose = verbose self.cols = cols self._dim = None self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self.feature_names = None self.ordinal_encoder = None self._check_set_create_attrs() self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {} def fit(self, X, y=None, **kwargs): """Fit encoder according to X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self._check_set_create_dict_attrs() self._fit_count_encode(X_ordinal, y) X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [ x for x in generated_cols if X_temp[x].var() <= 10e-5 ] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print(f"Could not remove column from feature names. Not found in generated cols.\n{e}") return self def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.' 
) # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError( 'Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,) ) if not list(self.cols): return X X, _ = self._transform_count_encode(X, y) if self.drop_invariant: X = X.drop(columns=self.drop_cols) if self.return_df or override_return_df: return X else: return X.values def _fit_count_encode(self, X_in, y): """Perform the count encoding.""" X = X_in.copy(deep=True) if self.cols is None: self.cols = X.columns.values self.mapping = {} for col in self.cols: mapping_values = X[col].value_counts(normalize=self._normalize[col]) ordinal_encoding = [m["mapping"] for m in self.ordinal_encoder.mapping if m["col"] == col][0] reversed_ordinal_enc = {v: k for k, v in ordinal_encoding.to_dict().items()} mapping_values.index = mapping_values.index.map(reversed_ordinal_enc) self.mapping[col] = mapping_values if self._handle_missing[col] == 'return_nan': self.mapping[col][np.NaN] = np.NaN # elif self._handle_missing[col] == 'value': #test_count.py failing self.mapping[col].loc[-2] = 0 if any([val is not None for val in self._min_group_size.values()]): self.combine_min_categories(X) def _transform_count_encode(self, X_in, y): """Perform the transform count encoding.""" X = X_in.copy(deep=True) for col in self.cols: # Treat None as np.nan X[col] = pd.Series([el if el is not None else np.NaN for el in X[col]], index=X[col].index) if self.handle_missing == "value": if not util.is_category(X[col].dtype): X[col] = X[col].fillna(np.nan) if self._min_group_size is not None: if col in self._min_group_categories.keys(): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].astype(object).map(self.mapping[col]) if isinstance(self._handle_unknown[col], (int, np.integer)): X[col] = X[col].fillna(self._handle_unknown[col]) elif (self._handle_unknown[col] == 'value' and X[col].isna().any() and self._handle_missing[col] != 'return_nan' ): X[col].replace(np.nan, 0, inplace=True) elif ( self._handle_unknown[col] == 'error' and X[col].isnull().any() ): raise ValueError(f'Missing data found in column {col} at transform time.') return X, self.mapping def combine_min_categories(self, X): """Combine small categories into a single category.""" for col, mapper in self.mapping.items(): if self._normalize[col] and isinstance(self._min_group_size[col], int): self._min_group_size[col] = self._min_group_size[col] / X.shape[0] elif not self._normalize and isinstance(self._min_group_size[col], float): self._min_group_size[col] = self._min_group_size[col] * X.shape[0] if self._combine_min_nan_groups[col] is True: min_groups_idx = mapper < self._min_group_size[col] elif self._combine_min_nan_groups[col] == 'force': min_groups_idx = ( (mapper < self._min_group_size[col]) | (mapper.index.isnull()) ) else: min_groups_idx = ( (mapper < self._min_group_size[col]) & (~mapper.index.isnull()) ) min_groups_sum = mapper.loc[min_groups_idx].sum() if ( min_groups_sum > 0 and min_groups_idx.sum() > 1 and not min_groups_idx.loc[~min_groups_idx.index.isnull()].all() ): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name[col] else: min_group_mapper_name = '_'.join([ str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values() ]) self._min_group_categories[col] = { cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist() } if not min_groups_idx.all(): mapper = mapper.loc[~min_groups_idx] 
mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper def _check_set_create_attrs(self): """Check attributes setting that don't play nicely `self.cols`.""" if not ( (self.combine_min_nan_groups in ['force', True, False, None]) or isinstance(self.combine_min_nan_groups, dict) ): raise ValueError( "'combine_min_nan_groups' should be one of: " "['force', True, False, None] or type dict." ) if ( self.handle_missing == 'return_nan' and self.combine_min_nan_groups == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for all columns." ) if ( self.combine_min_nan_groups is not None and self.min_group_size is None ): pass # raise ValueError( # "`combine_min_nan_groups` only works when `min_group_size` " # "is set for all columns." # ) if ( self.min_group_name is not None and self.min_group_size is None ): raise ValueError( "`min_group_name` only works when `min_group_size` is set " "for all columns." ) if self.combine_min_nan_groups is None: self.combine_min_nan_groups = True def _check_set_create_dict_attrs(self): """Check attributes that can be dicts and format for all `self.cols`.""" dict_attrs = { 'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value', } for attr_name, attr_default in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if col not in attr: attr[col] = attr_default setattr(self, '_' + attr_name, attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, '_' + attr_name, attr_dict) for col in self.cols: if ( self._handle_missing[col] == 'return_nan' and self._combine_min_nan_groups[col] == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for columns `%s`." % (col,) ) if ( self._combine_min_nan_groups[col] is not True and self._min_group_size[col] is None ): raise ValueError( "`combine_min_nan_groups` only works when `min_group_size`" "is set for column %s." % (col,) ) if ( self._min_group_name[col] is not None and self._min_group_size[col] is None ): raise ValueError( "`min_group_name` only works when `min_group_size`" "is set for column %s." % (col,) ) def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError("CountEncoder has to be fitted to return feature names.") else: return self.feature_names
"""Count Encoder""" import numpy as np import pandas as pd import category_encoders.utils as util from category_encoders.ordinal import OrdinalEncoder from copy import copy __author__ = 'joshua t. dunn' class CountEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin): prefit_ordinal = True encoding_relation = util.EncodingRelation.ONE_TO_ONE def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', min_group_size=None, combine_min_nan_groups=None, min_group_name=None, normalize=False): """Count encoding for categorical features. For a given categorical feature, replace the names of the groups with the group counts. Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. normalize: bool or dict of {column : bool, ...}. whether to normalize the counts to the range (0, 1). See Pandas `value_counts` for more details. min_group_size: int, float or dict of {column : option, ...}. the minimal count threshold of a group needed to ensure it is not combined into a "leftovers" group. Default value is 0.01. If float in the range (0, 1), `min_group_size` is calculated as int(X.shape[0] * min_group_size). Note: This value may change type based on the `normalize` variable. If True this will become a float. If False, it will be an int. min_group_name: None, str or dict of {column : option, ...}. Set the name of the combined minimum groups when the defaults become too long. Default None. In this case the category names will be joined alphabetically with a `_` delimiter. Note: The default name can be long and may keep changing, for example, in cross-validation. combine_min_nan_groups: bool or dict of {column : bool, ...}. whether to combine the leftovers group with NaN group. Default True. Can also be forced to combine with 'force' meaning small groups are effectively counted as NaNs. Force can only used when 'handle_missing' is 'value' or 'error'. Note: Will not force if it creates an binary or invariant column. 
Example ------- >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> from category_encoders import CountEncoder >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 13 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(2) memory usage: 51.5 KB None References ---------- """ super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df, handle_unknown=handle_unknown, handle_missing=handle_missing) self.mapping = None self.normalize = normalize self.min_group_size = min_group_size self.min_group_name = min_group_name self.combine_min_nan_groups = combine_min_nan_groups self.ordinal_encoder = None self._check_set_create_attrs() self._min_group_categories = {} self._normalize = {} self._min_group_name = {} self._combine_min_nan_groups = {} self._min_group_size = {} self._handle_unknown = {} self._handle_missing = {} def _fit(self, X, y=None, **kwargs): self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) X_ordinal = self.ordinal_encoder.transform(X) self._check_set_create_dict_attrs() self._fit_count_encode(X_ordinal, y) return self def _transform(self, X): for col in self.cols: # Treat None as np.nan X[col] = pd.Series([el if el is not None else np.NaN for el in X[col]], index=X[col].index) if self.handle_missing == "value": if not util.is_category(X[col].dtype): X[col] = X[col].fillna(np.nan) if self._min_group_size is not None: if col in self._min_group_categories.keys(): X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col]) X[col] = X[col].astype(object).map(self.mapping[col]) if isinstance(self._handle_unknown[col], (int, np.integer)): X[col] = X[col].fillna(self._handle_unknown[col]) elif (self._handle_unknown[col] == 'value' and X[col].isna().any() and self._handle_missing[col] != 'return_nan' ): X[col].replace(np.nan, 0, inplace=True) elif ( self._handle_unknown[col] == 'error' and X[col].isnull().any() ): raise ValueError(f'Missing data found in column {col} at transform time.') return X def _fit_count_encode(self, X_in, y): """Perform the count encoding.""" X = X_in.copy(deep=True) if self.cols is None: self.cols = X.columns.values self.mapping = {} for col in self.cols: mapping_values = X[col].value_counts(normalize=self._normalize[col]) ordinal_encoding = [m["mapping"] for m in self.ordinal_encoder.mapping if m["col"] == col][0] reversed_ordinal_enc = {v: k for k, v in ordinal_encoding.to_dict().items()} mapping_values.index = mapping_values.index.map(reversed_ordinal_enc) self.mapping[col] = mapping_values if self._handle_missing[col] == 'return_nan': self.mapping[col][np.NaN] = np.NaN # elif self._handle_missing[col] == 'value': #test_count.py failing self.mapping[col].loc[-2] = 0 if any([val is not None for val in self._min_group_size.values()]): self.combine_min_categories(X) def combine_min_categories(self, X): 
"""Combine small categories into a single category.""" for col, mapper in self.mapping.items(): if self._normalize[col] and isinstance(self._min_group_size[col], int): self._min_group_size[col] = self._min_group_size[col] / X.shape[0] elif not self._normalize and isinstance(self._min_group_size[col], float): self._min_group_size[col] = self._min_group_size[col] * X.shape[0] if self._combine_min_nan_groups[col] is True: min_groups_idx = mapper < self._min_group_size[col] elif self._combine_min_nan_groups[col] == 'force': min_groups_idx = ( (mapper < self._min_group_size[col]) | (mapper.index.isnull()) ) else: min_groups_idx = ( (mapper < self._min_group_size[col]) & (~mapper.index.isnull()) ) min_groups_sum = mapper.loc[min_groups_idx].sum() if ( min_groups_sum > 0 and min_groups_idx.sum() > 1 and not min_groups_idx.loc[~min_groups_idx.index.isnull()].all() ): if isinstance(self._min_group_name[col], str): min_group_mapper_name = self._min_group_name[col] else: min_group_mapper_name = '_'.join([ str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values() ]) self._min_group_categories[col] = { cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist() } if not min_groups_idx.all(): mapper = mapper.loc[~min_groups_idx] mapper[min_group_mapper_name] = min_groups_sum self.mapping[col] = mapper def _check_set_create_attrs(self): """Check attributes setting that don't play nicely `self.cols`.""" if not ( (self.combine_min_nan_groups in ['force', True, False, None]) or isinstance(self.combine_min_nan_groups, dict) ): raise ValueError( "'combine_min_nan_groups' should be one of: " "['force', True, False, None] or type dict." ) if ( self.handle_missing == 'return_nan' and self.combine_min_nan_groups == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " "'combine_min_nan_groups' == 'force' for all columns." ) if ( self.combine_min_nan_groups is not None and self.min_group_size is None ): pass # raise ValueError( # "`combine_min_nan_groups` only works when `min_group_size` " # "is set for all columns." # ) if ( self.min_group_name is not None and self.min_group_size is None ): raise ValueError( "`min_group_name` only works when `min_group_size` is set " "for all columns." ) if self.combine_min_nan_groups is None: self.combine_min_nan_groups = True def _check_set_create_dict_attrs(self): """Check attributes that can be dicts and format for all `self.cols`.""" dict_attrs = { 'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True, 'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value', } for attr_name, attr_default in dict_attrs.items(): attr = copy(getattr(self, attr_name)) if isinstance(attr, dict): for col in self.cols: if col not in attr: attr[col] = attr_default setattr(self, '_' + attr_name, attr) else: attr_dict = {} for col in self.cols: attr_dict[col] = attr setattr(self, '_' + attr_name, attr_dict) for col in self.cols: if ( self._handle_missing[col] == 'return_nan' and self._combine_min_nan_groups[col] == 'force' ): raise ValueError( "Cannot have `handle_missing` == 'return_nan' and " f"'combine_min_nan_groups' == 'force' for columns `{col}`." 
) if ( self._combine_min_nan_groups[col] is not True and self._min_group_size[col] is None ): raise ValueError(f"`combine_min_nan_groups` only works when `min_group_size` is set for column {col}.") if ( self._min_group_name[col] is not None and self._min_group_size[col] is None ): raise ValueError(f"`min_group_name` only works when `min_group_size` is set for column {col}.")
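The dict-valued parameters documented above are applied per column; a small sketch (column names made up) of mixing options:

```python
# Sketch: unknown levels are filled with 0 for 'city' but raise for 'state'.
import pandas as pd
from category_encoders import CountEncoder

train = pd.DataFrame({'city': ['a', 'a', 'b'], 'state': ['x', 'y', 'y']})
enc = CountEncoder(handle_unknown={'city': 0, 'state': 'error'}).fit(train)
print(enc.transform(pd.DataFrame({'city': ['z'], 'state': ['x']})))  # city 'z' -> 0
```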
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
absolutely right! thanks
PaulWestenthanner
119
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost (cf. below) all encoders inherit from, the code is much more streamlined (a minimal sketch of this layout follows after the description). This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeping some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to lay out my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and applies random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be rewritten to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure if this is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
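For readers skimming the record, here is a minimal sketch of the proposed base-class/mixin split. It assumes nothing beyond the class and method names mentioned in the description; the bodies are illustrative placeholders, not the actual implementation.

```python
from abc import ABC, abstractmethod

class BaseEncoder(ABC):
    """Shared fitting logic: input conversion, column selection, invariants."""

    def fit(self, X, y=None, **kwargs):
        # the common steps (convert input, pick columns, handle_missing='error',
        # record feature names, find invariant columns) would live here
        self._fit(X, y, **kwargs)
        return self

    @abstractmethod
    def _fit(self, X, y=None, **kwargs):
        """Encoder-specific fitting, implemented by each concrete encoder."""

class UnsupervisedTransformerMixin:
    def transform(self, X):
        # common checks (is the encoder fitted? right input dimension?) go here
        return self._transform(X)

class SupervisedTransformerMixin:
    def transform(self, X, y=None):
        # supervised encoders may regularise differently when y is supplied
        return self._transform(X, y)

class DemoEncoder(BaseEncoder, UnsupervisedTransformerMixin):
    """A do-nothing encoder showing how little a concrete class must implement."""

    def _fit(self, X, y=None, **kwargs):
        self.mapping = {}  # placeholder for fitted state

    def _transform(self, X):
        return X  # placeholder: a real encoder would apply self.mapping
```

A concrete encoder then only implements the two private hooks, while all input handling stays in one place.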
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
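As a quick sanity check of the shared helpers above, a small usage sketch of `convert_input_vector` normalising several target representations into a `pandas.Series`. The import path is taken from the record's filepath, and the example data is invented for illustration.

```python
import numpy as np
import pandas as pd
from category_encoders.utils import convert_input_vector

idx = pd.RangeIndex(3)

s1 = convert_input_vector([0, 1, 0], index=idx)                       # plain list
s2 = convert_input_vector(np.array([[0], [1], [0]]), index=idx)       # (n, 1) matrix
s3 = convert_input_vector(pd.DataFrame({"t": [0, 1, 0]}), index=idx)  # one-column frame

# each representation is normalised to a pandas Series of length 3
assert all(isinstance(s, pd.Series) and len(s) == 3 for s in (s1, s2, s3))
```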
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
```suggestion X = X.drop(columns=self.invariant_cols) ``` The current option raises a FutureWarning in current Pandas versions, and `inplace` is usually frowned upon. This is also one line shorter and a lot faster.
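For context, a self-contained comparison of the two spellings. The pre-suggestion code is not shown in this record, so the positional-axis form below is an assumption inferred from the FutureWarning remark.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 3]})
invariant_cols = ["b"]

# suggested form: returns a new frame, no mutation
out = df.drop(columns=invariant_cols)

# presumed older form: the positional axis argument triggers a FutureWarning
# on recent pandas releases, and inplace=True mutates df as a side effect
# df.drop(invariant_cols, 1, inplace=True)
```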
jona-sassenhagen
120
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost (cf. below) all encoders inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeping some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to lay out my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and applies random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be rewritten to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure if this is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
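To make the contract of `get_generated_cols` concrete, a hedged toy example; the frames are invented for illustration and the import path is taken from the record's filepath.

```python
import pandas as pd
from category_encoders.utils import get_generated_cols

X_original = pd.DataFrame({"color": ["r", "g"], "size": [1, 2]})
X_transformed = pd.DataFrame({"color_1": [1, 0], "color_2": [0, 1], "size": [1, 2]})

# everything in the transformed frame that is not an untouched original column
generated = get_generated_cols(X_original, X_transformed, to_transform=["color"])
assert generated == ["color_1", "color_2"]
```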
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
```suggestion for x in self.invariant_cols: self.feature_names.remove(x) ``` Don't use a list comprehension for its side effects.
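A standalone illustration of the point, with plain lists standing in for the encoder attributes:

```python
feature_names = ["a", "b", "c"]
invariant_cols = ["b"]

# discouraged: the comprehension runs only for its side effect and builds a
# throwaway list of None values
_ = [feature_names.remove(x) for x in invariant_cols]

# clearer, as in the suggestion: an explicit loop states the intent
feature_names = ["a", "b", "c"]
for x in invariant_cols:
    feature_names.remove(x)

# simplest here: rebuild by filtering, which is what the merged code does
feature_names = [x for x in ["a", "b", "c"] if x not in invariant_cols]
```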
jona-sassenhagen
121
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost (cf. below) all encoders inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeping some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to lay out my ideas to the community and enter discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and applies random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the schema proposed. Probably they can be rewritten to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure if this is correct for all encoders since there are some copy-paste errors. However this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
What's the end purpose of the `EncodingRelation`s?
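Judging from the diff, `EncodingRelation` looks like per-encoder metadata describing how many output features an input feature produces, consumed e.g. by `get_docstring_output_shape`. A minimal, self-contained sketch of that apparent intent (the `OrdinalLikeEncoder` carrier class is purely hypothetical):

```python
from enum import Enum, auto


class EncodingRelation(Enum):
    ONE_TO_ONE = auto()        # one input feature -> one output feature
    ONE_TO_N_UNIQUE = auto()   # one input feature -> one output per distinct value
    ONE_TO_M = auto()          # one input feature -> m outputs, m != cardinality
    N_TO_M = auto()            # all n inputs encoded jointly into m outputs


def get_docstring_output_shape(in_out_relation: EncodingRelation) -> str:
    # mirrors the mapping introduced in this PR
    if in_out_relation == EncodingRelation.ONE_TO_ONE:
        return "n_features"
    elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE:
        return "n_features * respective cardinality"
    elif in_out_relation == EncodingRelation.ONE_TO_M:
        return "M features (n_features < M)"
    return "M features (M can be anything)"


class OrdinalLikeEncoder:                      # hypothetical metadata carrier
    encoding_relation = EncodingRelation.ONE_TO_ONE


print(get_docstring_output_shape(OrdinalLikeEncoder.encoding_relation))  # n_features
```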
bmreiniger
122
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns behaviour from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places. - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Setting `return_df = False` and passing a `numpy.ndarray` would probably even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to outline my ideas to the community and enter the discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()`. - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reiniger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
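To make the streamlining concrete, here is a toy sketch (not part of the PR) of how a new encoder would plug into the proposed base classes, assuming `BaseEncoder` and `UnsupervisedTransformerMixin` are importable from `category_encoders.utils` on this branch; the `FrequencyEncoder` itself is hypothetical:

```python
import pandas as pd

from category_encoders.utils import BaseEncoder, UnsupervisedTransformerMixin


class FrequencyEncoder(UnsupervisedTransformerMixin, BaseEncoder):
    """Hypothetical encoder: replaces each category by its training frequency."""

    def _fit(self, X: pd.DataFrame, y=None, **kwargs):
        # one frequency table per column selected by BaseEncoder._get_fit_columns
        self.mapping = {col: X[col].value_counts(normalize=True) for col in self.cols}

    def _transform(self, X: pd.DataFrame) -> pd.DataFrame:
        for col, freqs in self.mapping.items():
            # unseen categories fall back to a frequency of 0.0
            X[col] = X[col].map(freqs).fillna(0.0)
        return X


df = pd.DataFrame({"city": ["a", "b", "a", "a"]})
print(FrequencyEncoder().fit_transform(df))  # city -> 0.75 / 0.25
```

Input conversion, the fitted-dimension check, invariant-column dropping and `feature_names` bookkeeping all come from the base class, which is exactly the boilerplate reduction described above.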
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
With both supervised and unsupervised as classes, I think the tag `supervised_encoder` is probably better removed and replaced with an inheritance check. In the test files, then, maybe just create at the top a sublist of `__all__` that passes that check (instead of doing the check inside the loops of the test methods themselves).
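A sketch of the suggested check, assuming the mixins from this PR are importable and that every supervised encoder inherits `SupervisedTransformerMixin`; the sublists are built once at module level instead of testing a tag inside every loop:

```python
import category_encoders as encoders
from category_encoders.utils import SupervisedTransformerMixin

# one pass over __all__ at import time; test methods then just iterate the sublists
SUPERVISED = [name for name in encoders.__all__
              if issubclass(getattr(encoders, name), SupervisedTransformerMixin)]
UNSUPERVISED = [name for name in encoders.__all__ if name not in SUPERVISED]
```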
bmreiniger
123
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and an `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - the `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns behaviour from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places. - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Setting `return_df = False` and passing a `numpy.ndarray` would probably even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to outline my ideas to the community and enter the discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin. - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. They can probably be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to create a function that overrides the base-class function, add the docstring, and only do `return super().function()`. - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g. 
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue. - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reiniger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
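For illustration, a short sketch of the re-fit behaviour this description refers to (the partial fix for #122 ported from the LOO encoder to all encoders); it assumes the `use_default_cols` logic from this diff:

```python
import pandas as pd
import category_encoders as ce

enc = ce.OrdinalEncoder()                     # cols=None -> use_default_cols is True
enc.fit(pd.DataFrame({"a": ["x", "y"]}))
print(enc.cols)                               # ['a']
enc.fit(pd.DataFrame({"b": ["u", "v"]}))      # re-fit on a frame with other columns
print(enc.cols)                               # ['b'] -- re-selected, not stale
```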
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
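For reference, a minimal sketch of what the shape handling in `convert_input_vector` above buys: plain lists, 1-d arrays, single-column matrices and nested single-element lists all normalise to the same `pd.Series` with the caller's index (the index values below are made up for illustration).

```python
import numpy as np
import pandas as pd
from category_encoders.utils import convert_input_vector

idx = [10, 11, 12]

# a plain list, a 1-d array, a single-column matrix and a nested list
# all end up as the same Series carrying the provided index
for y in ([1, 0, 1],
          np.array([1, 0, 1]),
          np.array([[1], [0], [1]]),
          [[1], [0], [1]]):
    s = convert_input_vector(y, index=idx)
    assert isinstance(s, pd.Series)
    assert list(s.index) == idx

# a pandas Series keeps its own index; the index argument is ignored
s = convert_input_vector(pd.Series([1, 0, 1], index=[7, 8, 9]), index=idx)
assert list(s.index) == [7, 8, 9]
```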
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
I introduced it in the first place to add the output shape of the transform method dynamically to the docstring, but I didn't get it to work, since docstrings are not meant to be dynamic (I guess). I then decided to keep it, since it could be useful metadata (e.g. you could add a test asserting that the expected number of output columns is generated)
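For illustration, a sketch of the kind of test this metadata could enable. It is hypothetical and assumes each concrete encoder actually sets `encoding_relation`, as the base-class annotation suggests:

```python
import pandas as pd
import category_encoders as encoders
from category_encoders.utils import EncodingRelation

X = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago']})

def check_output_width(encoder, X):
    # hypothetical test built on the metadata: a ONE_TO_ONE encoder must
    # return exactly as many columns as it was given
    out = encoder.fit_transform(X)
    if encoder.encoding_relation == EncodingRelation.ONE_TO_ONE:
        assert out.shape[1] == X.shape[1]

check_output_width(encoders.OrdinalEncoder(), X)
```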
PaulWestenthanner
124
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and a `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to outline my ideas to the community and enter the discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
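To make the reduced boilerplate concrete, here is a sketch of what a new encoder could look like on top of the proposed base classes. The `FrequencyEncoder` below is invented for illustration and is not part of the library; input conversion, column selection, fitted-state checks and invariant handling all come from the base classes:

```python
import pandas as pd
from category_encoders.utils import BaseEncoder, UnsupervisedTransformerMixin

class FrequencyEncoder(BaseEncoder, UnsupervisedTransformerMixin):
    """Hypothetical encoder: replaces each category by its training frequency."""

    def _fit(self, X, y=None, **kwargs):
        # only the encoder-specific logic needs to be implemented
        self.mapping = {col: X[col].value_counts(normalize=True)
                        for col in self.cols}

    def _transform(self, X):
        # X is already a deep copy made by the mixin's transform()
        for col in self.cols:
            X[col] = X[col].map(self.mapping[col])
        return X

enc = FrequencyEncoder()
print(enc.fit_transform(pd.DataFrame({'city': ['a', 'b', 'a']})))
```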
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
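A small usage sketch of the invariant-column handling that `fit` implements above; the generated column names are assumptions based on the encoder's naming scheme:

```python
import pandas as pd
import category_encoders as encoders

X = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago'],
                  'const': ['same', 'same', 'same']})  # zero-variance input

enc = encoders.OneHotEncoder(drop_invariant=True)
out = enc.fit_transform(X)

# every output column generated from 'const' is constant, so it lands in
# invariant_cols and is excluded from both the output and feature_names
assert enc.invariant_cols
assert all(c.startswith('const') for c in enc.invariant_cols)
assert not any(c.startswith('const') for c in out.columns)
```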
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
which could actually be a useful test, by the way, since while working on this I discovered that there might be problems with e.g. the backward difference encoder. It adds an intercept column, and I suspect that if you encode two columns, the latter intercept column overwrites the first one. But I'll have to check. If this turns out to be the case, I'll create a separate issue for it
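A quick way to probe the suspicion, offered only as a sketch, since whether the intercept columns actually collide is exactly the open question:

```python
import pandas as pd
import category_encoders as encoders

X = pd.DataFrame({'c1': ['a', 'b', 'c'], 'c2': ['x', 'y', 'z']})
out = encoders.BackwardDifferenceEncoder(cols=['c1', 'c2']).fit_transform(X)

# two encoded columns should arguably yield two intercept columns;
# finding only a single one would support the overwrite hypothesis
print([c for c in out.columns if 'intercept' in c])
```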
PaulWestenthanner
125
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and a `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to outline my ideas to the community and enter the discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable doing this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time, but it'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent committers and people interested in the project, e.g. @cmougan @bmreiniger
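For completeness, the shared `get_feature_names` behaviour described above, sketched on one of the existing encoders:

```python
import pandas as pd
import category_encoders as encoders
from sklearn.exceptions import NotFittedError

enc = encoders.OrdinalEncoder()
try:
    enc.get_feature_names()      # not fitted yet, so this raises
except NotFittedError as err:
    print(err)

enc.fit(pd.DataFrame({'city': ['chicago', 'st louis']}))
print(enc.get_feature_names())   # ['city']
```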
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
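And a short sketch of the default column selection path (`use_default_cols` / `get_obj_cols`) shared by all encoders above:

```python
import pandas as pd
import category_encoders as encoders

X = pd.DataFrame({'city': ['chicago', 'st louis'],       # object dtype
                  'grade': pd.Categorical(['a', 'b']),   # category dtype
                  'score': [1.0, 2.0]})                  # numeric, left alone

enc = encoders.OrdinalEncoder()   # cols=None -> pick string/categorical cols
enc.fit(X)
print(enc.cols)                   # ['city', 'grade']
```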
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
I think both options are equivalent and it's more or less a matter of taste, isn't it? Or is there any real difference that I'm missing? I'm happy with either; I don't have a strong preference for one or the other
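Whatever the preferred spelling, the observable behaviour should be identical. A sketch of what both variants produce, using sklearn's private `_get_tags` (the same helper `_check_fit_inputs` relies on) and assuming `TargetEncoder` carries the supervised mixin:

```python
import pandas as pd
import category_encoders as encoders

enc = encoders.TargetEncoder()
print(enc._get_tags().get('supervised_encoder'))   # True

# this tag is what makes fit() fail fast without a target
try:
    enc.fit(pd.DataFrame({'city': ['a', 'b']}))
except ValueError as err:
    print(err)
```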
PaulWestenthanner
126
scikit-learn-contrib/category_encoders
325
Refactor/base class
## Proposed Changes ### Streamline Encoders By introducing a `BaseEncoder` and a `Un/SupervisedTransformerMixin` that almost all encoders (cf. below) inherit from, the code is much more streamlined. This removes the boilerplate when implementing new encoders and hence makes it easier for new contributors. ### Following DRY Principle Common components are reused: - When fitting: input conversion, selecting cols, `handle_missing='error'`, setting `self.feature_names`, determining and handling invariant columns - When transforming: input conversion, checking if fitted, checking correct input dimension, `handle_missing='error'` - `get_feature_names` function ### No Breaking Changes Note that the API does not change. At least I did not have to change a single test and all our tested cases still work. Since the library is rather well tested, there is reason to be optimistic that such a big change does not break anything. ### Minor Changes - Rename `drop_cols` to `invariant_cols` since it is more precise to state the (only) reason why these columns are dropped. - Apply the refit-with-different-columns fix from the LOO encoder to all others. - Introduce a metadata flag indicating whether an ordinal pre-encoder is fitted. This might be useful for further abstraction. - Started to add type annotations in some places - Streamline OneHotEncoder to return the input without converting if no categorical columns are present. All other encoders have this behaviour. Probably setting `return_df = False` and passing a `numpy.ndarray` would even throw an error in the current implementation. ## Fixes ### Major #166 At least a big step towards that goal. @janmotl suggests adding metadata to encoders via base classes. I'm introducing the base classes here, but only with a minimum of metadata. More can be added very easily. ### Minor #317 Broken link #237 Warning if transform does not do anything (see discussion) #122 This is also partially addressed. The fix for the LOO encoder is deployed to all other encoders. However, this only ensures that the `cols` parameter is treated correctly when calling fit twice, not the other params. The solution would probably be to define a fitting-params object that is cleared on every call to fit (but keeps some hyperparameters like regularisation etc.). ## Further ToDo's This PR is work in progress. I just want to outline my ideas to the community and enter the discussion as soon as possible. When going through basically the whole code base I noticed some possible improvements and/or bugs, both related and unrelated to the restructuring: - The `_score` function that applies the category mapping and adds random noise to training data is copied 4 times. This can be moved to a mixin - BaseNEncoder and SummaryEncoder are based on other encoders and do not fit into the proposed schema. Probably they can be re-written to use the `_fit` and `_transform` functions of the underlying encoders rather than their `fit` and `transform`. This way the main encoder would still be in charge of handling inputs, dropping invariants etc. I'll look into this. - Docstrings: While most docstrings were copy-paste, some do contain valuable information. Probably the easiest is to just create a function that overrides the base-class function, add the docstring and only do `return super().function()` - Docstrings (II): In transform functions the output shape is often given; I'm not sure it is correct for all encoders since there are some copy-paste errors. However, this information would be useful to have (e.g.
if one input column is transformed to 1 or N output columns). I'd move this to a separate issue - UPDATE 2022-05-31: Resolve merge conflicts -> done - UPDATE 2022-05-31: Integrate Ben Reininger's pull request ## NB Since this is a rather big change I'm not comfortable to do this on my own and I'd like to get some feedback on it from the community. I understand @wdm0006 does not have a lot of time but I'd be great if you could give your two cents on the idea of the change (without working through all the details - which would of course also be welcome). I'd also be interested in feedback from recent commiters and people interested in the project, e.g. @cmougan @bmreiniger
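To make the intended contributor workflow concrete, here is a minimal sketch (mine, not part of the PR) of what a new encoder could look like on top of the proposed base classes. `MyCountEncoder` and its counting logic are hypothetical; `BaseEncoder`, `UnsupervisedTransformerMixin`, `_fit` and `_transform` are the real hooks introduced in the diff below:

```python
import pandas as pd
from category_encoders.utils import BaseEncoder, UnsupervisedTransformerMixin


class MyCountEncoder(UnsupervisedTransformerMixin, BaseEncoder):
    """Hypothetical encoder: replaces each category by its training frequency."""

    def _fit(self, X: pd.DataFrame, y=None, **kwargs):
        # only the encoder-specific logic remains; input conversion, column
        # selection, handle_missing='error' and invariant-column handling are
        # inherited from BaseEncoder.fit
        self.mapping = {col: X[col].value_counts() for col in self.cols}

    def _transform(self, X: pd.DataFrame) -> pd.DataFrame:
        # X arrives as a deep copy, so in-place assignment is safe here
        for col, counts in self.mapping.items():
            X[col] = X[col].map(counts)
        return X


df = pd.DataFrame({'city': ['chicago', 'chicago', 'st louis']})
print(MyCountEncoder(cols=['city']).fit_transform(df))  # city -> 2, 2, 1
```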
null
2021-11-28 12:23:41+00:00
2022-06-02 12:41:15+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" from abc import abstractmethod from enum import Enum, auto import pandas as pd import numpy as np import sklearn.base from sklearn.base import BaseEstimator, TransformerMixin from sklearn.exceptions import NotFittedError from typing import Dict, List, Optional, Union from scipy.sparse import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) if not obj_cols: print("Warning: No categorical columns found. Calling 'transform' will only return input data.") return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError(f'Unexpected input type: {type(X)}') elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. 
But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError(f'Unexpected input shape: {np.shape(y)}') elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError(f'Unexpected input shape: {y.shape}') else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class EncodingRelation(Enum): # one input feature get encoded into one output feature ONE_TO_ONE = auto() # one input feature get encoded into as many output features as it has distinct values ONE_TO_N_UNIQUE = auto() # one input feature get encoded into m output features that are not the number of distinct values ONE_TO_M = auto() # all N input features are encoded into M output features. 
# The encoding is done globally on all the input not on a per-feature basis N_TO_M = auto() def get_docstring_output_shape(in_out_relation: EncodingRelation): if in_out_relation == EncodingRelation.ONE_TO_ONE: return "n_features" elif in_out_relation == EncodingRelation.ONE_TO_N_UNIQUE: return "n_features * respective cardinality" elif in_out_relation == EncodingRelation.ONE_TO_M: return "M features (n_features < M)" elif in_out_relation == EncodingRelation.N_TO_M: return "M features (M can be anything)" class BaseEncoder(BaseEstimator): _dim: Optional[int] cols: List[str] use_default_cols: bool handle_missing: str handle_unknown: str verbose: int drop_invariant: bool invariant_cols: List[str] = [] feature_names: Union[None, List[str]] = None return_df: bool supervised: bool encoding_relation: EncodingRelation INVARIANCE_THRESHOLD = 10e-5 # columns with variance less than this will be considered constant / invariant def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value', **kwargs): """ Parameters ---------- verbose: int integer indicating verbosity of output. 0 for none. cols: list a list of columns to encode, if None, all string and categorical columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform and inverse transform (otherwise it will be a numpy array). handle_missing: str how to handle missing values at fit time. Options are 'error', 'return_nan', and 'value'. Default 'value', which treat NaNs as a countable category at fit time. handle_unknown: str, int or dict of {column : option, ...}. how to handle unknown labels at transform time. Options are 'error' 'return_nan', 'value' and int. Defaults to None which uses NaN behaviour specified at fit time. Passing an int will fill with this int value. kwargs: dict. additional encoder specific parameters like regularisation. """ self.return_df = return_df self.drop_invariant = drop_invariant self.invariant_cols = [] self.verbose = verbose self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X self.cols = cols self.mapping = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.feature_names = None self._dim = None def fit(self, X, y=None, **kwargs): """Fits the encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ self._check_fit_inputs(X, y) X, y = convert_inputs(X, y) self._dim = X.shape[1] self._get_fit_columns(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self._fit(X, y, **kwargs) # for finding invariant columns transform without y (as is done on the test set) X_transformed = self.transform(X, override_return_df=True) self.feature_names = X_transformed.columns.tolist() # drop all output columns with 0 variance. 
if self.drop_invariant: generated_cols = get_generated_cols(X, X_transformed, self.cols) self.invariant_cols = [x for x in generated_cols if X_transformed[x].var() <= self.INVARIANCE_THRESHOLD] self.feature_names = [x for x in self.feature_names if x not in self.invariant_cols] return self def _check_fit_inputs(self, X, y): if self._get_tags().get('supervised_encoder') and y is None: raise ValueError('Supervised encoders need a target for the fitting. The target cannot be None') def _check_transform_inputs(self, X): if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise NotFittedError('Must train encoder before it can be used to transform data.') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError(f'Unexpected input dimension {X.shape[1]}, expected {self._dim}') def _drop_invariants(self, X: pd.DataFrame, override_return_df: bool) -> Union[np.ndarray, pd.DataFrame]: if self.drop_invariant: X = X.drop(columns=self.invariant_cols) if self.return_df or override_return_df: return X else: return X.values def _get_fit_columns(self, X: pd.DataFrame) -> None: """ Determine columns used by encoder. Note that the implementation also deals with re-fitting the same encoder object with different columns. :param X: input data frame :return: none, sets self.cols as a side effect """ # if columns aren't passed, just use every string column if self.use_default_cols: self.cols = get_obj_cols(X) else: self.cols = convert_cols_to_list(self.cols) def get_feature_names(self) -> List[str]: """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features (because the feature is constant/invariant) are not included! """ if not isinstance(self.feature_names, list): raise NotFittedError("Estimator has to be fitted to return feature names.") else: return self.feature_names @abstractmethod def _fit(self, X: pd.DataFrame, y: Optional[pd.Series], **kwargs): ... class SupervisedTransformerMixin(sklearn.base.TransformerMixin): def _more_tags(self): return {'supervised_encoder': True} def transform(self, X, y=None, override_return_df=False): """Perform the transformation to new categorical data. Some encoders behave differently on whether y is given or not. This is mainly due to regularisation in order to avoid overfitting. On training data transform should be called with y, on test data without. Parameters ---------- X : array-like, shape = [n_samples, n_features] y : array-like, shape = [n_samples] or None override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X, y = convert_inputs(X, y, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X, y) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame: ... 
def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y) class UnsupervisedTransformerMixin(sklearn.base.TransformerMixin): def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] override_return_df : bool override self.return_df to force to return a data frame Returns ------- p : array or DataFrame, shape = [n_samples, n_features_out] Transformed values with encoding applied. """ # first check the type X = convert_input(X, deep=True) self._check_transform_inputs(X) if not list(self.cols): return X X = self._transform(X) return self._drop_invariants(X, override_return_df) @abstractmethod def _transform(self, X) -> pd.DataFrame: ... class TransformerWithTargetMixin: def _more_tags(self): return {'supervised_encoder': True} def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
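As a quick illustration of the shared input handling above (my example, not part of the diff), `convert_inputs` unites array-like `X` and list-like `y` into an aligned DataFrame/Series pair:

```python
import numpy as np
from category_encoders.utils import convert_inputs

X, y = convert_inputs(np.array([['a'], ['b']]), [0, 1], columns=['letter'])
print(X.columns.tolist())   # ['letter']
print(y.name, y.tolist())   # 'target' [0, 1], sharing X's default index
# mismatched lengths raise a ValueError, e.g.:
# convert_inputs(np.zeros((3, 1)), [0, 1])
```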
PaulWestenthanner
a18cb64a81310a5e515c7b21255597b4dfb29b86
2e3282239ade4dfff362e655be0f65fe0d0270e9
I think you're right that they're equivalent; I just think inheritance is simpler to understand, being base Python instead of an sklearn mechanism.
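For context (my reading of the thread, assuming an sklearn version of this era where the private `_get_tags` helper exists), the sklearn mechanism referred to is the estimator-tags machinery that the mixins feed via `_more_tags`:

```python
from sklearn.base import BaseEstimator


class Tagged(BaseEstimator):
    def _more_tags(self):
        return {'supervised_encoder': True}


# the flag surfaces through sklearn's tag collection...
print(Tagged()._get_tags().get('supervised_encoder'))  # True
# ...whereas the plain-Python alternative would be checked with
# isinstance(obj, SupervisedTransformerMixin)
```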
bmreiniger
127
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295

## Proposed Changes

Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". Does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well.

Also patches an incidental bug in OneHotEncoder.transform when handle_missing="error" and the input is not a frame.

Hacktoberfest?
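A small repro of the reported behaviour (my construction; column names assume `use_cat_names=True`):

```python
import pandas as pd
from category_encoders import OneHotEncoder

train = pd.DataFrame({'city': ['chicago', 'st louis']})
enc = OneHotEncoder(cols=['city'], handle_missing='error', use_cat_names=True)
out = enc.fit_transform(train)
# after the fix no spurious NaN indicator column is generated:
print(list(out.columns))  # ['city_chicago', 'city_st louis']
```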
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
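A usage sketch of the clarified `handle_missing` semantics documented above (my example; the dummy columns follow the ordinal codes since `use_cat_names` defaults to False):

```python
import numpy as np
import pandas as pd
from category_encoders import OneHotEncoder

train = pd.DataFrame({'city': ['chicago', 'st louis', np.nan]})
enc = OneHotEncoder(cols=['city'], handle_missing='return_nan')
print(enc.fit_transform(train))
# the missing row comes out as NaN in both 'city_1' and 'city_2';
# handle_missing='indicator' would instead add a dedicated dummy column for it
```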
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
very nice catch!!
PaulWestenthanner
128
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295

## Proposed Changes

Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". Does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well.

Also patches an incidental bug in OneHotEncoder.transform when handle_missing="error" and the input is not a frame.

Hacktoberfest?
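To repro the incidental transform bug (my construction): before this patch the null check ran on the raw input, so `X[self.cols]` failed on an ndarray; after it, the input is converted to a DataFrame first:

```python
import numpy as np
from category_encoders import OneHotEncoder

X = np.array([['a'], ['b']])
enc = OneHotEncoder(cols=[0], handle_missing='error').fit(X)
print(enc.transform(X))  # works after the fix; previously the null check choked on the raw array
```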
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
I think that's still not quite correct for the other `handle_missing` options: for `return_nan` it still adds a column.

```python
df = pd.DataFrame([(1, "foo"), (2, "bar"), (4, None)], columns=["a", "b"])
encoder = OneHotEncoder(cols=["b"], handle_missing="return_nan", use_cat_names=True)
encoder.fit_transform(df)
```

actually outputs

```
   a  b_foo  b_bar  b_nan
0  1    1.0    0.0    0.0
1  2    0.0    1.0    0.0
2  4    0.0    0.0    0.0
```

whereas we would expect

```
   a  b_foo  b_bar
0  1    1.0    0.0
1  2    0.0    1.0
2  4    nan    nan
```

Am I missing something here, or would you also expect this output? Maybe @wdm0006 could also comment on this?

Also, the `indicator` option, which creates new columns on the fly, should only create the `nan` column if there are in fact missing values. Tests for those two options would be nice as well. Not your fault, but since we're already fixing one option we should fix all of them.
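For reference, the expectation above could be pinned down with a small regression test; this is just a sketch (the test name and placement are my suggestion, not part of the PR):

```python
import pandas as pd
from category_encoders import OneHotEncoder


def test_handle_missing_return_nan_adds_no_nan_column():
    # hypothetical test reproducing the expectation described above
    df = pd.DataFrame([(1, "foo"), (2, "bar"), (4, None)], columns=["a", "b"])
    enc = OneHotEncoder(cols=["b"], handle_missing="return_nan", use_cat_names=True)
    out = enc.fit_transform(df)
    # no dummy column should be created for the missing category
    assert list(out.columns) == ["a", "b_foo", "b_bar"]
    # the row with the missing value should be NaN in every dummy column
    assert out.loc[2, ["b_foo", "b_bar"]].isnull().all()
```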
PaulWestenthanner
129
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295

## Proposed Changes

Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". Does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well.

Also patches an incidental bug in OneHotEncoder.transform when handle_missing="error", when the input is not a frame.

Hacktoberfest?
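To make the intent concrete, here is a rough sketch of the behaviour this change is after (made-up data; per the description, with `handle_missing="error"` no NaN dummy column should be generated and NaN input should raise):

```python
import pandas as pd
from category_encoders import OneHotEncoder

clean = pd.DataFrame({"b": ["foo", "bar"]})
dirty = pd.DataFrame({"b": ["foo", None]})

enc = OneHotEncoder(cols=["b"], handle_missing="error").fit(clean)
# with the fix, the inner OrdinalEncoder never builds a NaN -> -2 mapping,
# so no phantom dummy column for missing values shows up here
print(list(enc.transform(clean).columns))

try:
    enc.transform(dirty)   # NaNs at transform time should now raise cleanly
except ValueError as err:
    print("raised as expected:", err)
```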
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
~~Maybe I don't understand what `indicator` is supposed to do: there's a test `test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn` that looks like what I expect `value` to do, not `indicator`.~~

~~Oh, maybe it's the unknown `value` that's spawning that extra column in that test.~~

~~Wait, no, I don't think I know what `value` is supposed to do either.~~
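In case the distinction helps later readers, the semantics the updated docstring settles on can be summarised in a short sketch (made-up data; the expected outcomes in the comments follow the `handle_missing` parameter description, not a run I've verified):

```python
import pandas as pd
from category_encoders import OneHotEncoder

train = pd.DataFrame({"b": ["foo", "bar"]})   # no missing values during fit
test = pd.DataFrame({"b": ["foo", None]})

# 'value': a missing value is encoded as 0 in every dummy column
enc_value = OneHotEncoder(cols=["b"], handle_missing="value").fit(train)
print(enc_value.transform(test))

# 'indicator': missingness is its own category with its own dummy column,
# added whether or not the training data contained any NaNs
enc_ind = OneHotEncoder(cols=["b"], handle_missing="indicator").fit(train)
print(enc_ind.transform(test))
```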
bmreiniger
130
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295

## Proposed Changes

Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". Does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well.

Also patches an incidental bug in OneHotEncoder.transform when handle_missing="error", when the input is not a frame.

Hacktoberfest?
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
OK, I think I have `handle_missing='return_nan'` working properly. `handle_missing='indicator'` seems better off always generating the column, and that's what the code does and the tests test for. I'm in favor of just updating the docs to remove the warning (and explaining more what each option does).
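For anyone skimming later: the heart of the merged change is the small strategy table in `fit` that derives the inner `OrdinalEncoder`'s `handle_missing` from the outer setting. The dict below is copied from the diff; the wrapper function and the comments are just my reading of it:

```python
def inner_missing_strategy(handle_missing: str) -> str:
    """Hypothetical helper mirroring the lookup the merged fit() performs inline."""
    return {
        'error': 'error',            # inner encoder errors too, so no NaN -> -2 mapping is built
        'return_nan': 'return_nan',  # NaN passes through; generate_mapping skips the NaN column
        'value': 'value',            # NaN gets its own ordinal class, encoded as all-zero dummies
        'indicator': 'return_nan',   # the NaN dummy column is added by the one-hot mapping itself
    }[handle_missing]


assert inner_missing_strategy('indicator') == 'return_nan'
```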
bmreiniger
131
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295

## Proposed Changes

Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". Does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well.

Also patches an incidental bug in OneHotEncoder.transform when handle_missing="error", when the input is not a frame.

Hacktoberfest?
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
I agree with you. I was also wrong about what the `indicator` option is doing. The behaviour you implemented, i.e. always creating the new column in fit and then mapping the missing values to that column, seems correct. I'd also welcome better documentation of the available options.
PaulWestenthanner
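To make the behaviour agreed on in this thread concrete, here is a minimal sketch (illustrative only, not taken from the PR; the toy `train`/`test` frames and the exact name of the extra column are assumptions) of what `handle_missing='indicator'` is expected to do after this fix: the extra dummy column is created during `fit` even when the training data has no missing values, so the output width does not change when NaNs first appear at transform time.
```python
# Sketch of the 'indicator' behaviour discussed above: the NaN dummy column
# is created at fit time even if the training data contains no missings, so
# unseen NaNs at transform time light up that column instead of changing the
# output shape.
import numpy as np
import pandas as pd
from category_encoders import OneHotEncoder

train = pd.DataFrame({'city': ['chicago', 'st louis', 'chicago']})
test = pd.DataFrame({'city': ['st louis', np.nan]})

enc = OneHotEncoder(cols=['city'], handle_missing='indicator')
enc.fit(train)

# Same number of columns for train and test: the indicator column exists already.
print(enc.transform(train).shape[1] == enc.transform(test).shape[1])  # True
print(enc.transform(test))  # the NaN row activates the extra indicator column
```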
132
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295 ## Proposed Changes Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". It does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well. Also patches an incidental bug in OneHotEncoder.transform with handle_missing="error" when the input is not a frame. Hacktoberfest?
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
maybe you can just use a dict here?
```python
oe_handle_missing_mapping = {
    "error": "error",
    "return_nan": "return_nan",
    ...
}
```
that way we'd also get a `KeyError` in case the option is invalid
PaulWestenthanner
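The suggestion above is what the merged code ends up doing: the `handle_missing` option is resolved through a plain dict, so an unsupported value fails fast with a `KeyError` instead of silently falling through. A minimal sketch of that lookup (the invalid option name below is made up for illustration):
```python
# Resolving the OrdinalEncoder missing-value strategy via a dict, as suggested
# in the review. An invalid option raises KeyError immediately.
oe_missing_strat = {
    'error': 'error',
    'return_nan': 'return_nan',
    'value': 'value',
    'indicator': 'return_nan',  # missings get their own dummy column in the one-hot mapping
}

strategy = oe_missing_strat['indicator']   # -> 'return_nan'

try:
    oe_missing_strat['not_an_option']      # hypothetical invalid value
except KeyError:
    print('invalid handle_missing option')
```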
133
scikit-learn-contrib/category_encoders
322
Fix ohe nan col
Fixes #295 ## Proposed Changes Prevents a column for missing values from being added in OneHotEncoder when handle_missing="error". It does this by preventing the underlying OrdinalEncoder from producing the mapping NaN->-2, by setting _its_ handle_missing to "error" as well. Also patches an incidental bug in OneHotEncoder.transform with handle_missing="error" when the input is not a frame. Hacktoberfest?
null
2021-10-29 02:20:47+00:00
2021-11-03 17:09:36+00:00
category_encoders/one_hot.py
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has unknown categories. This can cause unexpected changes in dimension in some cases. handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. Warning: if indicator is used, an extra column will be added in if the transform matrix has nan values. This can cause unexpected changes in dimension in some cases. Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing='value' ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] for cat_name, class_ in values.iteritems(): if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) base_matrix = np.eye(N=len(index), dtype=np.int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[values.loc[np.nan]] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col]) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
"""One-hot or dummy coding""" import numpy as np import pandas as pd import warnings from sklearn.base import BaseEstimator, TransformerMixin from category_encoders.ordinal import OrdinalEncoder import category_encoders.utils as util __author__ = 'willmcginnis' class OneHotEncoder(BaseEstimator, TransformerMixin): """Onehot (or dummy) coding for categorical features, produces one feature per category, each binary. Parameters ---------- verbose: int integer indicating verbosity of the output. 0 for none. cols: list a list of columns to encode, if None, all string columns will be encoded. drop_invariant: bool boolean for whether or not to drop columns with 0 variance. return_df: bool boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array). use_cat_names: bool if True, category values will be included in the encoded column names. Since this can result in duplicate column names, duplicates are suffixed with '#' symbol until a unique name is generated. If False, category indices will be used instead of the category values. handle_unknown: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` at transform time if there are new categories. 'return_nan' will encode a new value as `np.nan` in every dummy column. 'value' will encode a new value as 0 in every dummy column. 'indicator' will add an additional dummy column (in both training and test data). handle_missing: str options are 'error', 'return_nan', 'value', and 'indicator'. The default is 'value'. 'error' will raise a `ValueError` if missings are encountered. 'return_nan' will encode a missing value as `np.nan` in every dummy column. 'value' will encode a missing value as 0 in every dummy column. 'indicator' will treat missingness as its own category, adding an additional dummy column (whether there are missing values in the training set or not). Example ------- >>> from category_encoders import * >>> import pandas as pd >>> from sklearn.datasets import load_boston >>> bunch = load_boston() >>> y = bunch.target >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names) >>> enc = OneHotEncoder(cols=['CHAS', 'RAD'], handle_unknown='indicator').fit(X, y) >>> numeric_dataset = enc.transform(X) >>> print(numeric_dataset.info()) <class 'pandas.core.frame.DataFrame'> RangeIndex: 506 entries, 0 to 505 Data columns (total 24 columns): CRIM 506 non-null float64 ZN 506 non-null float64 INDUS 506 non-null float64 CHAS_1 506 non-null int64 CHAS_2 506 non-null int64 CHAS_-1 506 non-null int64 NOX 506 non-null float64 RM 506 non-null float64 AGE 506 non-null float64 DIS 506 non-null float64 RAD_1 506 non-null int64 RAD_2 506 non-null int64 RAD_3 506 non-null int64 RAD_4 506 non-null int64 RAD_5 506 non-null int64 RAD_6 506 non-null int64 RAD_7 506 non-null int64 RAD_8 506 non-null int64 RAD_9 506 non-null int64 RAD_-1 506 non-null int64 TAX 506 non-null float64 PTRATIO 506 non-null float64 B 506 non-null float64 LSTAT 506 non-null float64 dtypes: float64(11), int64(13) memory usage: 95.0 KB None References ---------- .. [1] Contrast Coding Systems for Categorical Variables, from https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/ .. [2] Gregory Carey (2003). 
Coding Categorical Variables, from http://psych.colorado.edu/~carey/Courses/PSYC5741/handouts/Coding%20Categorical%20Variables%202006-03-03.pdf """ def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', use_cat_names=False): self.return_df = return_df self.drop_invariant = drop_invariant self.drop_cols = [] self.mapping = None self.verbose = verbose self.cols = cols self.ordinal_encoder = None self._dim = None self.handle_unknown = handle_unknown self.handle_missing = handle_missing self.use_cat_names = use_cat_names self.feature_names = None @property def category_mapping(self): return self.ordinal_encoder.category_mapping def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') oe_missing_strat = { 'error': 'error', 'return_nan': 'return_nan', 'value': 'value', 'indicator': 'return_nan', }[self.handle_missing] self.ordinal_encoder = OrdinalEncoder( verbose=self.verbose, cols=self.cols, handle_unknown='value', handle_missing=oe_missing_strat, ) self.ordinal_encoder = self.ordinal_encoder.fit(X) self.mapping = self.generate_mapping() X_temp = self.transform(X, override_return_df=True) self.feature_names = list(X_temp.columns) if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." 
"Not found in generated cols.\n{}".format(e)) return self def generate_mapping(self): mapping = [] found_column_counts = {} for switch in self.ordinal_encoder.mapping: col = switch.get('col') values = switch.get('mapping').copy(deep=True) if self.handle_missing == 'value': values = values[values > 0] if len(values) == 0: continue index = [] new_columns = [] append_nan_to_index = False for cat_name, class_ in values.iteritems(): if pd.isna(cat_name) and self.handle_missing == 'return_nan': # we don't want a mapping column if return_nan # but do add the index to the end append_nan_to_index = class_ continue if self.use_cat_names: n_col_name = str(col) + '_%s' % (cat_name,) found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count else: n_col_name = str(col) + '_%s' % (class_,) index.append(class_) new_columns.append(n_col_name) if self.handle_unknown == 'indicator': n_col_name = str(col) + '_%s' % (-1,) if self.use_cat_names: found_count = found_column_counts.get(n_col_name, 0) found_column_counts[n_col_name] = found_count + 1 n_col_name += '#' * found_count new_columns.append(n_col_name) index.append(-1) if append_nan_to_index: index.append(append_nan_to_index) base_matrix = np.eye(N=len(index), M=len(new_columns), dtype=int) base_df = pd.DataFrame(data=base_matrix, columns=new_columns, index=index) if self.handle_unknown == 'value': base_df.loc[-1] = 0 elif self.handle_unknown == 'return_nan': base_df.loc[-1] = np.nan if self.handle_missing == 'return_nan': base_df.loc[-2] = np.nan elif self.handle_missing == 'value': base_df.loc[-2] = 0 mapping.append({'col': col, 'mapping': base_df}) return mapping def transform(self, X, override_return_df=False): """Perform the transformation to new categorical data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- p : array, shape = [n_samples, n_numeric + N] Transformed values with encoding applied. """ if self._dim is None: raise ValueError( 'Must train encoder before it can be used to transform data.') # first check the type X = util.convert_input(X) if self.handle_missing == 'error': if X[self.cols].isnull().any().any(): raise ValueError('Columns to be encoded can not contain null') # then make sure that it is the right size if X.shape[1] != self._dim: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values X = self.ordinal_encoder.transform(X) if self.handle_unknown == 'error': if X[self.cols].isin([-1]).any().any(): raise ValueError('Columns to be encoded can not contain new values') X = self.get_dummies(X) if self.drop_invariant: for col in self.drop_cols: X.drop(col, 1, inplace=True) if self.return_df or override_return_df: return X else: return X.values def inverse_transform(self, X_in): """ Perform the inverse transformation to encoded data. 
Parameters ---------- X_in : array-like, shape = [n_samples, n_features] Returns ------- p: array, the same size of X_in """ # fail fast if self._dim is None: raise ValueError('Must train encoder before it can be used to inverse_transform data') # first check the type and make deep copy X = util.convert_input(X_in, columns=self.feature_names, deep=True) X = self.reverse_dummies(X, self.mapping) # then make sure that it is the right size if X.shape[1] != self._dim: if self.drop_invariant: raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should " "be False when transforming the data" % (X.shape[1],)) else: raise ValueError('Unexpected input dimension %d, expected %d' % ( X.shape[1], self._dim, )) if not list(self.cols): return X if self.return_df else X.values for switch in self.ordinal_encoder.mapping: column_mapping = switch.get('mapping') inverse = pd.Series(data=column_mapping.index, index=column_mapping.values) X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type')) if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan': for col in self.cols: if X[switch.get('col')].isnull().any(): warnings.warn("inverse_transform is not supported because transform impute " "the unknown category nan when encode %s" % (col,)) return X if self.return_df else X.values def get_dummies(self, X_in): """ Convert numerical variable into dummy variables Parameters ---------- X_in: DataFrame Returns ------- dummies : DataFrame """ X = X_in.copy(deep=True) cols = X.columns.values.tolist() for switch in self.mapping: col = switch.get('col') mod = switch.get('mapping') base_df = mod.reindex(X[col].fillna(-2)) base_df = base_df.set_index(X.index) X = pd.concat([base_df, X], axis=1) old_column_index = cols.index(col) cols[old_column_index: old_column_index + 1] = mod.columns X = X.reindex(columns=cols) return X def reverse_dummies(self, X, mapping): """ Convert dummy variable into numerical variables Parameters ---------- X : DataFrame mapping: list-like Contains mappings of column to be transformed to it's new columns and value represented Returns ------- numerical: DataFrame """ out_cols = X.columns.values.tolist() mapped_columns = [] for switch in mapping: col = switch.get('col') mod = switch.get('mapping') insert_at = out_cols.index(mod.columns[0]) X.insert(insert_at, col, 0) positive_indexes = mod.index[mod.index > 0] for i in range(positive_indexes.shape[0]): existing_col = mod.columns[i] val = positive_indexes[i] X.loc[X[existing_col] == 1, col] = val mapped_columns.append(existing_col) X.drop(mod.columns, axis=1, inplace=True) out_cols = X.columns.values.tolist() return X def get_feature_names(self): """ Returns the names of all transformed / added columns. Returns ------- feature_names: list A list with all feature names transformed or added. Note: potentially dropped features are not included! """ if not isinstance(self.feature_names, list): raise ValueError( 'Must transform data first. Affected feature names are not known before.') else: return self.feature_names
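For readers skimming this record, a small usage sketch of the behavior the before/after contents above introduce (a constructed example, assuming this version of the encoder is installed as `category_encoders.OneHotEncoder`): with `handle_missing='return_nan'`, the missing-value row comes back as `NaN` in every dummy column rather than as all zeros.

```python
import numpy as np
import pandas as pd
from category_encoders import OneHotEncoder  # assumed import path

X = pd.DataFrame({'c': ['a', 'b', np.nan]})
enc = OneHotEncoder(cols=['c'], handle_missing='return_nan')
out = enc.fit_transform(X)
print(out)
# The NaN row is NaN in both c_1 and c_2; known categories are one-hot as usual.
```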
bmreiniger
cc0c4b9ab66a52979b37f791836bea1241046b8c
0bcb96b6a505c9cec7c473578471491eab78b4eb
`append_nan_to_index` would be a less hacky name.
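For context, a minimal distillation of the pattern the reviewer is naming (a sketch, not the encoder's actual code; `values` stands in for an ordinal mapping as in `OrdinalEncoder.mapping`): the variable starts out as `False` but, once a NaN category is seen, it stores that category's ordinal code, so a single name has to convey both "NaN was seen" and "the code to append afterwards".

```python
import pandas as pd

values = pd.Series({'a': 1, 'b': 2, float('nan'): -2})  # category -> ordinal code

index = []
append_nan_to_index = False
for cat_name, class_ in values.items():
    if pd.isna(cat_name):
        append_nan_to_index = class_  # remember the code; emit no dummy column
        continue
    index.append(class_)

if append_nan_to_index:  # relies on the code being nonzero -- the "hacky" part
    index.append(append_nan_to_index)

print(index)  # [1, 2, -2]
```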
PaulWestenthanner
134
scikit-learn-contrib/category_encoders
320
Check array index fix
Closes #280. Fixes #272, probably also #290, and supersedes #304. ## Proposed Changes Replaces consecutive calls to `convert_input` (on `X`) and `convert_input_vector` (on `y`) with a single `convert_inputs` to ensure that the indexes of the results match. This is necessary for the proper functioning of encoders that group `y` by values of `X`, and convenient otherwise (see the illustration below). I don't like that `convert_inputs` is one character away from `convert_input`; other suggestions are welcome. One _could_ convert all remaining `convert_input` calls to `convert_inputs` with the default `y=None`, so that `convert_input` would join `convert_input_vector` in being used only inside `convert_inputs`. I've also reduced the places where `y` gets cast to float, performing that cast only when needed (in glmm, where `statsmodels` would otherwise complain, and in quantile, where `numpy.quantile` would otherwise complain). And since `convert_input` has a deep-copy option, I've consolidated a few of the copies into `convert_inputs`; there are others I've not consolidated, mostly because the copy happens further away in the code. I'm not sure what needs to be done for a repository to "participate" in [Hacktoberfest](https://hacktoberfest.digitalocean.com/), but if it's as simple as a maintainer adding a `hacktoberfest-approved` label to the PR, I'd appreciate that.
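A minimal, constructed illustration (not taken from the PR itself) of the index mismatch the change guards against:

```python
import numpy as np
import pandas as pd

# X keeps a non-default index (e.g. after train_test_split); y arrives as a
# bare array and, if wrapped naively, gets a fresh 0..n-1 index.
X = pd.DataFrame({'cat': ['a', 'b', 'a', 'b']}, index=[10, 11, 12, 13])
y = pd.Series(np.array([1, 0, 1, 0]), name='target')  # index 0..3, not 10..13

# Any index-aligned operation (as grouping y by X does internally) misfires:
print(pd.concat([X, y], axis=1))  # 8 rows, half of them padded with NaN

# Building y on X's index -- what convert_inputs guarantees -- restores sanity:
y_ok = pd.Series(np.array([1, 0, 1, 0]), name='target', index=X.index)
print(pd.concat([X, y_ok], axis=1))  # 4 rows, fully populated
```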
null
2021-10-24 21:33:05+00:00
2021-10-29 15:40:38+00:00
category_encoders/utils.py
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse.csr import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_input(X, columns=None, deep=False): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep) else: raise ValueError('Unexpected input type: %s' % (str(type(X)))) elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError('Unexpected input shape: %s' % (str(np.shape(y)))) elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0 or (len(y)>0 and not isinstance(y[0], list)): # empty list or a vector return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError('Unexpected input shape: %s' % (str(y.shape))) else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
"""A collection of shared utilities for all encoders, not intended for external use.""" import pandas as pd import numpy as np from scipy.sparse.csr import csr_matrix __author__ = 'willmcginnis' def convert_cols_to_list(cols): if isinstance(cols, pd.Series): return cols.tolist() elif isinstance(cols, np.ndarray): return cols.tolist() elif np.isscalar(cols): return [cols] elif isinstance(cols, set): return list(cols) elif isinstance(cols, tuple): return list(cols) elif pd.api.types.is_categorical_dtype(cols): return cols.astype(object).tolist() return cols def get_obj_cols(df): """ Returns names of 'object' columns in the DataFrame. """ obj_cols = [] for idx, dt in enumerate(df.dtypes): if dt == 'object' or is_category(dt): obj_cols.append(df.columns.values[idx]) return obj_cols def is_category(dtype): return pd.api.types.is_categorical_dtype(dtype) def convert_inputs(X, y, columns=None, index=None, deep=False): """ Unite arraylike `X` and vectorlike `y` into a DataFrame and Series. If both are pandas types already, raises an error if their indexes do not match. If one is pandas, the returns will share that index. If neither is pandas, a default index will be used, unless `index` is passed. Parameters ---------- X: arraylike y: listlike columns: listlike Specifies column names to use for `X`. Ignored if `X` is already a dataframe. If `None`, use the default pandas column names. index: listlike The index to use, if neither `X` nor `y` is a pandas type. (If one has an index, then this has no effect.) If `None`, use the default pandas index. deep: bool Whether to deep-copy `X`. """ X_alt_index = y.index if isinstance(y, pd.Series) else index X = convert_input(X, columns=columns, deep=deep, index=X_alt_index) if y is not None: y = convert_input_vector(y, index=X.index) # N.B.: If either was already pandas, it keeps its index. if any(X.index != y.index): raise ValueError("`X` and `y` both have indexes, but they do not match.") if X.shape[0] != y.shape[0]: raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".") return X, y def convert_input(X, columns=None, deep=False, index=None): """ Unite data into a DataFrame. Objects that do not contain column names take the names from the argument. Optionally perform deep copy of the data. """ if not isinstance(X, pd.DataFrame): if isinstance(X, pd.Series): X = pd.DataFrame(X, copy=deep) else: if columns is not None and np.size(X,1) != len(columns): raise ValueError('The count of the column names does not correspond to the count of the columns') if isinstance(X, list): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) # lists are always copied, but for consistency, we still pass the argument elif isinstance(X, (np.generic, np.ndarray)): X = pd.DataFrame(X, columns=columns, copy=deep, index=index) elif isinstance(X, csr_matrix): X = pd.DataFrame(X.todense(), columns=columns, copy=deep, index=index) else: raise ValueError('Unexpected input type: %s' % (str(type(X)))) elif deep: X = X.copy(deep=True) return X def convert_input_vector(y, index): """ Unite target data type into a Series. If the target is a Series or a DataFrame, we preserve its index. But if the target does not contain index attribute, we use the index from the argument. """ if y is None: raise ValueError('Supervised encoders need a target for the fitting. 
The target cannot be None') if isinstance(y, pd.Series): return y elif isinstance(y, np.ndarray): if len(np.shape(y))==1: # vector return pd.Series(y, name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[0]==1: # single row in a matrix return pd.Series(y[0, :], name='target', index=index) elif len(np.shape(y))==2 and np.shape(y)[1]==1: # single column in a matrix return pd.Series(y[:, 0], name='target', index=index) else: raise ValueError('Unexpected input shape: %s' % (str(np.shape(y)))) elif np.isscalar(y): return pd.Series([y], name='target', index=index) elif isinstance(y, list): if len(y)==0: # empty list return pd.Series(y, name='target', index=index, dtype=float) elif len(y)>0 and not isinstance(y[0], list): # vector return pd.Series(y, name='target', index=index) elif len(y)>0 and isinstance(y[0], list) and len(y[0])==1: # single row in a matrix flatten = lambda y: [item for sublist in y for item in sublist] return pd.Series(flatten(y), name='target', index=index) elif len(y)==1 and len(y[0])==0 and isinstance(y[0], list): # single empty column in a matrix return pd.Series(y[0], name='target', index=index, dtype=float) elif len(y)==1 and isinstance(y[0], list): # single column in a matrix return pd.Series(y[0], name='target', index=index, dtype=type(y[0][0])) else: raise ValueError('Unexpected input shape') elif isinstance(y, pd.DataFrame): if len(list(y))==0: # empty DataFrame return pd.Series(name='target', index=index, dtype=float) if len(list(y))==1: # a single column return y.iloc[:, 0] else: raise ValueError('Unexpected input shape: %s' % (str(y.shape))) else: return pd.Series(y, name='target', index=index) # this covers tuples and other directly convertible types def get_generated_cols(X_original, X_transformed, to_transform): """ Returns a list of the generated/transformed columns. Arguments: X_original: df the original (input) DataFrame. X_transformed: df the transformed (current) DataFrame. to_transform: [str] a list of columns that were transformed (as in the original DataFrame), commonly self.cols. Output: a list of columns that were transformed (as in the current DataFrame). """ original_cols = list(X_original.columns) if len(to_transform) > 0: [original_cols.remove(c) for c in to_transform] current_cols = list(X_transformed.columns) if len(original_cols) > 0: [current_cols.remove(c) for c in original_cols] return current_cols class TransformerWithTargetMixin: def fit_transform(self, X, y=None, **fit_params): """ Encoders that utilize the target must make sure that the training data are transformed with: transform(X, y) and not with: transform(X) """ if y is None: raise TypeError('fit_transform() missing argument: ''y''') return self.fit(X, y, **fit_params).transform(X, y)
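A short usage sketch of the new helper (assuming this version of the module is importable as `category_encoders.utils`):

```python
import numpy as np
import pandas as pd
from category_encoders.utils import convert_inputs  # assumed import path

X = pd.DataFrame({'cat': ['a', 'b']}, index=[5, 6])

# A bare numpy target inherits X's index instead of a default 0..n-1 one:
X_out, y_out = convert_inputs(X, np.array([1, 0]))
print(y_out.index.tolist())  # [5, 6]

# Two pandas objects with clashing indexes now fail loudly rather than
# silently misaligning:
try:
    convert_inputs(X, pd.Series([1, 0], index=[0, 1]))
except ValueError as exc:
    print(exc)  # "`X` and `y` both have indexes, but they do not match."
```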
bmreiniger
866bf143fb71db0de60d32e608393c1a3b8a71a7
cc0c4b9ab66a52979b37f791836bea1241046b8c
We still support Python 3.5 (although we should probably change that), so we cannot use f-strings at the moment.
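For illustration, the Python 3.5-compatible formatting the codebase keeps to, next to the f-string it rules out (a trivial constructed example):

```python
dim, expected = 24, 13

# %-formatting works on Python 3.5, matching the style used above:
msg = 'Unexpected input dimension %d, expected %d' % (dim, expected)
print(msg)

# The f-string equivalent needs Python 3.6+, hence is avoided:
# msg = f'Unexpected input dimension {dim}, expected {expected}'
```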
PaulWestenthanner
135