import pandas as pd
from sklearn.model_selection import train_test_split

# --- Load the data ---
X = pd.read_csv('../../data/housing_train.csv', index_col='Id')
X_test = pd.read_csv('../../data/housing_test.csv', index_col='Id')

# SalePrice is the prediction target, so rows missing it are useless for
# training: drop those rows (axis=0), then split the target off into y.
X = X.dropna(axis=0, subset=['SalePrice'])
y = X['SalePrice']
X = X.drop(columns=['SalePrice'])

# To keep things simple, drop every predictor column that contains any
# missing value — from both frames, so their column sets stay aligned.
cols_with_missing = [name for name in X.columns if X[name].isnull().any()]
X = X.drop(columns=cols_with_missing)
X_test = X_test.drop(columns=cols_with_missing)

# Carve a 20% validation set out of the training data (fixed seed for
# reproducible scores across the approaches below).
X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, train_size=0.8, test_size=0.2, random_state=0)
print(X_train.head())

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error


# Helper used to compare the different encoding approaches on equal terms.
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Fit a 100-tree random forest on the training split and return the
    mean absolute error of its predictions on the validation split."""
    model = RandomForestRegressor(n_estimators=100, random_state=0)
    model.fit(X_train, y_train)
    return mean_absolute_error(y_valid, model.predict(X_valid))


# --- Step 1: drop columns with categorical data ---
# Simplest treatment: keep only the non-object (numeric) columns.
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
mae_drop = score_dataset(drop_X_train, drop_X_valid, y_train, y_valid)
print("MAE from Approach 1 (Drop categorical variables) : %s" % mae_drop)

# Inspecting the data shows the category sets of 'Condition2' differ
# between the two splits, so a naive ordinal encoding fitted on the
# training split would fail on unseen validation categories.
print("Unique values in 'Condition2' column in training data : ", X_train['Condition2'].unique())
print("Unique values in 'Condition2' column in validation data : ", X_valid['Condition2'].unique())

# --- Step 2: Ordinal encoding ---
# Part A: decide which columns an encoder fitted on the training data can
# safely transform.  All text (object-dtype) columns are candidates.
object_cols = X_train.select_dtypes(include=['object']).columns.tolist()
# A column is safe only when every category seen in the validation split
# also appears in the training split — i.e. the validation categories are
# a subset of the training categories, so the fitted mapping covers them.
good_label_cols = []
for col in object_cols:
    if set(X_valid[col]).issubset(set(X_train[col])):
        good_label_cols.append(col)
# The remaining categorical columns would break the encoder; drop them.
bad_label_cols = list(set(object_cols) - set(good_label_cols))
print("Categorical columns that will be ordinal encoded: ", good_label_cols)
print("Categorical columns that will be dropped from the dataset: ", bad_label_cols)

# Part B: apply the ordinal encoding.
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import LabelEncoder
# Remove the categorical columns that cannot be encoded safely.
label_X_train = X_train.drop(columns=bad_label_cols)
label_X_valid = X_valid.drop(columns=bad_label_cols)
# Replace each safe categorical column in place with integer codes learned
# from the training split; the validation split reuses the fitted mapping.
ordinal_encoder = OrdinalEncoder()
label_X_train[good_label_cols] = ordinal_encoder.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = ordinal_encoder.transform(X_valid[good_label_cols])
mae_ordinal = score_dataset(label_X_train, label_X_valid, y_train, y_valid)
print("MAE from Approach 2 (Ordinal Encoding) : %s" % mae_ordinal)

# NOTE: per the discussion at https://www.zhihu.com/question/421194789,
# LabelEncoder might seem the more natural choice here, but it only encodes
# one column at a time and needs a new output column for its result, whereas
# OrdinalEncoder handles a batch of columns at once and overwrites the
# original cells in place, which keeps the computation simpler.  When no
# ordering of the category values matters, the two produce equivalent codes.
# label_encoder = LabelEncoder()
# label_X_train['mszoing_label_encode'] = label_encoder.fit_transform(X_train.MSZoning)
# label_X_valid['mszoing_label_encode'] = label_encoder.transform(X_valid.MSZoning)
# print(label_X_train)
# print("MAE from Approach 2 (Ordinal Encoding) : %s" %
#       score_dataset(label_X_train, label_X_valid, y_train, y_valid))


# Count the unique entries in each categorical column of the training data.
object_nunique = [X_train[col].nunique() for col in object_cols]
# Pair each column name with its cardinality,
# e.g. {'Street': 2, 'LotShape': 4, ...}.
d = dict(zip(object_cols, object_nunique))
# Show the (column, cardinality) pairs in ascending order of cardinality.
print(sorted(d.items(), key=lambda x: x[1]))


# --- Step 3: Investigating cardinality ---
# The counts above show three columns with more than 10 categories; the
# largest, 'Neighborhood', would need 25 extra indicator columns.  Only
# low-cardinality columns are worth one-hot encoding.
low_cardinality_cols = [c for c in object_cols if X_train[c].nunique() < 10]
# The high-cardinality categorical columns get dropped instead.
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
print('Categorical columns that will be one-hot encoded : ', low_cardinality_cols)
print('Categorical columns that will be dropped for the dataset : ', high_cardinality_cols)

# --- Step 4: One-hot encoding ---
from sklearn.preprocessing import OneHotEncoder

# handle_unknown='ignore' maps validation-only categories to all-zero rows;
# sparse_output=False makes transform() return a dense array.
onehot_encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
OH_train_cols = pd.DataFrame(onehot_encoder.fit_transform(X_train[low_cardinality_cols]))
OH_valid_cols = pd.DataFrame(onehot_encoder.transform(X_valid[low_cardinality_cols]))
# The encoder discards the row index, so restore it before re-joining.
OH_train_cols.index = X_train.index
OH_valid_cols.index = X_valid.index
# Keep only the numeric columns of the original frames ...
num_X_train = X_train.drop(columns=object_cols)
num_X_valid = X_valid.drop(columns=object_cols)
# ... and append the one-hot indicator columns to them.
OH_X_train = pd.concat([num_X_train, OH_train_cols], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_valid_cols], axis=1)
# The generated columns carry integer labels; sklearn wants string names.
OH_X_train.columns = OH_X_train.columns.astype(str)
OH_X_valid.columns = OH_X_valid.columns.astype(str)
mae_onehot = score_dataset(OH_X_train, OH_X_valid, y_train, y_valid)
print('MAE from Approach 3 (One-Hot Encoding) : %s' % mae_onehot)


