# 数据预处理 preprocessing 和impute

# 数据无量纲化————提升以梯度和矩阵为核心的算法模型的求解速度，或者是距离类模型的精度
# 无量纲化包括中心化（Zero-centered或者Mean-subtraction）或者缩放处理（Scale）

# ____________________________ Normalization _____________________________________
# MinMaxScaler rescales each feature to a fixed interval (commonly used for normalization)
from sklearn.preprocessing import MinMaxScaler

data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
scaler = MinMaxScaler().fit(data)
result = scaler.transform(data)
# feature_range selects the target interval, e.g.:
# result = MinMaxScaler(feature_range=[5, 10]).fit_transform(data)
# Map the scaled values back to the original scale
inverseTransform = scaler.inverse_transform(result)

# Min-max normalization implemented with plain numpy
import numpy as np

data = np.asarray([[-1, 2], [-0.5, 6], [0, 10], [1, 18]])
# Formula: (x - min(x)) / (max(x) - min(x)), computed column-wise
col_min = data.min(axis=0)
col_range = data.max(axis=0) - col_min
scalerByNp = (data - col_min) / col_range
# Invert the transform to recover the original values
data = scalerByNp * col_range + col_min
# ____________________________ Standardization _____________________________________


# Standardization: center to mean 0 and scale to unit variance
from sklearn.preprocessing import StandardScaler

data = np.asarray([[-1, 2], [-0.5, 6], [0, 10], [1, 18]])
standard_scaler = StandardScaler()
# fit() returns the fitted scaler itself
transform = standard_scaler.fit(data)
# Learned statistics are exposed on the fitted scaler:
# print(standard_scaler.mean_)
# print(standard_scaler.var_)
fit_transform = transform.fit_transform(data)
# print(fit_transform)
# Undo the standardization
data = standard_scaler.inverse_transform(fit_transform)
# print(data)

# ____________________________ Imputing missing values with SimpleImputer _____________________________________

from sklearn.impute import SimpleImputer
import pandas as pd

data = pd.read_csv('/Users/skyf/MachineLearning/sklearn/localDataSet/Narrativedata.csv', index_col=0)
# print(data)
# print(data.info())

# Fill missing ages with the column median.
# strategy: "mean" (default) / "median" / "most_frequent" / "constant" (with fill_value)
age = data["Age"].values.reshape(-1, 1)
imputer = SimpleImputer(strategy="median")
# FIX: the original relied on copy=False mutating the DataFrame through the
# extracted array, but ``.values`` may return a copy, so the imputed ages could
# silently never reach ``data``. Assign the result back explicitly instead,
# mirroring the Embarked handling below.
data.loc[:, "Age"] = imputer.fit_transform(age)
# print(data.info())

# Fill the Embarked column with its mode (most frequent value)
embarked = data.loc[:, "Embarked"].values.reshape(-1, 1)
imp_mode = SimpleImputer(strategy="most_frequent")
data.loc[:, "Embarked"] = imp_mode.fit_transform(embarked)
# print(data.info())

# ____________________________ Encoding categorical features: codes and dummies _____________________________________
# LabelEncoder is for the target y only — it maps class labels to integer codes
from sklearn.preprocessing import LabelEncoder

# The label column holds three classes, turned into integers here
y = data.iloc[:, -1]
le = LabelEncoder().fit(y)
label = le.transform(y)
# Map the integer codes back to the original labels
y = le.inverse_transform(label)

# OrdinalEncoder does the same for feature columns (same usage as above).
# It is NOT suitable for nominal variables (categories with no inherent order).
from sklearn.preprocessing import OrdinalEncoder

# Categorical feature encoding (listed here for reference; not applicable in this case)
# from sklearn.preprocessing import OrdinalEncoder

data_ = data.copy()
oe = OrdinalEncoder().fit(data_.iloc[:, 1:-1])
# print(oe.categories_)
data.iloc[:, 1:-1] = oe.transform(data_.iloc[:, 1:-1])
# print(data.head())

# One-hot encode nominal variables into dummy variables
from sklearn.preprocessing import OneHotEncoder

# In this data set the nominal variables are Sex and Embarked
dataOnehot = data.copy()
x = dataOnehot.iloc[:, 1:-1]
# categories='auto' lets the encoder discover each feature's categories itself
encoder = OneHotEncoder(categories='auto')
encoder_fit_transform = encoder.fit_transform(x)
result = encoder_fit_transform.toarray()
# Inspect the generated dummy-column names.
# NOTE: get_feature_names() was removed in modern sklearn; use get_feature_names_out().
# print(encoder.get_feature_names_out())
# Undo the encoding
# x = encoder.inverse_transform(result)

# Append the dummy columns back onto data
newData = pd.concat([data, pd.DataFrame(result)], axis=1)
# print(newData.head())
newData.drop(["Sex", "Embarked"], axis=1, inplace=True)

# FIX: "Feamle" was a typo for "Female"
newData.columns = ["Age", "Survived", "Female", "Male", "Embarked_C", "Embarked_Q", "Embarked_S"]
# print(newData.head())

# ____________________________ Binarization and binning _____________________________________

from sklearn.preprocessing import Binarizer

dataBinarizer = data.copy()
# Reshape the 1-D age column into the 2-D feature shape sklearn expects
x = dataBinarizer.iloc[:, 0].values.reshape(-1, 1)
# threshold=30 is the cut point: values above 30 map to 1, the rest to 0
threshold__fit_transform = Binarizer(threshold=30).fit_transform(x)
# print(threshold__fit_transform)


from sklearn.preprocessing import KBinsDiscretizer

dataKbd = data.copy()
# 取出年龄
x = dataKbd.iloc[:, 0].values.reshape(-1, 1)
# n_binsfebu每个特征中分箱的个数，默认5  encode编码方式，默认onehot strategy定义箱宽的方式 默认quantile
discretizer = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy='uniform')
result = discretizer.fit_transform(x)
# 查看转换后的箱：一列中的三箱
print(set(x.ravel()))

discretizer = KBinsDiscretizer(n_bins=3, encode="onehot", strategy='uniform')
# 转换后的箱：哑变量
result = discretizer.fit_transform(x)
print(result.toarray())
