# Datasets: load_* reads small bundled datasets, fetch_* downloads large ones
from sklearn.datasets import load_iris, fetch_california_housing
# The KNN classifier model
from sklearn.neighbors import KNeighborsClassifier
# Visualization tools
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd

# Dataset splitting utilities
from sklearn.model_selection import train_test_split

# Preprocessing -- feature scaling
from sklearn.preprocessing import MinMaxScaler,StandardScaler

# Small dataset -- ships with scikit-learn, loaded from local files
iris = load_iris()
print(iris)
# Feature matrix, labels, and the human-readable class names
X = iris.data
y = iris.target
target_names = iris.target_names

print("x:", X, "y:", y)

# Build a 5-nearest-neighbour classifier
knn = KNeighborsClassifier(n_neighbors=5)
# Fit on the whole dataset (demo only; no hold-out split here)
knn.fit(X, y)
# Classify a single hand-crafted sample and show its class name
pred = knn.predict([[3.0, 2.1, 1.0, 1.2]])
print(pred, target_names[pred])

# Large dataset -- downloaded (and cached) on first use
california_housing = fetch_california_housing()
print(california_housing)

# The returned Bunch exposes: data, target, target_names, DESCR, feature_names

# Visualization: wrap the iris features in a labelled DataFrame
print("--------------------------------------------")
feature_cols = ["L_Len", "M_Len", "L_Width", "M_Width"]
iris_data = pd.DataFrame(X, columns=feature_cols)
iris_data["target"] = y
print(iris_data)


# 画图--
def plot_xy(data, col1, col2):
    """Scatter-plot columns *col1* vs *col2* of *data*, coloured by the
    "target" column.  fit_reg=False suppresses the regression line that
    lmplot would otherwise fit and draw."""
    sns.lmplot(data=data, x=col1, y=col2, hue="target", fit_reg=False)
    plt.show()


# A 2-D scatter can only show the distribution over two features at a time
plot_xy(iris_data, "L_Len", "M_Len")

# Split into train/test sets (80% / 20%; no random_state is fixed,
# so the split differs between runs)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
for label, part in (
    ("训练集的特征值", x_train),
    ("测试集的特征值", x_test),
    ("训练集的目标值", y_train),
    ("测试集的目标值", y_test),
):
    print(label + ":\n", part)

# Feature scaling --
# Apply min-max normalisation (to [0, 1]) and then standardisation
# (zero mean, unit variance) to the same feature matrix; print both
# the raw array and a labelled DataFrame view each time.
for scaler in (MinMaxScaler(feature_range=(0, 1)), StandardScaler()):
    transfer = scaler
    transfer_data = transfer.fit_transform(X)
    print(transfer_data)
    print(pd.DataFrame(transfer_data, columns=["1", "2", "3", "4"]))