from sklearn import datasets
import numpy as np
 
# Load the iris dataset; the returned Bunch behaves like a dict with
# keys such as 'data', 'target' and 'target_names'.
iris_data = datasets.load_iris()
print(type(iris_data))
# 'data' holds the sepal length/width and petal length/width
# measurements as a NumPy array.

# Exploration snippets, kept for reference:
# print(iris_data['data'])                       # raw feature samples
# print(f"sample array shape: {iris_data['data'].shape}")
# print(f"first 5 samples: {iris_data['data'][:5]}")
# Labels: 0 = setosa, 1 = versicolor, 2 = virginica
# print(iris_data['target'])        # class labels
# print(iris_data['target_names'])  # species names

from sklearn.model_selection import train_test_split

# Keep every feature column and hold out 20% of the rows for testing;
# a fixed random_state makes the split reproducible.
y = iris_data['target']
X = iris_data['data'][:, :]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

from sklearn.preprocessing import StandardScaler

# Zero-mean / unit-variance scaling. The scaler is fitted on the
# training split only, so no statistics from the test split leak
# into preprocessing.
scaler = StandardScaler()
scaler.fit(X_train)

# Per-feature mean and variance of the raw training data, rounded to
# two decimals for display.
X_train_avg = np.round(X_train.mean(axis=0), 2)
X_train_var = np.round(X_train.var(axis=0), 2)

# Report the statistics feature by feature.
for i in range(X_train.shape[1]):
    print(f'特征{i+1}的均值为{X_train_avg[i]},方差为{X_train_var[i]}')

# Apply the training-set statistics to both splits.
X_train_std = scaler.transform(X_train)
X_test_std = scaler.transform(X_test)

##=============================================================
### Model training

from sklearn.neighbors import KNeighborsClassifier

# Build a KNN classifier with K = 5 neighbors.
estimator = KNeighborsClassifier(n_neighbors=5)
# FIX: train on the standardized features. The original code fitted on
# the raw X_train, leaving X_train_std / X_test_std (computed above)
# completely unused — so KNN's Euclidean distances were dominated by
# whichever features happen to have the largest raw scale.
estimator.fit(X_train_std, y_train)

# Predict and score on the identically standardized test split.
y_pre = estimator.predict(X_test_std)          # predicted labels
score = np.round(estimator.score(X_test_std, y_test), 2)  # accuracy

print("实际结果为:\n", y_test)
print("预测结果为:\n", y_pre)
print("对比结果为:\n", y_pre == y_test)
print("准确率为:\n", score)

#===============================================================

## K折交叉验证
# from sklearn.model_selection import cross_val_score
# # 实例化KNN模型, KNN的 K=5
# knn = KNeighborsClassifier(n_neighbors=5)

# # 10-fold cross-validation, cv=10
# scores = cross_val_score(knn, X_train, y_train, cv=10, scoring='accuracy')

# print(np.round(scores,2))

# # 计算平均值
# print(f'10折交叉验证后预测准确率为:{np.round(scores.mean(),2)}')

#===============================================================

## 网格超参数搜索
# from sklearn.model_selection import GridSearchCV
# from sklearn.datasets import load_iris
# from sklearn.model_selection import train_test_split
# from sklearn.neighbors import KNeighborsClassifier

# # 实例化KNN模型，KNN的K=5
# knn = KNeighborsClassifier(n_neighbors=5)
# param_grid = [
#     {
#         'weights': ['uniform'],
#         'n_neighbors': [i for i in range(1, 11)]
#     },
#     {
#         'weights': ['distance'],
#         'n_neighbors': [i for i in range(1, 11)],
#         'p': [i for i in range(1, 6)]
#     }
# ]

# grid_search = GridSearchCV(knn, param_grid, n_jobs=-1, verbose=2)
# grid_search.fit(X_train, y_train)
# # 最好的一组对应的分类器
# print(grid_search.best_estimator_)
# # 最好的超参数组合对应的准确率
# print(np.round(grid_search.best_score_, 2))
# # 最好的一组超参数
# best_params = grid_search.best_params_
# formatted_params = {key: np.round(value, 2) if isinstance(value, (int, float))\
#                      else value for key, value in best_params.items()}
# print(formatted_params)
# # 用最好的一组超参数的分类器对象去预测测试集，并计算准确率。
# knn_clf = grid_search.best_estimator_
# y_pre = knn_clf.predict(X_test)
# print(np.round(knn_clf.score(X_test, y_test), 2))
