import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from python_ai.common.xcommon import sep
from sklearn.datasets import load_iris
from sklearn.neighbors import  KNeighborsRegressor, KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, train_test_split

# Pandas display configuration: never truncate rows/columns, allow wide
# cell contents, and keep wide frames on one line instead of wrapping.
for _opt, _val in [('display.max_rows', None),
                   ('display.max_columns', None),
                   ('display.max_colwidth', 1000),
                   ('display.expand_frame_repr', False)]:
    pd.set_option(_opt, _val)

# Matplotlib: use the SimHei font so CJK glyphs render, and disable the
# unicode minus sign (which that font cannot display).
plt.rcParams['font.sans-serif'] = ['Simhei']
plt.rcParams['axes.unicode_minus'] = False

# Fix the global RNG seed so every run of this demo is reproducible.
np.random.seed(666)

# Iris feature matrix and integer class labels.
x, y = load_iris(return_X_y=True)

# Hold out 30% of the samples for final evaluation; the explicit
# random_state makes the split itself reproducible as well.
split = train_test_split(x, y, train_size=0.7, random_state=666)
x_train, x_test, y_train, y_test = split

# Base estimator; all hyper-parameters of interest come from the grid below.
knn = KNeighborsClassifier()

# Hyper-parameter grid for GridSearchCV. Every value MUST be a list of
# candidates: a bare string such as metric='minkowski' is rejected by
# sklearn's ParameterGrid with a TypeError ("Single values need to be
# wrapped in a list with one element"), so the metric is wrapped here.
params = dict(
    # Number of neighbors that vote on each prediction.
    n_neighbors=[2, 3, 4, 5],
    # Power of the Minkowski metric: p=1 is Manhattan (l1) distance,
    # p=2 is Euclidean (l2) distance.
    # NOTE(review): the original lecture notes mislabeled the `weights`
    # options ('uniform'/'distance') as Manhattan/Euclidean distance;
    # the distance measure is actually controlled by `metric`/`p`, while
    # `weights` only controls how neighbor votes are weighted.
    p=[1, 2],
    # Distance metric; fixed to Minkowski (a single candidate, wrapped
    # in a list so the grid machinery accepts it).
    metric=['minkowski'],
)

# 5-fold cross-validated exhaustive search over `params`.
# NOTE: the original passed iid=True, but that parameter was deprecated in
# scikit-learn 0.22 and removed in 0.24 -- passing it on a modern install
# raises TypeError -- so it is dropped (per-fold score averaging, the
# post-0.24 behavior, applies).
grid = GridSearchCV(knn, params, cv=5)
grid.fit(x_train, y_train)
print(f'Best score: {grid.best_score_}')      # mean CV score of the best combo
print(f'Best params: {grid.best_params_}')

# Retrain a fresh classifier with the winning hyper-parameters on the whole
# training split (grid.best_estimator_ would be equivalent, since GridSearchCV
# refits on the full training data by default), then score both splits.
model = KNeighborsClassifier(**(grid.best_params_))
model.fit(x_train, y_train)
print(f'Training score: {model.score(x_train, y_train)}')
print(f'Testing score: {model.score(x_test, y_test)}')

# Per-class membership probabilities for the held-out samples.
h_test = model.predict_proba(x_test)
