import numpy as np
import tensorflow.keras as keras

# 1. After saving and reloading the model, validate it: the reloaded model's
#    predictions should match the original's element-wise within atol=1e-6.
# NOTE(review): `predictions` and `new_prediction` are defined elsewhere —
# presumably outputs of the model before and after reloading; verify against caller.
np.testing.assert_allclose(predictions, new_prediction, atol=1e-6)  # predictions are identical

# 2. Save the model to disk, drop the in-memory copy, then restore it from the file.
_model_path = 'path_to_my_model.h5'
model.save(_model_path)                        # serialize to HDF5
del model                                      # discard the in-memory model
model = keras.models.load_model(_model_path)   # rebuild it from disk

# 3. Sentinel "best score so far" (early-stopping-callback style):
#    start at +infinity so any real loss value counts as an improvement.
# Fix: `np.Inf` was deprecated and removed in NumPy 2.0; `np.inf` is the
# canonical, version-safe spelling.
self.best = np.inf

# 4. Feature scaling: standardize features to zero mean / unit variance.
from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training split only, then apply the same learned
# transformation to both splits (avoids test-set leakage).
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)

# 5.GridSearchCV trains the best model found on the whole training set (you can change this by setting `refit=False`), so we don't need to do it again. We can simply evaluate the model's accuracy:

# 6. Generate 1,000 small training subsets (100 instances each), one per tree.

from sklearn.model_selection import ShuffleSplit

n_trees = 1000
n_instances = 100

# Each split keeps n_instances samples on the "train" side; the remaining
# len(X_train) - n_instances samples land on the (unused) test side.
rs = ShuffleSplit(n_splits=n_trees, test_size=len(X_train) - n_instances, random_state=42)
mini_sets = [
    (X_train[train_idx], y_train[train_idx])
    for train_idx, _ in rs.split(X_train)
]

# 7. Majority vote across predictors: for each sample take the most common
#    prediction over the rows of Y_pred.
# NOTE(review): assumes Y_pred is shaped (n_predictors, n_samples) — confirm.

from scipy.stats import mode

# Fix: SciPy >= 1.11 changed mode()'s default to keepdims=False, which
# collapses the result to 1-D. Pass keepdims=True explicitly to keep the
# (1, n_samples) shape this code was originally written against.
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0, keepdims=True)

# 8. Stratified sampling: hold out 40 instances for the test set while
#    preserving the per-class proportions of the Olivetti targets.
from sklearn.model_selection import StratifiedShuffleSplit

sss = StratifiedShuffleSplit(n_splits=1, test_size=40, random_state=42)
# n_splits=1, so a single next() consumes the only (train, test) index pair.
train_valid_idx, test_idx = next(sss.split(olivetti.data, olivetti.target))

# Fancy-index the raw arrays into the two splits.
X_train_valid, y_train_valid = olivetti.data[train_valid_idx], olivetti.target[train_valid_idx]
X_test, y_test = olivetti.data[test_idx], olivetti.target[test_idx]

# 9. (placeholder — section never written)
