from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression,Ridge,SGDRegressor
from sklearn.metrics import mean_squared_error

def knn_iris():
    """Classify the iris dataset with a K-nearest-neighbours model.

    Splits the data, standardizes the features, fits KNN (k=3) and
    prints the predictions plus the accuracy on the held-out split.
    :return: None
    """
    # 1) Load the dataset.
    iris = load_iris()
    # 2) Train/test split (this dataset needs no extra cleaning).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris["target"], random_state=22)
    # 3) Feature engineering: standardize. Fit the scaler on the
    #    training split only, then apply the same scaling to the test set.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4) Fit the KNN estimator.
    model = KNeighborsClassifier(n_neighbors=3)
    model.fit(x_train, y_train)
    # 5) Evaluate.
    # Method 1: compare predictions against the ground truth directly.
    predictions = model.predict(x_test)
    print("y_predict :", predictions)
    print("直接比对真实值与预测值:", y_test == predictions)
    # Method 2: accuracy on the test split.
    accuracy = model.score(x_test, y_test)
    print("准确率为 ", accuracy)
    return None

def knn_iris_gscv():
    """Classify the iris dataset with KNN, tuning k via grid search.

    Wraps a KNN estimator in a 10-fold cross-validated GridSearchCV
    over n_neighbors, then reports test-set accuracy and the best
    parameters/estimator found.
    :return: None
    """
    # 1) Load the dataset.
    iris = load_iris()
    # 2) Train/test split (this dataset needs no extra cleaning).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris["target"], random_state=22)
    # 3) Feature engineering: standardize (fit on the training split only).
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4) KNN estimator wrapped in a grid search with 10-fold cross-validation.
    knn = KNeighborsClassifier(n_neighbors=3)
    search = GridSearchCV(
        knn, param_grid={"n_neighbors": [1, 3, 5, 7, 9, 11]}, cv=10)
    search.fit(x_train, y_train)
    # 5) Evaluate on the held-out test data.
    # Method 1: compare predictions against the ground truth directly.
    y_predict = search.predict(x_test)
    print("y_predict :", y_predict)
    print("直接比对真实值与预测值:", y_test == y_predict)
    # Method 2: accuracy on the test split.
    score = search.score(x_test, y_test)
    print("准确率为 ", score)
    # Inspect the search results. With the default metric='minkowski',
    # p=2 is the Euclidean distance and p=1 would be Manhattan distance.
    print("最佳参数：", search.best_params_)
    print("最佳结果：", search.best_score_)
    print("最佳估计器：", search.best_estimator_)
    print("交叉验证结果：", search.cv_results_)
    return None

def nb_demo():
    """Classify 20-newsgroups posts with a multinomial naive Bayes model.

    Downloads the dataset, splits features/labels, converts the raw
    text to TF-IDF features, fits MultinomialNB and prints the
    predictions plus the test-set accuracy.
    :return: None
    """
    # 1 Fetch the full dataset (both the train and test portions).
    data = fetch_20newsgroups(subset="all")
    # 2 Split the dataset.
    # BUG FIX: the original passed the whole Bunch object as the only
    # data argument (train_test_split(data, ...)), so no labels were
    # split out and the four-way unpack was wrong. The raw documents
    # live in data.data and the labels in data.target.
    x_train, x_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.25, random_state=22)
    # 3 Feature engineering: TF-IDF text vectorization. The vocabulary
    #   is learned from the training split only.
    transfer = TfidfVectorizer()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4 Naive Bayes estimator with Laplace smoothing (alpha=1.0).
    estimator = MultinomialNB(alpha=1.0)
    estimator.fit(x_train, y_train)
    # 5 Evaluate.
    # Method 1: compare predictions against the ground truth directly.
    y_predict = estimator.predict(x_test)
    print("y_predict :", y_predict)
    print("直接比对真实值与预测值:", y_test == y_predict)
    # Method 2: accuracy on the test split.
    score = estimator.score(x_test, y_test)
    print("准确率为 ", score)
    return None

def linear1():
    """Predict Boston house prices via the normal-equation solver.

    Fits a LinearRegression on standardized features and prints the
    learned weights, the intercept and the mean squared error on the
    held-out test split.
    :return: None
    """
    # 1 Load the dataset.
    # NOTE(review): load_boston was deprecated in scikit-learn 1.0 and
    # removed in 1.2 — this script needs an older sklearn; confirm the
    # pinned version.
    boston = load_boston()
    print("特征数量\n", boston.data.shape)
    # 2 Train/test split.
    x_train, x_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=22)
    # 3 Feature engineering: standardize (fit on the training split only).
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4 Fit the estimator (closed-form / normal-equation solution).
    model = LinearRegression()
    model.fit(x_train, y_train)
    # coef_ holds one weight per feature; intercept_ is the bias term.
    print("正规方程权重系数为：\n", model.coef_)
    print("正规方程偏执参数为：\n", model.intercept_)
    # 5 Evaluate with the mean squared error on the test split.
    y_predict = model.predict(x_test)
    error = mean_squared_error(y_test, y_predict)
    print("正规方程预测房价均方误差为\n", error)
    return None

def linear2():
    """Predict Boston house prices via stochastic gradient descent.

    Fits an SGDRegressor (constant learning rate 0.01, up to 10000
    iterations) on standardized features and prints the learned
    weights, the intercept and the test-set mean squared error.
    :return: None
    """
    # 1 Load the dataset.
    # NOTE(review): load_boston was deprecated in scikit-learn 1.0 and
    # removed in 1.2 — confirm the pinned sklearn version.
    boston = load_boston()
    # 2 Train/test split.
    x_train, x_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=22)
    # 3 Feature engineering: standardize (fit on the training split only).
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4 Fit the SGD estimator (tuned hyperparameters).
    model = SGDRegressor(learning_rate="constant", eta0=0.01, max_iter=10000)
    model.fit(x_train, y_train)
    # coef_ holds one weight per feature; intercept_ is the bias term.
    print("梯度下降权重系数为：\n", model.coef_)
    print("梯度下降偏执参数为：\n", model.intercept_)
    # 5 Evaluate with the mean squared error on the test split.
    y_predict = model.predict(x_test)
    error = mean_squared_error(y_test, y_predict)
    print("梯度下降预测房价均方误差为\n", error)
    return None

def linear3():
    """Predict Boston house prices via ridge (L2-regularized) regression.

    Fits a Ridge model (alpha=0.5, up to 10000 iterations) on
    standardized features and prints the learned weights, the
    intercept and the test-set mean squared error.
    :return: None
    """
    # 1 Load the dataset.
    # NOTE(review): load_boston was deprecated in scikit-learn 1.0 and
    # removed in 1.2 — confirm the pinned sklearn version.
    boston = load_boston()
    # 2 Train/test split.
    x_train, x_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=22)
    # 3 Feature engineering: standardize (fit on the training split only).
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4 Fit the ridge estimator (tuned regularization strength).
    model = Ridge(alpha=0.5, max_iter=10000)
    model.fit(x_train, y_train)
    # coef_ holds one weight per feature; intercept_ is the bias term.
    print("岭回归权重系数为：\n", model.coef_)
    print("岭回归偏执参数为：\n", model.intercept_)
    # 5 Evaluate with the mean squared error on the test split.
    y_predict = model.predict(x_test)
    error = mean_squared_error(y_test, y_predict)
    print("岭回归预测房价均方误差为\n", error)
    return None

if __name__=="__main__":
    # Classification demos (currently disabled; uncomment to run):
    # knn_iris()
    #knn_iris_gscv()
    # nb_demo()
    # Regression demos: compare the normal-equation, SGD and ridge
    # optimizers on the same Boston housing split.
    linear1()
    linear2()
    linear3()
