from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge, RidgeCV, LogisticRegression
from sklearn.metrics import mean_squared_error, classification_report
import pandas as pd
import numpy as np

def example_bostan():
    """
    Predict Boston house prices with three linear models and compare
    their mean squared errors on a held-out test split:
      1) LinearRegression (normal equation)
      2) SGDRegressor (stochastic gradient descent)
      3) RidgeCV (ridge regression with cross-validated alpha)

    NOTE(review): ``load_boston`` was deprecated in scikit-learn 1.0 and
    removed in 1.2 — this function requires scikit-learn < 1.2.

    :return: None
    """
    bostan = datasets.load_boston()
    # Split into training and test sets (80% / 20%)
    x_train, x_test, y_train, y_test = train_test_split(bostan.data, bostan.target, test_size=0.2)

    # Standardize features: fit the scaler on the training data only,
    # then apply the same transform to the test data to avoid leakage.
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    x_test = std.transform(x_test)

    # 1) Normal-equation linear regression
    lr = LinearRegression()
    lr.fit(x_train, y_train)
    # Predictions on the test set
    lr_pre = lr.predict(x_test)
    print(lr_pre)

    print("真实的房价: ", y_test)
    # For regression, mean squared error is the preferred metric here,
    # not the classifier-style `score` (R^2).
    # print(lr.score(x_test, y_test))
    mean_square = mean_squared_error(y_test, lr_pre)
    print("均方误差值为: ", mean_square)

    # 2) Stochastic gradient descent regression
    sgd = SGDRegressor()
    sgd.fit(x_train, y_train)
    sgd_pre = sgd.predict(x_test)
    print("梯度下降得到的预测值: ", sgd_pre)
    sgd_mean_square = mean_squared_error(y_test, sgd_pre)
    print("梯度下降得到的均方误差值: ", sgd_mean_square)

    # 3) Ridge regression; RidgeCV picks the best alpha by cross-validation
    ridge = RidgeCV(alphas=(1.0, 0.5, 0.01))
    ridge.fit(x_train, y_train)
    rid_pre = ridge.predict(x_test)
    # BUG FIX: these two messages previously said "梯度下降" (gradient
    # descent) — copy-paste error from the SGD section above.
    print("岭回归得到的预测值: ", rid_pre)
    rid_mean_square = mean_squared_error(y_test, rid_pre)
    print("岭回归得到的均方误差值: ", rid_mean_square)
    # The alpha selected by cross-validation
    print(ridge.alpha_)

    return None


def logistic():
    """
    Classify tumors in the Wisconsin breast-cancer dataset with logistic
    regression (class label 2 = benign, 4 = malignant).

    Downloads the dataset from the UCI repository on every call, cleans
    the '?' missing-value markers, trains on an 80/20 split, and prints
    the accuracy plus a per-class precision/recall report.

    :return: None
    """
    column_names = ['Sample code number', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',
                    'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',
                    'Normal Nucleoli', 'Mitoses', 'Class']
    data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data", names=column_names)
    # Missing values are encoded as '?': replace with NaN, then drop those rows.
    data = data.replace(to_replace='?', value=np.nan)
    data = data.dropna()
    # BUG FIX: the '?' markers force the 'Bare Nuclei' column to be read
    # as strings (object dtype); convert all columns to numeric explicitly
    # instead of relying on sklearn's implicit coercion.
    data = data.apply(pd.to_numeric)
    # Feature columns (9 cell measurements) and the target column (Class)
    x = data[column_names[1:10]]
    y = data[column_names[10]]
    # Split into training and test sets (80% / 20%)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

    log = LogisticRegression()
    log.fit(x_train, y_train)

    # BUG FIX: message previously said "回顾" (review) instead of "回归" (regression)
    print("回归的参数为: ", log.coef_)
    log_pre = log.predict(x_test)
    print(log_pre)
    # Accuracy on the held-out test set
    score = log.score(x_test, y_test)
    print("逻辑回归预测的准确率: ", score)

    # Per-class precision/recall/F1 (2 = benign, 4 = malignant)
    ret = classification_report(y_test, log_pre, labels=[2, 4], target_names=["良性", '恶性'])
    print(ret)

    return None


if __name__ == '__main__':
    # Run the regression demo; uncomment logistic() for the classification demo.
    example_bostan()
    # logistic()