from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.feature_extraction import DictVectorizer
import pandas as pd

def knn_iris():
    """Classify the iris dataset with a K-nearest-neighbors model.

    Loads the data, splits it, standardizes the features, fits a
    3-neighbor classifier, and prints predictions and accuracy.
    """
    # 1. Load the bundled iris dataset.
    iris = load_iris()
    # 2. Split into train/test subsets (fixed seed for reproducibility).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=6)
    # 3. Standardize: fit the scaler on the training data only, then
    #    apply the same transform to the test split.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 4. Fit a KNN classifier with k=3.
    estimator = KNeighborsClassifier(n_neighbors=3)
    estimator.fit(x_train, y_train)
    # 5. Evaluate on the held-out split.
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    accuracy = estimator.score(x_test, y_test)
    print("准确率为:\n", accuracy)

def knn_iris_gscv():
    """Classify the iris dataset with KNN tuned via grid search.

    Same pipeline as knn_iris, but the number of neighbors is chosen
    by 10-fold cross-validated grid search, and the search diagnostics
    are printed afterwards.
    """
    # 1. Load and split the data (fixed seed for reproducibility).
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=6)

    # 2. Standardize: fit on the training data, reuse the transform
    #    on the test data.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # 3. Grid-search k over a small odd range with 10-fold CV.
    candidate_params = {"n_neighbors": [1, 3, 5, 7, 9, 11]}
    estimator = GridSearchCV(
        KNeighborsClassifier(), param_grid=candidate_params, cv=10)
    estimator.fit(x_train, y_train)

    # 4. Evaluate on the held-out split.
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)

    # Grid-search diagnostics.
    print("最佳参数：\n", estimator.best_params_)
    print("最佳结果：\n", estimator.best_score_)
    print("最佳估计器：\n", estimator.best_estimator_)
    print("交叉验证结果：\n", estimator.cv_results_)

def facebook_demo():
    """Predict Facebook check-in place_id with a grid-searched KNN.

    Reads a sample of the FBlocation training CSV, derives calendar
    features from the timestamp, drops rarely-visited places, fits a
    KNN classifier tuned by 3-fold grid search, and prints the
    evaluation results.
    """
    # NOTE(review): hard-coded local path — adjust for your machine.
    data = pd.read_csv("D:/03study/book/python/Python3天快速入门机器学项目资料/机器学xiday2资料/02-代码/FBlocation/train.csv", nrows=10000)

    # Derive calendar features from the unix-seconds "time" column.
    time_value = pd.to_datetime(data["time"], unit="s")
    date = pd.DatetimeIndex(time_value)
    data["day"] = date.day
    data["weekday"] = date.weekday
    data["hour"] = date.hour

    # Keep only places that were checked into more than 3 times.
    # (The original had a discarded no-op expression here; the filter
    # is applied once, directly in the isin mask below.)
    place_count = data.groupby("place_id").count()["row_id"]
    data_final = data[data["place_id"].isin(place_count[place_count > 3].index.values)]
    print(data_final)

    x = data_final[["x", "y", "accuracy", "day", "weekday", "hour"]]
    y = data_final["place_id"]
    x_train, x_test, y_train, y_test = train_test_split(x, y)

    # Standardize features (fit on train only, reuse on test).
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # KNN with 3-fold cross-validated grid search over k.
    param_dict = {"n_neighbors": [1, 3, 5, 7, 9, 11]}
    estimator = GridSearchCV(KNeighborsClassifier(), param_grid=param_dict, cv=3)
    estimator.fit(x_train, y_train)

    # Evaluate on the held-out split.
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)

    # Grid-search diagnostics.
    print("最佳参数：\n", estimator.best_params_)
    print("最佳结果：\n", estimator.best_score_)
    print("最佳估计器：\n", estimator.best_estimator_)
    print("交叉验证结果：\n", estimator.cv_results_)
    print(data_final)

def nb_news():
    """Classify 20-newsgroups articles with multinomial naive Bayes.

    Fetches the full corpus, splits it, converts the text to TF-IDF
    features, fits the classifier, and prints evaluation results.
    """
    # 1. Download/load the complete 20-newsgroups corpus.
    # NOTE(review): data_home is a hard-coded local path.
    news = fetch_20newsgroups(data_home="D:/06git/00python/17_机器学习/nb_news", subset="all")
    # 2. Split into train/test subsets.
    x_train, x_test, y_train, y_test = train_test_split(news.data, news.target)

    # 3. TF-IDF text features: fit the vocabulary on train only.
    vectorizer = TfidfVectorizer()
    x_train = vectorizer.fit_transform(x_train)
    x_test = vectorizer.transform(x_test)

    # 4. Multinomial NB suits term-frequency-style features.
    estimator = MultinomialNB()
    estimator.fit(x_train, y_train)

    # 5. Evaluate on the held-out split.
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)


def decision_iris():
    """Classify the iris dataset with an entropy decision tree.

    Loads the data, splits it, fits a decision tree using information
    gain, prints evaluation results, and exports the tree as a
    Graphviz .dot file.
    """
    # 1. Load and split the data (fixed seed for reproducibility).
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=22)

    # 2. Fit a decision tree using the entropy (information gain) criterion.
    tree = DecisionTreeClassifier(criterion="entropy")
    tree.fit(x_train, y_train)

    # 3. Evaluate on the held-out split.
    y_predict = tree.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    score = tree.score(x_test, y_test)
    print("准确率为:\n", score)

    # Export the fitted tree for Graphviz visualization.
    export_graphviz(tree, out_file="iris_tree.dot", feature_names=iris.feature_names)

def titanic_demo():
    """Predict Titanic passenger survival with a decision tree.

    Reads the titanic CSV, imputes missing ages, one-hot encodes the
    features via DictVectorizer, fits an entropy decision tree, prints
    evaluation results, and exports the tree as a Graphviz .dot file.
    """
    titanic = pd.read_csv("./titanic/titanic.csv")

    # Select feature columns and the target. .copy() gives an
    # independent frame so the imputation below does not write into a
    # view of `titanic` (the original sliced without copying and then
    # used fillna(inplace=True), which raises SettingWithCopyWarning
    # and can silently fail to modify the data).
    x = titanic[["pclass", "age", "sex"]].copy()
    y = titanic["survived"]

    # Impute missing ages with the column mean; plain assignment is
    # the reliable (and non-deprecated) replacement for inplace fillna.
    x["age"] = x["age"].fillna(x["age"].mean())

    # Convert rows to dicts so DictVectorizer can one-hot encode the
    # categorical fields (pclass, sex).
    x = x.to_dict(orient="records")

    # Split into train/test subsets (fixed seed for reproducibility).
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=22)

    # Fit the vectorizer on train only, reuse the transform on test.
    transfer = DictVectorizer()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # Fit an entropy-criterion decision tree.
    estimator = DecisionTreeClassifier(criterion="entropy")
    estimator.fit(x_train, y_train)

    # Evaluate on the held-out split.
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)

    # Overall accuracy on the test split.
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)

    # Export the fitted tree for Graphviz visualization.
    export_graphviz(estimator, out_file="titanic_tree.dot", feature_names=transfer.get_feature_names_out())


if __name__ == "__main__":
    # Script entry point: uncomment exactly one demo to run it.
    # knn_iris()
    # knn_iris_gscv()
    # facebook_demo()
    # nb_news()
    # decision_iris()
    titanic_demo()