from sklearn import datasets
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import  train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import pandas as pd


def load_datasets():
    """
    Demonstrate loading sklearn's built-in small and downloadable datasets.

    Fetches the 20-newsgroups corpus (downloaded on first use) and prints
    its raw documents and target labels. Text corpora expose no
    feature_names attribute, unlike the tabular toy datasets (e.g. iris).
    :return: None
    """
    news = datasets.fetch_20newsgroups(subset='all')
    # Print the raw documents, then the integer class labels.
    for attr in ('data', 'target'):
        print(getattr(news, attr))
    return None

def splitdatasets():
    """
    Split the iris dataset into train/test partitions.

    Performs the same 80/20 split twice with different random seeds to
    show that random_state controls which samples land in each partition.
    :return: None
    """
    ir = datasets.load_iris()
    # data holds the feature matrix, target the class labels.
    for seed in (11, 1111):
        # x_train/x_test: feature splits; y_train/y_test: target splits.
        x_train, x_test, y_train, y_test = train_test_split(
            ir.data, ir.target, test_size=0.2, random_state=seed)
        print(x_train)
        print(x_test)
        print(y_train)
        print(y_test)
    return None


def knncls():
    """
    Predict Facebook check-in places with the K-nearest-neighbors algorithm.

    Loads ./data/FBlocation/train.csv, restricts the area, engineers time
    features (weekday, day, hour), drops rarely visited places, then
    grid-searches the number of neighbors for a KNN classifier.
    :return: None
    """
    # 1. Load the raw check-in data
    data = pd.read_csv('./data/FBlocation/train.csv')
    # 2. Restrict to the area x < 1.0 and y < 1.0
    #    (comment previously claimed x: 1~1.25, y: 2.5~2.75, which did not
    #    match the filter below)
    data = data.query("x < 1.0 & y < 1.0")
    # Work on an explicit copy so the column inserts below do not raise
    # pandas' SettingWithCopyWarning on a filtered view.
    data = data.copy()
    # 3. Derive time features from the unix timestamp: weekday, day, hour
    time_value = pd.DatetimeIndex(pd.to_datetime(data['time'], unit='s'))
    data['weekday'] = time_value.weekday
    data['day'] = time_value.day
    # 'hour' was announced in the original step-3 comment but never added.
    data['hour'] = time_value.hour
    data = data.drop(['time'], axis=1)
    # 4. Keep only places with more than 400 check-ins
    place_count = data.groupby('place_id').count()
    tf = place_count[place_count.row_id > 400].reset_index()
    data = data[data['place_id'].isin(tf.place_id)]
    print(data)
    # 5. Separate the target column from the feature columns
    y = data['place_id']
    x = data.drop(["place_id"], axis=1)
    # 6. Train/test split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
    # 6.1 Standardize: fit the scaler on the training data only and reuse
    #     the same transformation on the test data.
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    x_test = std.transform(x_test)
    # 7. Grid search + 2-fold cross validation over the number of neighbors
    knn = KNeighborsClassifier(n_neighbors=3)
    crv = GridSearchCV(knn, param_grid={"n_neighbors": [1, 3, 5]}, cv=2)
    crv.fit(x_train, y_train)
    print("网格搜索和交叉验证")
    print(crv.score(x_test, y_test))
    print(crv.best_score_)
    print(crv.best_estimator_)
    print(crv.cv_results_)
    return None


def byas():
    """
    Classify 20-newsgroups articles with multinomial naive Bayes.

    Splits the corpus 80/20, converts the text to TF-IDF features, trains
    MultinomialNB, and prints the predictions and the test accuracy.
    :return: None
    """
    news = datasets.fetch_20newsgroups(subset='all')
    # Split raw documents and labels; x_test holds the held-out documents.
    x_train, x_test, y_train, y_test = train_test_split(
        news.data, news.target, test_size=0.2)
    print(y_test)
    # TF-IDF: learn the vocabulary on the training text and reuse it
    # for the test text.
    vectorizer = TfidfVectorizer()
    x_train = vectorizer.fit_transform(x_train)
    x_test = vectorizer.transform(x_test)
    # Train the naive Bayes model and evaluate it on the held-out set.
    model = MultinomialNB()
    model.fit(x_train, y_train)
    y_predict = model.predict(x_test)
    print("预测的结果为: ", y_predict)
    accuracy = model.score(x_test, y_test)
    print("准确率为: ", accuracy)

    return None


def decisiontree():
    """
    Analyze Titanic survival with a decision-tree classifier.

    Loads the dataset over HTTP, one-hot encodes age/sex/pclass with
    DictVectorizer, trains a depth-3 tree, prints the test accuracy, and
    exports the tree to ./tree.dot for graphviz rendering.
    :return: None
    """
    # Load the dataset
    # NOTE(review): this URL has historically been unreliable — confirm it
    # still resolves before relying on this function.
    titan = pd.read_csv("http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt")
    # Feature columns and the target column; copy() so the imputation below
    # writes to an independent frame instead of a view (avoids pandas'
    # SettingWithCopyWarning and a potentially lost write).
    x = titan[["age", "sex", "pclass"]].copy()
    y = titan["survived"]
    # Impute missing ages with the column mean
    x['age'] = x['age'].fillna(x['age'].mean())

    # Train/test split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

    # DictVectorizer expects a list of {column: value} records
    x_train = x_train.to_dict(orient="records")
    x_test = x_test.to_dict(orient="records")
    # One-hot encode categorical features; fit on train, reuse on test
    dict_vec = DictVectorizer(sparse=False)
    x_train = dict_vec.fit_transform(x_train)
    print(dict_vec.get_feature_names())
    x_test = dict_vec.transform(x_test)
    # Train and evaluate the tree
    de = DecisionTreeClassifier(max_depth=3)
    de.fit(x_train, y_train)
    score = de.score(x_test, y_test)
    print("准确率: ", score)
    # Labels follow DictVectorizer's alphabetical column order:
    # age, pclass=1st, pclass=2nd, pclass=3rd, sex=female, sex=male.
    # The last label was previously 'sex=女性' ("female"), mislabeling the
    # sex=male column in the exported tree.
    export_graphviz(de, './tree.dot',
                    feature_names=['年龄', 'pclass=1st', 'pclass=2nd',
                                   'pclass=3rd', 'sex=female', 'sex=male'])

    return None

if __name__ == '__main__':
    # Removed the stray no-op string literal that sat here.
    # Uncomment one of the demos below to run it instead:
    # load_datasets()
    # splitdatasets()
    # knncls()
    # byas()
    decisiontree()