
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr

from sklearn.datasets import load_iris, fetch_20newsgroups
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction import DictVectorizer
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier, export_graphviz


def dataset_demo():
    """Train an entropy-based decision tree on the iris dataset and print its accuracy.

    Also exports the fitted tree to ``iris_tree.dot`` for graphviz rendering.
    """
    iris = load_iris()
    # Train/test features and targets: 80/20 split, fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )

    print("决策树--信息熵")
    tree_clf = DecisionTreeClassifier(criterion="entropy")
    tree_clf.fit(x_train, y_train)

    accuracy = tree_clf.score(x_test, y_test)
    # Dump the tree structure for visualization with `dot`.
    export_graphviz(tree_clf, out_file="iris_tree.dot")

    print("准确率为:\n", accuracy)
    return None


if __name__ == "__main__":
    # Run only the basic decision-tree demo when executed as a script;
    # the demo functions defined below this guard are never invoked here.
    # (dataset_demo())
    dataset_demo()


def dataset_demo_DecisionTreeClassifier():
    """Compare three classifiers (decision tree, naive Bayes, KNN) on the iris data.

    Each section splits the data identically (test_size=0.2, random_state=22),
    fits a model, and prints its test-set accuracy. The fitted decision tree
    is additionally exported to ``iris_tree.dot``.
    """
    iris = load_iris()

    # Train/test features and targets (80/20 split, fixed seed).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )

    print("决策树--信息熵")
    estimator = DecisionTreeClassifier(criterion="entropy")
    estimator.fit(x_train, y_train)
    # Model evaluation: built-in accuracy on the held-out test set.
    score = estimator.score(x_test, y_test)
    export_graphviz(estimator, out_file="iris_tree.dot")
    print("准确率为:\n", score)

    # Fresh split so the naive Bayes section starts from raw (unscaled) features.
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )
    print("朴素贝叶斯")
    estimator = MultinomialNB()
    estimator.fit(x_train, y_train)
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)

    print("KNN")
    # Standardize features: fit the scaler on the training data only and
    # reuse it on the test data. BUG FIX: the original called fit_transform
    # on x_test, scaling the test set with its own statistics (data leakage
    # / inconsistent feature space).
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    estimator = KNeighborsClassifier(n_neighbors=3)
    estimator.fit(x_train, y_train)
    score = estimator.score(x_test, y_test)
    print("准确率为:\n", score)

    return None


def dataset_demo_MultinomialNB():
    """Classify the 20-newsgroups corpus with TF-IDF features and multinomial NB."""
    news = fetch_20newsgroups(subset="all")
    print(news)

    x_train, x_test, y_train, y_test = train_test_split(
        news.data, news.target, test_size=0.2, random_state=22
    )

    # Text feature extraction: fit TF-IDF on the training documents only,
    # then apply the same vocabulary / idf weights to the test documents.
    vectorizer = TfidfVectorizer()
    x_train = vectorizer.fit_transform(x_train)
    x_test = vectorizer.transform(x_test)

    # Multinomial naive Bayes operates directly on the sparse TF-IDF matrix.
    model = MultinomialNB()
    model.fit(x_train, y_train)

    # Evaluation 1: element-wise comparison of predictions vs. ground truth.
    y_predict = model.predict(x_test)
    print("y_ predict:\n", y_predict)
    print("直接比对真实值和预测值:\n", y_test == y_predict)

    # Evaluation 2: overall accuracy on the test split.
    score = model.score(x_test, y_test)
    print("准确率为:\n", score)
    return None



def dataset_demo_knn():
    """KNN on iris with standardization, then hyperparameter search via GridSearchCV.

    Prints the plain-KNN accuracy, the grid-searched accuracy, the best
    parameters / CV results, and a count of correct vs. incorrect predictions.
    """
    iris = load_iris()
    # Train/test split (80/20, fixed seed).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )

    print("------------------------"
          "-------------")

    # Standardize: fit on training data only and apply the same transform to
    # the test data. BUG FIX: the original called fit_transform on x_test,
    # scaling it with its own statistics instead of the training statistics.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    estimator = KNeighborsClassifier(n_neighbors=3)
    estimator.fit(x_train, y_train)
    estimator_x = estimator.predict(x_test)
    score = estimator.score(x_test, y_test)
    print(score)
    print("------------score-------------------------")

    # 10-fold cross-validated grid search over the neighbor count.
    param_dict = {"n_neighbors": [1, 3, 5, 7, 9, 11]}
    estimator = GridSearchCV(estimator, param_grid=param_dict, cv=10)
    estimator.fit(x_train, y_train)
    estimator_x = estimator.predict(x_test)
    score = estimator.score(x_test, y_test)
    print(score)
    print("-------------score1------------------------")

    print("estimator_x:\n", estimator_x)
    print("-------------------------------------")

    # Best hyperparameters / results found by the grid search.
    print("最佳参数:\n", estimator.best_params_)
    print("最佳结果:\n", estimator.best_score_)
    print("最佳估计器:\n", estimator.best_estimator_)
    # Full cross-validation results table.
    print("交叉验证结果:\n", estimator.cv_results_)

    print("-------------------------------------")

    # Count correct vs. incorrect predictions (replaces the manual
    # `if flag == True` loop with a vectorized comparison).
    is_true = int(np.sum(estimator_x == y_test))
    is_false = len(y_test) - is_true
    print(is_false)
    print(is_true)
    return None


def dataset_demo_pca():
    """Demonstrate variance-threshold selection, Pearson correlation, and PCA on iris."""
    iris = load_iris()
    features = iris.data

    print("------------------------"
          "-------------")
    df = pd.DataFrame(features, columns=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'])
    df = df.iloc[:, 0:4]
    print("\n", df)

    # Feature selection: drop columns whose variance is below 0.5.
    selector = VarianceThreshold(threshold=0.5)
    selected = selector.fit_transform(df)

    print("------------------------"
          "-------------")
    print(selected.shape)
    print("------------------")
    # Pearson correlation between two columns: (coefficient, p-value).
    print(pearsonr(df['SepalLength'], df['SepalWidth']))
    print("------------------")
    # Keep enough principal components to explain 99% of the variance.
    pca_auto = PCA(n_components=0.99)
    print(pca_auto.fit_transform(df))

    # Train/test split (result unused here; kept for parity with the other demos).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )
    print("------------------")

    # Small handmade matrix reduced to exactly two principal components.
    X = np.array([[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9],
                  [10, 11, 12]])
    pca_fixed = PCA(n_components=2)
    print(pca_fixed.fit_transform(X))
    return None




def dataset_demo2():
    """Standardize the iris features (zero mean, unit variance) and print the result.

    BUG FIX: the original instantiated a ``MinMaxScaler`` and immediately
    overwrote it with a ``StandardScaler`` — the dead assignment is removed
    and the result variable renamed, since the output was standardized, not
    min-max scaled.
    """
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'])
    df = df.iloc[:, :4]
    print("\n", df)

    transfer = StandardScaler()
    data_scaled = transfer.fit_transform(df)
    print(data_scaled)

    # Train/test split (fixed seed; unused beyond the demo).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=22
    )
    return None


def dataset_demo1():
    """One-hot encode dict records (the 'city' key) while passing numbers through."""
    data = [{'city': '北京', 'temperature': 100},
            {'city': '上海', 'temperature': 60},
            {'city': '深圳', 'temperature': 30}]

    # 1) Instantiate the transformer; sparse=False returns a dense ndarray.
    transfer = DictVectorizer(sparse=False)

    # 2) Fit and transform in one step.
    data_new = transfer.fit_transform(data)

    print(data_new)
    # BUG FIX: get_feature_names() was deprecated in scikit-learn 1.0 and
    # removed in 1.2; get_feature_names_out() is the supported replacement.
    print(transfer.get_feature_names_out())

    return None






#
# # 设置随机种子以获得可重复的结果
# np.random.seed(0)
#
# # 生成1000个均值为0,方差为1的数据点
# data = np.random.normal(loc=0, scale=1, size=1000)
#
# # 绘制直方图
# plt.hist(data, bins=30, density=True, alpha=0.6, color='b')
#
# # 添加正态分布曲线
# mu, sigma = 0, 1
# x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
# plt.plot(x, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (x - mu)**2 / (2 * sigma**2) ), linewidth=2, color='r')
#
# # 设置图表标题和轴标签
# plt.title('Histogram of Normal Distribution (mean=0, variance=1)')
# plt.xlabel('Value')
# plt.ylabel('Frequency')
#
# # 显示图表
# plt.show()