import pandas
import jieba
from scipy.stats import pearsonr
from sklearn.datasets import load_iris
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV


def demo():
    """Load the iris dataset, print its contents, and split it into train/test sets."""
    iris = load_iris()
    print("描述：\n", iris["DESCR"])
    print("特征值：\n", iris.data, iris.data.shape)
    print("特征名：\n", iris.feature_names)
    print("目标值：\n", iris.target_names)
    print("目标：\n", iris.target)
    # Split the data: 80% train / 20% test, fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=22, test_size=0.2
    )
    print("x_train \n", x_train)
    print("x_train shape\n", x_train.shape)
    return None


# 字典特征提取，提取特征值，特征名
# Dictionary feature extraction: one-hot encode the categorical field,
# pass numeric fields through, and show the resulting feature names.
def extract_dict():
    samples = [{'city': '北京', 'temperature': 100}, {'city': '上海', 'temperature': 60},
               {'city': '深圳', 'temperature': 30}]

    # sparse defaults to True (sparse matrix); request a dense ndarray instead.
    vectorizer = DictVectorizer(sparse=False)
    encoded = vectorizer.fit_transform(samples)
    print('特征值：\n', encoded)
    print("特征名: \n", vectorizer.get_feature_names_out())
    return None


# 文本特征抽取
def extract_text():
    data = ["Clearly, SOAP is separating is not ", "the not same as a null pointer"]
    """
    统计样本每个特征值出现的次数 特征名是每个单词（只有一次）
    中文是按照一个句子进行分词
    特征值：每个样本中的，每个特征名出现的次数
    """

    transfer = CountVectorizer()
    new_data = transfer.fit_transform(data)
    print("二维数组形式：\n", transfer.get_feature_names_out())
    print("二维数组形式：\n", new_data.toarray())

    print("原始数据形式:\n", new_data)
    return None


def cut_word(text):
    """Segment a Chinese sentence with jieba and join the tokens with spaces.

    :param text: the raw sentence to segment
    :return: a single space-separated string of tokens
    """
    # jieba.cut yields a generator; join consumes it directly.
    return " ".join(jieba.cut(text))


def test2():
    """Bag-of-words extraction on Chinese sentences pre-segmented by jieba."""
    sentences = ["一种还是一种今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。",
                 "我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。",
                 "如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"]

    # Tokenize every sentence so CountVectorizer can split on whitespace.
    segmented = [cut_word(sentence) for sentence in sentences]

    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(segmented)
    print("二维数组形式：\n", vectorizer.get_feature_names_out())
    print("二维数组形式：\n", counts.toarray())


def pre_process():
    """Min-max normalization demo.

    Some features can be several orders of magnitude larger than others;
    min-max scaling maps them into a common range. It is driven entirely by
    the column min/max, so it is sensitive to outliers.
    """
    frame = pandas.read_csv("dating.txt", sep='\t')
    print(frame)
    # Keep only the first three (feature) columns.
    frame = frame.iloc[:, :3]
    # Scale each feature into [2, 3] instead of the default [0, 1].
    scaler = MinMaxScaler(feature_range=(2, 3))
    scaled = scaler.fit_transform(frame)
    print(scaled)
    return None


def stand_demo():
    """Standardization demo (zero mean, unit variance).

    With plenty of data, individual outliers barely shift the mean and
    standard deviation, so this is more robust than min-max scaling.
    """
    frame = pandas.read_csv("dating.txt", sep="\t")

    # NOTE(review): unlike pre_process, this scales every column of the file,
    # presumably including any target column — confirm dating.txt is all numeric.
    scaler = StandardScaler()
    standardized = scaler.fit_transform(frame)
    print(standardized)
    return None


def variance_demo():
    """Drop low-variance features: columns with variance <= 2 are removed."""
    frame = pandas.read_csv("dating.txt", sep="\t")
    selector = VarianceThreshold(threshold=2)
    filtered = selector.fit_transform(frame)
    # Shape shows how many columns survived the threshold.
    print(filtered.shape)
    return None


def pearson_demo():
    """Print the Pearson correlation between the milage and Liters columns."""
    frame = pandas.read_csv("dating.txt", sep="\t")
    correlation = pearsonr(frame["milage"], frame["Liters"])
    print("r= ", correlation)
    return None


def knn_test():
    """Classify iris with KNN (k=3) after standardizing the features."""
    # Load and split the dataset (default 25% test split, fixed seed).
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=22)

    # Fit the scaler on the training data only, then apply the same
    # transform to the test data to avoid leakage.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Train the estimator and report test-set accuracy.
    model = KNeighborsClassifier(n_neighbors=3)
    model.fit(x_train, y_train)
    accuracy = model.score(x_test, y_test)
    print(accuracy)
    return None


def knn_grid_search_cv():
    """Tune KNN's n_neighbors with 3-fold cross-validated grid search."""
    # Load and split the dataset (default 25% test split, fixed seed).
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=22)

    # Standardize features; fit on train only, reuse the transform on test.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # The grid key must match the estimator's parameter name: n_neighbors.
    base_model = KNeighborsClassifier()
    search = GridSearchCV(base_model,
                          param_grid={"n_neighbors": [1, 2, 3, 4]},
                          cv=3)
    search.fit(x_train, y_train)

    print("最佳参数", search.best_params_)
    print("最佳结果", search.best_score_)
    print("最佳估计器", search.best_estimator_)
    return None


def tree_demo():
    """Classify the iris dataset with an entropy-based decision tree.

    :return: None
    """
    dataset = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.target, random_state=22)

    # Fit the tree and predict on the held-out split.
    model = DecisionTreeClassifier(criterion="entropy")
    model.fit(x_train, y_train)
    y_predict = model.predict(x_test)

    print("y_predict\n", y_predict)
    print("直接比对真实值和预测值：\n", y_test == y_predict)
    return None


 if __name__ == "__main__":
     tree_demo()
     print("1")

