from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import jieba

def datasets_dome():
    """Load and return the iris dataset as a sklearn Bunch object."""
    return load_iris()

def dict_test():
    """Demonstrate dictionary feature extraction with DictVectorizer.

    Vectorizes a small list of city/temperature records, prints the learned
    feature names and the transformed matrix, and returns the matrix.
    """
    records = [
        {'city': '北京', 'temperature': 100},
        {'city': '上海', 'temperature': 60},
        {'city': '深圳', 'temperature': 30},
    ]
    # sparse=True yields a scipy sparse matrix (non-zero entries listed by
    # position); pass sparse=False to get a dense array instead.
    vectorizer = DictVectorizer(sparse=True)
    matrix = vectorizer.fit_transform(records)
    names = vectorizer.get_feature_names_out()  # feature names after one-hot/passthrough
    print(names)
    print(matrix)
    return matrix

def text_test():
    """Demonstrate text feature extraction with CountVectorizer.

    Fits a CountVectorizer (with one stop word) on two short Chinese
    sentences and prints the sparse matrix, its dense form, and the
    learned feature names.
    """
    corpus = [
        "我爱洗澡，噢噢噢噢",
        "我不爱洗澡，呜呜呜呜"
    ]

    vectorizer = CountVectorizer(stop_words=['噢噢噢噢'])
    counts = vectorizer.fit_transform(corpus)

    divider = '------------------------'
    print(divider)
    print(counts)             # sparse (position, value) representation
    print(divider)
    print(counts.toarray())   # dense word-count matrix
    print(divider)
    print(vectorizer.get_feature_names_out())

def chain_cut_word_test():
    """Chinese text feature extraction: segment with jieba, then count-vectorize.

    Segments a Chinese sentence into words, joins them with spaces so
    CountVectorizer's whitespace/token splitting can work, vectorizes the
    segmented text, prints the count matrix and feature names, and returns
    the space-joined segmented string.
    """
    text = """将数据集的特征转换成字典类型"""
    # Chinese has no spaces between words; jieba.cut yields the word
    # segments, which we join with spaces for the vectorizer.
    text_temp = " ".join(jieba.cut(text))

    # BUG FIX: the original looped `for i in text_temp`, which iterates the
    # *string* character by character, producing a list of single-character
    # "documents". CountVectorizer's default token pattern requires tokens
    # of 2+ word characters, so no tokens were found and fit_transform
    # raised "empty vocabulary". Treat the segmented sentence as one document.
    data_list = [text_temp]
    print(data_list)

    # stop_words=[''] in the original was a no-op (the empty string is never
    # a token) and only triggered warnings; omit it.
    transfer = CountVectorizer()
    data_val = transfer.fit_transform(data_list)
    data_val_array = data_val.toarray()
    featureNames = transfer.get_feature_names_out()

    print(data_val_array)
    print(featureNames)

    return text_temp

def tfidf_test():
    """Demonstrate TF-IDF text feature extraction with TfidfVectorizer.

    Fits a TfidfVectorizer on two short Chinese sentences and prints the
    sparse matrix, its dense form, and the learned feature names.
    """
    corpus = [
        "我爱洗澡，噢噢噢噢",
        "我不爱洗澡，呜呜呜呜"
    ]

    vectorizer = TfidfVectorizer()
    weights = vectorizer.fit_transform(corpus)

    divider = '------------------------'
    print(divider)
    print(weights)            # sparse (position, weight) representation
    print(divider)
    print(weights.toarray())  # dense TF-IDF weight matrix
    print(divider)
    print(vectorizer.get_feature_names_out())

if __name__ == '__main__':
    iri = datasets_dome()             # iris dataset (sklearn Bunch)
    descr = iri["DESCR"]              # dataset description text
    featureNames = iri.feature_names  # list of feature names
    data = iri.data                   # feature matrix (2-D array of samples x features)
    dataShape = iri.data.shape        # shape tuple of the feature matrix
    # print(iri)
    # print('-------------------------')
    # print(dataShape)

    # ------------------------- train/test split -------------------------
    # x_train, x_test, y_train, y_test = train_test_split(
    #     iri.data, iri.target, test_size=0.2, random_state=22)
    # print('training set features: ', x_train, x_train.shape)
    # print('test set targets: ', y_train, y_train.shape)

    # data_val = dict_test()
    # data_val = text_test()
    data_val = tfidf_test()
    # print(data_val)