# --- Practice: loading sklearn datasets and splitting train/test sets ---
# from sklearn.datasets import load_iris
# iris_datasets=load_iris()
# print(iris_datasets,type(iris_datasets))
# load_* -> small datasets bundled with sklearn
# fetch_* -> large datasets downloaded on demand
# from sklearn.datasets import load_iris
# from sklearn.model_selection import train_test_split
# iris=load_iris()
# # print("鸢尾花数据集：\n",iris)
# # print("查看数据集描述：\n",iris["DESCR"])
# # print("查看特征值：\n",iris.data,iris.data.shape)
# # print("查看特征值名字：\n",iris.feature_names)
# # print("=========================================================")
# # print(iris.target)
#
# x_train,x_test,y_train,y_test=train_test_split(iris.data,iris.target,test_size=0.2,random_state=22)
# print("训练集的特征值：\n",x_train,x_train.shape)

# Feature extraction from dicts.
# Use DictVectorizer when the dataset has many categorical features
# (e.g. city + temperature): categories are one-hot encoded, numeric
# features pass through; sparse=False returns a dense ndarray.
# from sklearn.feature_extraction import DictVectorizer
# data=[{'city': '北京','temperature':100},{'city': '上海','temperature':60},
# {'city': '深圳','temperature':30}]
# transfer=DictVectorizer(sparse=False)
# data_new=transfer.fit_transform(data)
# print(data_new)
# print("特征名字：\n",transfer.get_feature_names_out())

# Text feature extraction (word-level bag-of-words).
# CountVectorizer counts token occurrences per document; stop_words
# drops the listed tokens from the vocabulary.
# from sklearn.feature_extraction.text import CountVectorizer
# data=["life is short,i like like python",
# "life is too long,i dislike python"]
# transfer=CountVectorizer(stop_words=["is","too"])
# data_new=transfer.fit_transform(data)
# print("data_new\n",data_new.toarray())
# print("特征名字：\n",transfer.get_feature_names_out())

# Chinese text: note the spaces — CountVectorizer tokenizes on whitespace
# and cannot segment Chinese itself, so words must be pre-separated.
# from sklearn.feature_extraction.text import CountVectorizer
# data=["我 爱 北京 天安门",
# "天安门 上 太阳 升"]
# transfer=CountVectorizer()
# data_new=transfer.fit_transform(data)
# print("data_new\n",data_new.toarray())
# print("特征名字：\n",transfer.get_feature_names_out())

# # Chinese text feature extraction: segment each sentence with jieba into
# # space-joined words, then feed the segmented strings to CountVectorizer.
# import jieba
# from sklearn.feature_extraction.text import CountVectorizer
# def cut_word(text):
#     return " ".join(list(jieba.cut(text)))
# data=["今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。",
# "我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。",
# "如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"]
# data_new=[]
# for sentence in data:
#     data_new.append(cut_word(sentence))
# # print(data_new)
# transfer=CountVectorizer()
# data_final=transfer.fit_transform(data_new)
# print(data_final.toarray(),"\n")
# print(transfer.get_feature_names_out())



# TF-IDF: same jieba-segmentation pipeline as above, but TfidfVectorizer
# weights each term by term-frequency x inverse-document-frequency
# instead of raw counts, down-weighting words common to all documents.
# import jieba
# from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
# def cut_word(text):
#     return " ".join(list(jieba.cut(text)))
# data=["今天很残酷，明天更残酷，后天很美好，但绝对大部分是死在明天晚上，所以每个人不要放弃今天。",
# "我们看到的从很远星系来的光是在几百万年之前发出的，这样当我们看到宇宙时，我们是在看它的过去。",
# "如果只用一种方式了解某样事物，你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"]
# data_new=[]
# for sentence in data:
#     data_new.append(cut_word(sentence))
# # print(data_new)
# transfer=TfidfVectorizer()
# data_final=transfer.fit_transform(data_new)
# print(data_final.toarray(),"\n")
# print(transfer.get_feature_names_out())

# Feature preprocessing: dimensionless scaling
# (normalization / standardization), using the first 3 columns of dating.txt.
# MinMaxScaler rescales each feature to [0, 1]; sensitive to outliers.
# import pandas as pd
# from sklearn.preprocessing import MinMaxScaler
# data=pd.read_csv("dating.txt")
# print(type(data))
# # print("data:\n",data)
# data =data.iloc[:,:3]
# transfer=MinMaxScaler()
# data_new=transfer.fit_transform(data)
# print(data_new)
# StandardScaler transforms each feature to zero mean / unit variance;
# more robust to outliers than min-max scaling.
# from sklearn.preprocessing import StandardScaler
# import pandas as pd
# from sklearn.preprocessing import MinMaxScaler
# data=pd.read_csv("dating.txt")
# print(type(data))
# # print("data:\n",data)
# data =data.iloc[:,:3]
# transfer=StandardScaler()
# data_new=transfer.fit_transform(data)
# print(data_new)






