from sklearn.datasets import load_iris

# 1. scikit-learn built-in datasets
#   load_*()                     -> small datasets bundled with the library
#   fetch_*(data_home="./data")  -> larger datasets downloaded on demand;
#                                   data_home sets the download directory
data = load_iris()

print(type(data))

# Hoist the repeated Bunch lookups into locals for readability.
features = data.get("data")
targets = data.get("target")

print("样本1:\n", features[0])
print("特征值数组：", features)
print("特征值数组长度：", len(features))

print("目标值数组：", targets)
print("目标值数组长度：", len(targets))

print("数据描述：", data.get("DESCR"))
print("特征名：", data.get("feature_names"))
print("目标名：", data.get("target_names"))

# 2. Dataset split = training set + test set
#   train_test_split(features, labels,
#                    test_size=<float fraction held out for testing>,
#                    random_state=<seed; different seeds give different splits>)
print("*" * 50)
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(data.get("data"), data.get("target"), test_size=0.2, random_state=12)

print("训练集特征值：\n", x_train, "\n", x_train.shape)
print("训练集目标值：\n", y_train)

# BUG FIX: this label previously read "测试集目标值" (test-set targets) while the
# line actually prints x_test, the test-set *features* — corrected the label.
print("测试集特征值：\n", x_test)
print("测试集目标值：\n", y_test)
