import pandas as pd

# Load the penguins dataset from the MS Learn ML-basics sample-data repo.
data_url = "https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/penguins.csv"
df = pd.read_csv(data_url)

# Peek at the first 5 rows to get a feel for the columns and values.
print(df.head(5))

# Visualize the distribution of penguin species with a bar plot in matplotlib.
import matplotlib.pyplot as plt

# Count how many rows (penguins) belong to each species.
species_counts = df['Species'].value_counts()
# Fixed typo in the chart title: "Numer" -> "Number".
species_counts.plot.bar(title="Number of penguins by species")

# Display the figure, then close it to release its resources.
plt.show()
plt.close()

# Visualize with boxplots how FlipperLength, CulmenLength and CulmenDepth
# are distributed for each species.
import seaborn as sns

# Loop over the measurement columns instead of repeating the same
# three-line plotting code once per column (DRY).  The f-string titles
# reproduce the original titles exactly.
for column in ["FlipperLength", "CulmenLength", "CulmenDepth"]:
    sns.boxplot(data=df, x="Species", y=column)
    plt.title(f"Distribution of {column} by Species")
    plt.show()

# Close the last figure.
plt.close()

# Show the rows that contain at least one missing value.
print(df[df.isnull().any(axis=1)])

# Drop every row that has any missing value.
# NOTE: the original followed this with df.dropna(how='all'), but after a
# plain dropna() no row can still be all-NaN, so that second call was a
# no-op and has been removed — along with the stray plt.close() calls
# that had no open figure, and the duplicated head()/isnull() prints.
df = df.dropna()

# Confirm the cleanup: first rows and per-column missing-value counts
# (all counts should now be 0).
print(df.head(5))
print(df.isnull().sum())

# Inspect the column dtypes before modeling.
print(df.dtypes)

# Split the data into features and labels.
features = df[['CulmenLength', 'CulmenDepth', 'FlipperLength']]
labels = df['Species']

from sklearn.model_selection import train_test_split

# The original code performed two consecutive splits (test_size=0.2,
# immediately overwritten by test_size=0.3).  Only the final 70/30 split
# had any effect, so the dead first split and its duplicate import have
# been removed.
# test_size=0.3 -> 30% of the rows are held out for testing.
# random_state=42 fixes the shuffle so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.3, random_state=42
)

# Print the shapes to confirm the data was split as intended.
print("Training features shape:", X_train.shape)
print("Test features shape:", X_test.shape)
print("Training labels shape:", y_train.shape)
print("Test labels shape:", y_test.shape)

# Train a Logistic Regression model for multiclass classification.
from sklearn.linear_model import LogisticRegression

# With solver='lbfgs', scikit-learn already fits a multinomial model for
# multiclass targets; the multi_class parameter is deprecated (removed
# in scikit-learn 1.7), so it is no longer passed explicitly.
# max_iter=1000 gives lbfgs enough iterations to converge.
model = LogisticRegression(solver='lbfgs', max_iter=1000, random_state=42)

# Fit the model on the training set.
model.fit(X_train, y_train)

# Evaluate the model:
# 1. Predict the labels of the test set.
y_pred = model.predict(X_test)

# 2. Compute accuracy from the predictions already made, instead of
#    calling model.score(), which would predict the test set a second
#    time.  The value is identical to model.score(X_test, y_test).
accuracy = (y_pred == y_test).mean()

print("Model accuracy:", accuracy)

