import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns

# Path to the penguins dataset CSV (hard-coded; adjust for your machine).
DATA_PATH = r"E:/qie.csv"

# Numeric morphometric features used for training, and the class label column.
FEATURE_COLUMNS = ['CulmenLength', 'CulmenDepth', 'FlipperLength']
LABEL_COLUMN = 'Species'


def load_data(path=DATA_PATH):
    """Read the penguins CSV at *path* into a DataFrame."""
    return pd.read_csv(path)


def explore(df):
    """Print a preview of *df* and visualize its distributions.

    Shows the first 5 rows, a count plot of the species distribution,
    one boxplot per numeric feature broken down by species, and the
    per-column count of missing values.
    """
    # First 5 rows of the dataset.
    print(df.head())

    # Distribution of the penguin species as a seaborn count plot.
    sns.countplot(x=LABEL_COLUMN, data=df)
    plt.show()

    # How FlipperLength, CulmenLength and CulmenDepth are distributed
    # for each species.
    for column in FEATURE_COLUMNS:
        sns.boxplot(data=df, x=LABEL_COLUMN, y=column)
        plt.show()

    # Count of missing values per column (rows with NaNs are dropped later).
    print(df.isnull().sum())


def prepare(df, test_size=0.3, random_state=42):
    """Drop incomplete rows and split *df* into train/test sets.

    Returns ``(X_train, X_test, y_train, y_test)`` with *test_size*
    (default 30%) of the data reserved for testing.
    """
    df = df.dropna()
    X = df[FEATURE_COLUMNS]
    y = df[LABEL_COLUMN]
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def train(X_train, y_train):
    """Fit and return a multiclass Logistic Regression model.

    ``multi_class='multinomial'`` is already the default for the lbfgs
    solver and the parameter is deprecated in scikit-learn >= 1.5, so it
    is omitted. ``max_iter`` is raised from the default 100 so lbfgs
    reliably converges on these unscaled features.
    """
    model = LogisticRegression(solver='lbfgs', max_iter=1000)
    model.fit(X_train, y_train)
    return model


def evaluate(model, X_test, y_test):
    """Predict on the test set, print and return the accuracy."""
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(f'Model Accuracy: {accuracy:.2f}')
    return accuracy


def main():
    """Run the full workflow: load, explore, split, train, evaluate."""
    df = load_data()
    explore(df)
    X_train, X_test, y_train, y_test = prepare(df)
    model = train(X_train, y_train)
    evaluate(model, X_test, y_test)


if __name__ == '__main__':
    main()