import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt

# Step 1: Download and preprocess the UCI Wine dataset.
# The raw file has no header row; the first column is the class label (1-3),
# followed by 13 numeric chemical-analysis features.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
columns = ["Class", "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium",
           "Total phenols", "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins",
           "Color intensity", "Hue", "OD280/OD315 of diluted wines", "Proline"]
data = pd.read_csv(url, header=None, names=columns)

# Keep only classes 1 and 2 — a two-class problem, so LDA has exactly
# n_classes - 1 = 1 discriminant axis.
class_mask = data["Class"].isin([1, 2])
filtered_data = data.loc[class_mask]
X = filtered_data.drop(columns="Class").to_numpy()  # 13-feature matrix
y = filtered_data["Class"].to_numpy()               # class labels (1 or 2)

# Step 2: Standardize features, then apply PCA for dimensionality reduction.
# PCA is scale-sensitive: the Wine features differ by orders of magnitude
# (e.g. Proline is in the hundreds-to-thousands while Hue is ~1), so fitting
# on raw values makes the first component essentially the Proline axis.
# Z-score each column so every feature contributes on equal footing.
feature_std = X.std(axis=0)
feature_std[feature_std == 0] = 1.0  # guard: avoid divide-by-zero on constant columns
X_std = (X - X.mean(axis=0)) / feature_std

pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_std)

# Step 3: Apply LDA (supervised) for dimensionality reduction.
# With two classes the maximum number of discriminant axes is 1.
lda = LDA(n_components=1)
X_lda = lda.fit_transform(X_std, y)

# Output PCA results
print("PCA reduced features (first 5 samples):\n", X_pca[:5])

# Visualize the PCA (2-D) and LDA (1-D) projections side by side,
# coloring each sample by its class label.
fig, (ax_pca, ax_lda) = plt.subplots(1, 2, figsize=(12, 5))

# Left panel: first two principal components.
ax_pca.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis', edgecolor='k', s=50)
ax_pca.set_title('PCA (2D)')
ax_pca.set_xlabel('Principal Component 1')
ax_pca.set_ylabel('Principal Component 2')

# Right panel: the single discriminant axis, drawn along y = 0
# so the 1-D projection reads as a strip plot.
ax_lda.scatter(X_lda[:, 0], np.zeros_like(X_lda[:, 0]),
               c=y, cmap='viridis', edgecolor='k', s=50)
ax_lda.set_title('LDA (1D)')
ax_lda.set_xlabel('Linear Discriminant 1')
ax_lda.set_yticks([])

plt.tight_layout()
plt.show()
