import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset
def _read_matrix(path):
    """Read a headerless CSV file into a 2-D NumPy array."""
    return pd.read_csv(path, header=None).values


def _to_pm1(labels):
    """Remap {0, 1} class labels to {-1, +1} as required by the SVM hinge loss."""
    return np.where(labels == 0, -1, 1)


# Load the train/test splits (features as matrices, labels flattened to 1-D).
train_data = _read_matrix("/mnt/data/digits_training_data.csv")
train_labels = _to_pm1(pd.read_csv("/mnt/data/digits_training_labels.csv", header=None).values.ravel())
test_data = _read_matrix("/mnt/data/digit_test_data.csv")
test_labels = _to_pm1(pd.read_csv("/mnt/data/digit_test_labels.csv", header=None).values.ravel())

# Hyperparameters for the SVM subgradient descent.
C = 3  # Slack cost: weight of the hinge-loss term relative to the regularizer
eta_0 = 0.001  # Initial learning rate
num_iterations = 1000  # Number of iterations

# Model parameters, zero-initialized: weight vector w and scalar bias b.
num_features = train_data.shape[1]
w = np.zeros(num_features)
b = 0

# Training accuracy recorded after each update, for plotting later.
training_accuracies = []

# Batch (sub)gradient descent on the primal SVM objective:
#   J(w, b) = 0.5 * ||w||^2 + C * sum_i max(0, 1 - y_i * (w . x_i + b))
for j in range(1, num_iterations + 1):
    # Functional margins; points with margin < 1 violate the hinge
    # constraint and are the only ones contributing to the subgradient.
    margin = train_labels * (train_data @ w + b)
    mask = margin < 1  # Points that are inside the margin or misclassified

    # Subgradient: regularizer term plus the hinge term summed over violators.
    # (np.sum over an empty selection is 0, so this is safe when mask is all-False.)
    grad_w = w - C * np.sum(train_labels[mask][:, np.newaxis] * train_data[mask], axis=0)
    grad_b = -C * np.sum(train_labels[mask])

    # Decaying (1/t-style) learning rate schedule.
    alpha_j = eta_0 / (1 + j * eta_0)

    # Update parameters
    w -= alpha_j * grad_w
    b -= alpha_j * grad_b

    # Compute training accuracy after the update.
    # FIX: np.sign(0) returns 0, which matches neither label in {-1, +1},
    # so points scoring exactly 0 were silently counted as errors.
    # Classify score >= 0 as +1 instead.
    scores = train_data @ w + b
    predictions = np.where(scores >= 0, 1, -1)
    accuracy = np.mean(predictions == train_labels)
    training_accuracies.append(accuracy)

# Visualize how training accuracy evolves across the optimization run.
iterations = range(1, num_iterations + 1)
plt.figure(figsize=(8, 5))
plt.plot(iterations, training_accuracies, label="Training Accuracy")
plt.xlabel("Iteration")
plt.ylabel("Accuracy")
plt.title("Iteration vs. Training Accuracy")
plt.legend()
plt.grid()
plt.show()
