# Optimize the SGD implementation by using mini-batches and reducing iterations
# Hyperparameters for the mini-batch SVM training loop below.
num_iterations = 200  # Reduce the number of outer iterations for faster testing
batch_size = 32  # Use mini-batches instead of single-sample updates

# Reinitialize parameters
# NOTE(review): assumes `num_features` was set earlier in the file — confirm.
w_sgd = np.zeros(num_features)  # weight vector, one entry per feature
b_sgd = 0  # bias term; starts as int but becomes float after the first update

# Store training accuracy per iteration
# (one entry appended per outer epoch by the training loop below)
training_accuracies_sgd = []

# Mini-batch Stochastic Gradient Descent for SVM.
# Each epoch reshuffles the training set, sweeps it once in mini-batches,
# then records the full-training-set accuracy.
for epoch in range(1, num_iterations + 1):
    # Fresh random ordering of the sample indices for this epoch
    order = np.random.permutation(len(train_labels))

    for start in range(0, len(train_labels), batch_size):
        chosen = order[start:start + batch_size]
        xb = train_data[chosen]
        yb = train_labels[chosen]

        # Hinge-loss margin: rows with y*(x·w + b) < 1 (inside the margin
        # or misclassified) are the only ones that contribute to the gradient.
        viol = yb * (xb @ w_sgd + b_sgd) < 1

        # Gradient of the regularizer plus the hinge term.
        # NOTE(review): the regularizer is scaled by 1/len(train_labels) while
        # the hinge sum is an unnormalized batch sum — verify this mixed
        # scaling is intentional (vs. averaging both over the batch).
        grad_w = (1 / len(train_labels)) * w_sgd - C * np.sum(yb[viol][:, np.newaxis] * xb[viol], axis=0)
        grad_b = -C * np.sum(yb[viol])

        # Decaying step size; depends only on the epoch counter, not the batch
        step = eta_0 / (1 + epoch * eta_0)

        w_sgd -= step * grad_w
        b_sgd -= step * grad_b

    # Epoch-level training accuracy on the full training set
    predictions_sgd = np.sign(train_data @ w_sgd + b_sgd)
    accuracy_sgd = np.mean(predictions_sgd == train_labels)
    training_accuracies_sgd.append(accuracy_sgd)

# Plot training accuracy per iteration: batch GD (dashed) vs mini-batch SGD (solid)
plt.figure(figsize=(8, 5))
epochs_bgd = range(1, len(training_accuracies) + 1)
epochs_sgd = range(1, len(training_accuracies_sgd) + 1)
plt.plot(epochs_bgd, training_accuracies, linestyle='dashed', label="Batch Gradient Descent")
plt.plot(epochs_sgd, training_accuracies_sgd, linestyle='solid', label="Mini-batch SGD")
plt.xlabel("Iteration")
plt.ylabel("Accuracy")
plt.title("Iteration vs. Training Accuracy")
plt.legend()
plt.grid()
plt.show()
