import numpy as np
import urllib.request
from sklearn.model_selection import train_test_split
# Download the UCI Spambase dataset (57 continuous features + binary spam label).
# BUGFIX: the original URL literal had a leading space inside the quotes; it only
# worked because urllib strips surrounding whitespace from URLs.
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data"
raw_data = urllib.request.urlopen(url)
dataset = np.loadtxt(raw_data, delimiter=",")

x = dataset[:, 0:-1]  # feature columns
# Binarize each feature against its column median: 2 if strictly above the
# median, 1 otherwise — two discrete values per feature for Naive Bayes.
m = np.median(x, axis=0)
x = (x > m) * 2 + (x <= m) * 1
y = dataset[:, -1]  # last column is the label (1 = spam, 0 = not spam)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=17)

class NaiveBayesClassifier:
    """Categorical Naive Bayes for discrete-valued features.

    ``fit`` estimates class priors and per-feature value likelihoods from
    the training data; ``predict`` scores each sample in log-space and
    returns the most probable class.
    """

    def fit(self, x_train, y_train):
        """Estimate priors P(c) and likelihoods P(x_i = v | c).

        Parameters
        ----------
        x_train : ndarray of shape (n_samples, n_features), discrete values
        y_train : ndarray of shape (n_samples,), class labels
        """
        self.classes = np.unique(y_train)
        # Prior P(c) = fraction of training samples with label c.
        self.priors = {c: np.mean(y_train == c) for c in self.classes}
        self.likelihoods = {}

        for c in self.classes:
            subset = x_train[y_train == c]
            # likelihoods[c][i][v] = P(feature i takes value v | class c).
            # Values are enumerated over the FULL training set, so every class
            # has an entry (possibly 0.0) for every globally observed value.
            self.likelihoods[c] = {
                i: {val: np.mean(subset[:, i] == val) for val in np.unique(x_train[:, i])}
                for i in range(x_train.shape[1])
            }

    def predict(self, x_test):
        """Return the most probable class for each row of x_test."""
        eps = 1e-6  # floor for zero/unseen probabilities
        predictions = []
        for x in x_test:
            posteriors = {}
            for c in self.classes:
                posterior = np.log(self.priors[c])  # log-space avoids underflow
                for i, val in enumerate(x):
                    # BUGFIX: a value present in the full training set but
                    # absent from this class's subset is stored with
                    # probability 0.0, and np.log(0.0) is -inf (with a
                    # RuntimeWarning). Floor both unseen values and stored
                    # zeros at eps instead of only handling missing keys.
                    p = self.likelihoods[c][i].get(val, eps)
                    posterior += np.log(max(p, eps))
                posteriors[c] = posterior
            predictions.append(max(posteriors, key=posteriors.get))
        return np.array(predictions)

# Fit the Naive Bayes model on the training split, then score it on the
# held-out test split.
nb_classifier = NaiveBayesClassifier()
nb_classifier.fit(x_train, y_train)
y_pred = nb_classifier.predict(x_test)

# Test error = fraction of held-out samples whose predicted label disagrees
# with the true label.
test_error = (y_pred != y_test).mean()
print(test_error)
