import numpy as np
import pandas as pd

# Load the toy dataset used in Li Hang's naive-Bayes example.
# Each row is (features..., label); labels are presumably in {-1, +1} — TODO confirm
# against the data file, since the remap below assumes exactly that range.
data = pd.read_csv('datasets_ML2/bayes_lihang.txt')

mails = data.iloc[:, :-1].values   # all columns except the last: feature matrix
labels = data.iloc[:, -1:].values  # last column kept 2-D, shape (n_samples, 1)
# Map labels {-1, +1} -> {0, 1} so they can serve as class indices below.
labels = (labels+1)/2
print("x, y:", mails, labels)

# Single test sample: first feature 2, second feature 'S'.
testMails = [[2, 'S']]

# Prior class probabilities P(y=1) and P(y=0), estimated by class frequency.
# Note: these stay (1,)-shaped arrays because labels is 2-D.
class_p1 = sum(labels)/len(labels)
class_p0 = 1 - class_p1

# create vocab
def createVocab(mails):
    """Return the vocabulary: a list of every distinct feature value
    observed across all samples in *mails* (order unspecified)."""
    seen = set()
    for sample in mails:
        seen.update(sample)
    return list(seen)

# vector mat
def vectorMat(vocab, mails):
    """Encode samples as binary set-of-words indicator vectors.

    Parameters
    ----------
    vocab : list
        Vocabulary; column i of the result corresponds to vocab[i].
    mails : iterable of sequences
        Samples whose entries are looked up in *vocab*.  Out-of-vocabulary
        entries are reported on stdout and otherwise ignored.

    Returns
    -------
    np.ndarray
        Array of shape (len(mails), len(vocab)) with 0.0/1.0 entries
        (repeated words still yield 1, not a count).
    """
    # Build the word -> column map once instead of calling vocab.index()
    # (an O(len(vocab)) scan) for every word of every sample.  setdefault
    # keeps the FIRST occurrence, exactly matching list.index semantics.
    index = {}
    for i, word in enumerate(vocab):
        index.setdefault(word, i)
    vector_mat = []
    for mail in mails:
        vector = np.zeros(len(vocab))
        for word in mail:
            if word in index:
                vector[index[word]] = 1
            else:
                print("the word %s is not my vocab! " % word)
        vector_mat.append(vector)
    return np.array(vector_mat)

# word frequence
def trainNB(vector_mat, labels):
    """Estimate the log word-probability vector for each class.

    Counts are initialized to 1 (Laplace smoothing) so that no word ever
    gets probability zero; each class's counts are then normalized by
    their own total before taking the log.

    Parameters
    ----------
    vector_mat : np.ndarray, shape (n_samples, n_features)
        Binary indicator vectors produced by vectorMat.
    labels : array-like of shape (n_samples, 1)
        Class labels in {0, 1}.

    Returns
    -------
    (vect_p0, vect_p1) : pair of np.ndarray
        log P(word | class 0) and log P(word | class 1).
    """
    _, n_features = vector_mat.shape
    count0 = np.ones(n_features)
    count1 = np.ones(n_features)
    for row, label in zip(vector_mat, labels):
        if label == 0:
            count0 += row
        else:
            count1 += row
    vect_p0 = np.log(count0 / count0.sum())
    vect_p1 = np.log(count1 / count1.sum())
    return vect_p0, vect_p1

# test mail
def classifyNB(test_vect, vect_p0, vect_p1):
    """Classify each sample vector and print its label as -1 or 1.

    Scores each row of *test_vect* under both classes by summing the
    matching log word probabilities and adding the log prior, then prints
    the label of the higher-scoring class mapped back to {-1, +1}.
    Relies on the module-level priors class_p0 / class_p1.
    """
    log_prior0 = np.log(class_p0)
    log_prior1 = np.log(class_p1)
    for vect in test_vect:
        score0 = np.sum(vect * vect_p0) + log_prior0
        score1 = np.sum(vect * vect_p1) + log_prior1
        # Map the internal class index {0, 1} back to the labels {-1, +1}.
        label = 1 if score1 > score0 else -1
        print("class: ", label)

# main program
# first step: build the vocabulary of all distinct feature values
vocab = createVocab(mails)
# second step: encode the training samples as binary indicator vectors
vector_mat = vectorMat(vocab, mails)
# third step: estimate the per-class log word-probability vectors
vect_p0, vect_p1 = trainNB(vector_mat, labels)
# fourth step: encode the test sample the same way and classify it (prints -1 or 1)
vector_mat = vectorMat(vocab, testMails)
classifyNB(vector_mat, vect_p0, vect_p1)
