import pickle
import re
import nltk
import numpy as np
from sklearn import svm
import scipy.io as scio
def clearEmail(email):
    """Normalize a raw email body and return a list of stemmed word tokens.

    Preprocessing steps:
      * lower-case the text
      * strip HTML tags
      * canonicalize URLs ('httpaddr'), email addresses ('emailaddr'),
        digit runs ('number') and dollar signs ('dollar')
      * split on punctuation/whitespace, drop any remaining
        non-alphanumeric characters, and Porter-stem each token

    Parameters:
        email (str): raw email text.

    Returns:
        list[str]: non-empty stemmed tokens, in their original order.
    """
    email = email.lower()
    email = re.sub(r'<[^<>]+>', ' ', email)                       # remove all HTML tags
    email = re.sub(r'(http|https)://[^\s]*', 'httpaddr', email)   # replace URLs with 'httpaddr'
    email = re.sub(r'[^\s]+@[^\s]+', 'emailaddr', email)          # replace addresses with 'emailaddr'
    email = re.sub(r'\d+', 'number', email)                       # replace digit runs with 'number'
    email = re.sub(r'[$]+', 'dollar', email)                      # replace dollar signs with 'dollar'
    stemmer = nltk.stem.PorterStemmer()
    # Split the email into individual tokens on the delimiter set below.
    # NOTE: '-' is placed last so it is a literal delimiter; the original
    # '.-:' accidentally formed the character range '.' through ':'.
    # (Digits are already substituted away above, so behavior is unchanged.)
    tokens = re.split(r'[ @$/#.:&*+=\[\]?!(){},\'">_<;%-]', email)
    tokenList = []
    for token in tokens:
        token = re.sub(r'[^a-zA-Z0-9]', '', token)
        try:
            token = stemmer.stem(token)
        except Exception:  # the stemmer can fail on odd tokens; drop them
            continue
        if token:  # skip tokens emptied by the cleanup above
            tokenList.append(token)
    return tokenList




# Read each sample message and reduce it to a list of stemmed tokens.
emails = []
for path in ("emailSample1.txt", "emailSample2.txt",
             "spamSample1.txt", "spamSample2.txt"):
    with open(path) as fh:  # context manager closes the file handle
        emails.append(clearEmail(fh.read()))

# One row per email, one column per vocabulary word (vocab.txt has 1899 entries).
features = np.zeros((len(emails), 1899))

# Vocabulary words (second column of vocab.txt); the array index of a word
# is its feature column.  Renamed from `dict`, which shadowed the builtin.
vocab = np.loadtxt("vocab.txt", dtype='str', usecols=1)

print(len(vocab))
print(features)

# Build a word -> column-index map once so marking each token is an O(1)
# lookup instead of the original O(len(vocab)) scan per token.
# (Assumes vocab entries are unique, as in the standard vocab.txt.)
word_index = {word: k for k, word in enumerate(vocab)}
for i, tokens in enumerate(emails):
    for token in tokens:
        k = word_index.get(token)
        if k is not None:
            features[i][k] = 1  # binary indicator: email i contains word k


# Load the pre-trained SVM model.
# WARNING: pickle.load can execute arbitrary code from the file -- only
# unpickle model files from a trusted source.
with open('svm.model', 'rb') as fh:
    model = pickle.load(fh)

# To retrain from scratch instead of loading the pickled model:
#   train = scio.loadmat('spamTrain.mat')
#   model = svm.SVC(C=0.1, kernel='linear')
#   model.fit(train['X'], train['y'])

# Predict a label for each sample email (1 = spam, 0 = non-spam for the
# standard spamTrain.mat labels -- confirm against the training data).
res = model.predict(features)

for label in res:
    print(label)
