# -*- coding: utf-8 -*-

import logging
import os
import pickle
import re
import jieba
import nltk
import numpy as np
import pandas as pd
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

logging.basicConfig(level=logging.INFO)

# Directory containing this module; all data files and pickled
# artifacts (vocabulary, tf-idf transformer, model) live next to it.
_module_dir = os.path.dirname(__file__)
base_path = os.path.abspath(_module_dir)
logging.info("base path :{}".format(base_path))

def train():
    """Train a MultinomialNB spam classifier on the SMSSpamCollection dataset.

    Reads the tab-separated ``SMSSpamCollection`` file next to this module,
    preprocesses the messages (lowercase, strip punctuation, tokenize, stem),
    fits a CountVectorizer + TfidfTransformer + MultinomialNB pipeline, and
    persists three pickle files next to this module: the vectorizer
    vocabulary, the fitted tf-idf transformer, and the trained model.

    Returns:
        float: accuracy on the held-out 10% test split.
    """
    # Two columns, no header: label ('ham'/'spam') and the raw message text.
    df = pd.read_table(os.path.join(base_path, 'SMSSpamCollection'),
                       sep='\t',
                       header=None,
                       names=['label', 'message'])

    logging.info("dataset ok...")

    # --- Preprocessing ---

    # Encode labels: ham -> 0, spam -> 1.
    df['label'] = df.label.map({'ham': 0, 'spam': 1})

    df['message'] = df.message.map(lambda x: x.lower())

    # Strip punctuation. regex=True must be explicit: since pandas 2.0
    # Series.str.replace defaults to literal matching, which would silently
    # stop removing punctuation. Substitute a space (not '') so training-time
    # text matches exactly what preprocess() produces at inference time.
    df['message'] = df.message.str.replace(r'[^\w\s]', ' ', regex=True)

    # Tokenize, then stem every token; reuse one stemmer for the whole column.
    df['message'] = df['message'].apply(nltk.word_tokenize)
    stemmer = PorterStemmer()
    df['message'] = df['message'].apply(lambda x: [stemmer.stem(y) for y in x])
    df = df.dropna()
    df['message'] = df['message'].apply(lambda x: ' '.join(x))

    # Bag-of-words counts followed by tf-idf weighting.
    count_vect = CountVectorizer(decode_error="replace")
    counts = count_vect.fit_transform(df['message'])
    tfidftransformer = TfidfTransformer()
    tfidf = tfidftransformer.fit_transform(counts)

    # Persist the vocabulary so preprocess() can rebuild the same vectorizer.
    feature_vect_path = os.path.join(base_path, 'sms_feature_vact.pkl')
    with open(feature_vect_path, 'wb') as f:
        pickle.dump(count_vect.vocabulary_, f)
    logging.info('词向量已保存...')

    # Persist the fitted tf-idf transformer for inference.
    tfidftransformer_path = os.path.join(base_path, 'sms_tfidftransformer.pkl')
    with open(tfidftransformer_path, 'wb') as f:
        pickle.dump(tfidftransformer, f)
    logging.info('tf-idf已保存...')

    X_train, X_test, y_train, y_test = train_test_split(
        tfidf,
        df['label'],
        test_size=0.1,
        random_state=69
    )

    # --- Training ---
    logging.info('start train model...')
    model = MultinomialNB().fit(X_train, y_train)

    predicted = model.predict(X_test)

    # Save the trained model.
    model_file = os.path.join(base_path, "sms_classify_v1.0.pkl")
    with open(model_file, 'wb') as f:
        pickle.dump(model, f)
    logging.info("model saved at: {}".format(model_file))
    return np.mean(predicted == y_test)

def preprocess(email: str):
    """Transform a raw message string into a tf-idf feature row.

    Mirrors the preprocessing in train(): lowercase, replace punctuation
    with spaces, tokenize, stem, then vectorize with the vocabulary and
    tf-idf transformer that train() saved next to this module.

    Args:
        email: raw message text.

    Returns:
        Dense numpy array of shape (1, n_features) — a single tf-idf row.
    """
    # Normalize case and strip punctuation (raw string for the regex).
    email = email.lower()
    email = re.sub(r'[^\w\s]', ' ', email)

    # Tokenize into words.
    email = nltk.word_tokenize(email)
    logging.info('分词后结果: {}'.format(email[:2]))

    # Stem every token; hoist the stemmer instead of instantiating
    # one PorterStemmer per token as the original did.
    stemmer = PorterStemmer()
    email = [stemmer.stem(e) for e in email]

    # Re-join so CountVectorizer can consume a single document string.
    email = ' '.join(email)

    # Rebuild the vectorizer from the saved vocabulary.
    # NOTE: pickle.load is only safe here because these files are produced
    # locally by train(); never load pickles from untrusted sources.
    feature_vect_path = os.path.join(base_path, 'sms_feature_vact.pkl')
    with open(feature_vect_path, "rb") as f:  # 'with' closes the leaked handle
        vocabulary = pickle.load(f)
    loaded_vec = CountVectorizer(
        decode_error="replace",
        vocabulary=vocabulary
    )
    logging.info('词向量加载完成...')

    # Load the fitted tf-idf transformer and weight the count vector.
    tfidftransformer_path = os.path.join(base_path, 'sms_tfidftransformer.pkl')
    with open(tfidftransformer_path, "rb") as f:
        tfidftransformer = pickle.load(f)
    counts_vec = tfidftransformer.transform(loaded_vec.transform([email]))
    logging.info('tf-idf加载完成...')

    return counts_vec.toarray()


def predict(data):
    """Classify a raw message string as 'ham' or 'spam'.

    Args:
        data: raw message text.

    Returns:
        str: 'ham' (label 0) or 'spam' (label 1).
    """
    # Index 0 -> 'ham', index 1 -> 'spam', matching the mapping in train().
    labels = ['ham', 'spam']

    data = preprocess(data)

    # NOTE: pickle is unsafe on untrusted input; this model file is
    # produced locally by train().
    model_file = os.path.join(base_path, "sms_classify_v1.0.pkl")
    with open(model_file, 'rb') as f:  # 'with' closes the leaked handle
        model = pickle.load(f)

    result = model.predict(data)
    logging.info('predict result: {}'.format(result[0]))

    return labels[result[0]]

if __name__ == "__main__":
    # Train and persist the model when run as a script.
    train()
