# -*- coding: utf-8 -*-

from gensim.models.word2vec import Word2Vec
from numpy import array
from sklearn.externals import joblib
from sklearn.naive_bayes import GaussianNB

# from gensim.models.doc2vec import Doc2Vec
import jieba
import warnings

from data_source import FileSource
from dict_model.WordFilter import filter_stop
from config import pos_path, neg_path, data_path


def sent2word(sentence):
    """Tokenize *sentence* with jieba and remove stop words.

    Parameters
    ----------
    sentence : str
        Raw input text (Chinese expected, given the jieba tokenizer).

    Returns
    -------
    list
        Tokens with stop words removed, as produced by ``filter_stop``.
    """
    # jieba.cut returns a generator; materialize it directly instead of
    # the original manual append loop (same result, idiomatic).
    word_list = list(jieba.cut(sentence, cut_all=False))
    return filter_stop(word_list)


def amplify(vec):
    """Scale *vec* by 1e4, then square element-wise.

    Works on plain numbers and on numpy arrays (the operators broadcast).
    """
    scaled = vec * 1e4
    return scaled ** 2


class Sentiment:
    """Word-level sentiment classifier.

    A Word2Vec model ("profiler") trained on the combined positive+negative
    corpus maps words to vectors; a Gaussian naive Bayes classifier labels
    each (amplified) word vector as positive (1) or negative (-1). A
    sentence's score is the sum of its per-word predictions (see predict).

    NOTE(review): this code targets the pre-1.0 gensim API
    (``Word2Vec()`` with no corpus, ``model[word]``, ``model.vocab``,
    ``train(source)`` without ``epochs``/``total_examples``) and
    ``sklearn.externals.joblib``, which was removed in scikit-learn 0.23.
    Confirm the pinned library versions before touching this class.
    """

    def __init__(self):
        # Gaussian NB fit on amplified word vectors with labels +1/-1.
        self.classifier = GaussianNB()
        # Word2Vec trained on the combined corpus (see train()).
        self.profiler = Word2Vec()
        self.word_source = FileSource()  # combined corpus: pos + neg files
        self.pos_source = FileSource()   # positive-only corpus
        self.neg_source = FileSource()   # negative-only corpus
        self.word_vec = []   # training features: one amplified vector per vocab word
        self.label_vec = []  # parallel labels: 1 for pos vocabulary, -1 for neg

    def train(self):
        """Train Word2Vec on the full corpus, then fit the Bayes classifier
        on per-word vectors labelled by which corpus each word appears in.
        """
        self.word_source.filter = sent2word
        self.word_source.open(pos_path)
        self.word_source.open(neg_path)
        # train Word2Vec
        self.profiler.build_vocab(self.word_source)
        self.profiler.train(self.word_source)
        # get word vector
        # pos
        self.pos_source.filter = sent2word
        self.pos_source.open(pos_path)
        # Throwaway model used only to enumerate the positive corpus vocabulary;
        # the vectors themselves come from self.profiler.
        pos_profiler = Word2Vec()
        pos_profiler.build_vocab(self.pos_source)
        for word in pos_profiler.vocab:
            self.word_vec.append(amplify(self.profiler[word]))
            self.label_vec.append(1)  # pos
        # neg
        self.neg_source.filter = sent2word
        self.neg_source.open(neg_path)
        # NOTE(review): a word occurring in BOTH corpora is appended twice
        # here with conflicting labels — possibly intentional soft
        # labelling; verify.
        neg_profiler = Word2Vec()
        neg_profiler.build_vocab(self.neg_source)
        for word in neg_profiler.vocab:
            self.word_vec.append(amplify(self.profiler[word]))
            self.label_vec.append(-1)  # neg
        # train naive bayes
        self.classifier.fit(self.word_vec, self.label_vec)

    def predict(self, sentence):
        """Score *sentence*: classify each word as +1/-1 and sum the labels.

        NOTE(review): words absent from the Word2Vec vocabulary will raise
        here (old-gensim ``model[word]`` lookup) — presumably inputs share
        the training vocabulary; verify.
        """
        word_list = sent2word(sentence)
        # self.profiler.update_vocab(word_list)
        result = []
        for word in word_list:
            word_vec = amplify(self.profiler[word])
            # ignore warning
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=DeprecationWarning)
                # NOTE(review): passes a 1-D vector; modern sklearn requires
                # a 2-D (1, n_features) array — works only on old versions
                # (hence the suppressed DeprecationWarning).
                word_sent = self.classifier.predict(array(word_vec))  # bayes classify
            result.append(word_sent[0])
        # NOTE(review): the "- 2" is a hard-coded bias offset on the vote
        # sum; its calibration rationale is not recorded here — confirm.
        return sum(result) - 2

    def test(self):
        """Smoke test: load saved models and print scores for sample sentences."""
        self.load()
        cases = [
            '大家好才是真的好',
            '这是一本好书',
            '这本书就是垃圾',
            '这种玩意还是扔掉好',
            '非常非常喜欢'
        ]
        for case in cases:
            result = self.predict(case)
            print(case, result)

    def save(self, **kwargs):
        """Persist both models under data_path.

        Keyword Args:
            word2vec: filename for the Word2Vec model (default 'word2vec.model').
            bayes: filename for the Bayes classifier (default 'bayes').
        """
        # save word2vec
        filename = kwargs.get('word2vec')
        if filename:
            filename = data_path(filename)
        else:
            filename = data_path('word2vec.model')
        self.profiler.save(filename)
        # save bayes
        filename = kwargs.get('bayes')
        if filename:
            filename = data_path(filename)
        else:
            filename = data_path('bayes')
        joblib.dump(self.classifier, filename)

    def load(self, **kwargs):
        """Load both models saved by save(); accepts the same keyword args."""
        # load word2vec
        filename = kwargs.get('word2vec')
        if filename:
            filename = data_path(filename)
        else:
            filename = data_path('word2vec.model')
        self.profiler = Word2Vec.load(filename)
        # load bayes
        filename = kwargs.get('bayes')
        if filename:
            filename = data_path(filename)
        else:
            filename = data_path('bayes')
        self.classifier = joblib.load(filename)


if __name__ == '__main__':
    # Build the model from the corpora, then run the built-in smoke test.
    sentiment = Sentiment()
    sentiment.train()
    # sentiment.save()
    sentiment.test()
