from keras import Input, Model
from keras.layers import Dense, Conv1D, Embedding, GlobalMaxPooling1D, Concatenate, Dropout, Flatten, LSTM, \
    Bidirectional
from keras import backend as K
from keras_multi_head import MultiHeadAttention
from tensorflow.python.keras.layers import Lambda
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
import joblib


'''
return_sequences=True returns the hidden state at every time step; =False returns only the last one.
return_state=True makes the layer return three variables: lstm, state_h, state_c, where
lstm is the last hidden state or the hidden states of every time step
(depending on return_sequences),
state_h is the hidden state of the last time step, and
state_c is the cell state of the last time step.
'''


# Textcnn
class TextCnn:
    """TextCNN classifier: parallel 1-D convolutions with several kernel
    sizes over a pre-embedded sequence, each global-max-pooled, then
    concatenated and projected to class scores.
    """

    def __init__(self, maxlen, dims, class_num=2, last_activation='softmax'):
        self.maxlen = maxlen
        self.dims = dims
        self.class_num = class_num
        self.last_activation = last_activation

    def get_model(self):
        """Build and return the (uncompiled) Keras model; prints a summary."""
        seq_input = Input((self.maxlen, self.dims,))
        # One branch per kernel size; each yields a 128-dim pooled feature.
        pooled = [
            GlobalMaxPooling1D()(Conv1D(128, k, activation='relu')(seq_input))
            for k in (3, 4, 5)
        ]
        features = Concatenate()(pooled)
        features = Dropout(rate=0.4)(features)
        probs = Dense(self.class_num, activation=self.last_activation)(features)
        model = Model(inputs=seq_input, outputs=probs)
        model.summary()
        return model


# lstm
class TextLstm:
    """Single-layer LSTM text classifier over a pre-embedded sequence.

    Args:
        maxlen: number of time steps in the input sequence.
        dims: feature dimension of each time step.
        class_num: number of output classes.
        last_activation: activation of the final Dense layer.
    """

    def __init__(self, maxlen, dims, class_num=2, last_activation='softmax'):
        self.maxlen = maxlen
        self.class_num = class_num
        self.last_activation = last_activation
        self.dims = dims

    def get_model(self):
        """Build and return the (uncompiled) Keras model; prints a summary.

        Bug fix: the original passed return_sequences=True, so the final
        Dense produced one prediction per time step — shape
        (maxlen, class_num) — instead of a single (class_num,) vector per
        sample. For whole-sequence classification only the last hidden
        state is needed (return_sequences defaults to False).
        """
        input = Input((self.maxlen, self.dims,))
        # Keep only the last hidden state so the head sees a fixed-size vector.
        output = LSTM(units=128, activation='relu', dropout=0.2, recurrent_dropout=0.2)(input)
        output = Dense(units=256, activation='relu')(output)
        output = Dropout(rate=0.4)(output)
        output = Dense(units=self.class_num, activation=self.last_activation)(output)
        model = Model(inputs=input, outputs=output)
        model.summary()
        return model


# Bilstm
class TextBilstm:
    """Bidirectional-LSTM text classifier over a pre-embedded sequence.

    Args:
        maxlen: number of time steps in the input sequence.
        dims: feature dimension of each time step.
        class_num: number of output classes.
        last_activation: activation of the final Dense layer.
    """

    def __init__(self, maxlen, dims, class_num=2, last_activation='softmax'):
        self.maxlen = maxlen
        self.class_num = class_num
        self.last_activation = last_activation
        self.dims = dims

    def get_model(self):
        """Build and return the (uncompiled) Keras model; prints a summary.

        Bug fix: the original set return_state=True on the wrapped LSTM.
        Inside Bidirectional this makes the layer return a 5-element list
        (output plus forward/backward h and c states), and applying Dense
        to that list fails at graph-build time. Dropping return_state
        yields the concatenated last hidden states (a 256-dim vector).
        """
        input = Input((self.maxlen, self.dims,))
        output = Bidirectional(
            LSTM(units=128, activation='relu', dropout=0.2, recurrent_dropout=0.2))(input)
        output = Dense(units=256, activation='relu')(output)
        output = Dropout(rate=0.4)(output)
        output = Dense(units=self.class_num, activation=self.last_activation)(output)
        model = Model(inputs=input, outputs=output)
        model.summary()
        return model


# Attention+Bilstm
class TextBilstmAttention:
    """Bidirectional-LSTM + multi-head self-attention text classifier.

    Args:
        maxlen: number of time steps in the input sequence.
        dims: feature dimension of each time step.
        class_num: number of output classes.
        last_activation: activation of the final Dense layer.
        head_nums: number of attention heads.
    """

    def __init__(self, maxlen, dims, class_num=2, last_activation='softmax', head_nums=2):
        self.maxlen = maxlen
        self.class_num = class_num
        self.last_activation = last_activation
        self.dims = dims
        self.head_nums = head_nums

    def get_model(self):
        """Build and return the (uncompiled) Keras model; prints a summary.

        Bug fixes vs. the original:
        - return_state=True inside Bidirectional makes the layer return a
          5-element list (output + fw/bw h and c states); the attention
          layer cannot consume that list. Removed.
        - The attention output is still a (maxlen, 256) sequence, so the
          classifier head produced one prediction per time step. A global
          max-pool collapses it to a single fixed-size vector first.
        """
        input = Input((self.maxlen, self.dims,))
        # Full hidden-state sequence is needed as attention input.
        output = Bidirectional(
            LSTM(units=128, activation='relu', dropout=0.2, recurrent_dropout=0.2,
                 return_sequences=True))(input)
        output = MultiHeadAttention(head_num=self.head_nums)(output)
        # Collapse the time axis so the head emits one vector per sample.
        output = GlobalMaxPooling1D()(output)
        output = Dense(units=256, activation='relu')(output)
        output = Dropout(rate=0.4)(output)
        output = Dense(units=self.class_num, activation=self.last_activation)(output)
        model = Model(inputs=input, outputs=output)
        model.summary()
        return model


# TextRcnn
class TextRcnn:
    """RCNN-style text classifier: left/right recurrent context BiLSTMs
    concatenated with the current-token embeddings, followed by a
    convolution, global max-pool and a small dense head.
    """

    def __init__(self, maxlen, dims, class_num=2, last_activation='softmax'):
        self.maxlen = maxlen
        self.class_num = class_num
        self.last_activation = last_activation
        self.dims = dims

    def get_model(self):
        """Build and return the (uncompiled) Keras model; prints a summary.

        Expects three inputs of shape (maxlen, dims): the current token
        sequence plus its left- and right-context sequences.
        """
        shape = (self.maxlen, self.dims)
        cur_in = Input(shape)
        left_in = Input(shape)
        right_in = Input(shape)

        left_ctx = Bidirectional(LSTM(128, return_sequences=True))(left_in)
        right_ctx = Bidirectional(LSTM(128, return_sequences=True, go_backwards=True))(right_in)
        # go_backwards emits time steps in reverse order; flip them back so
        # they line up with the forward sequence before concatenation.
        # NOTE(review): Lambda comes from tensorflow.python.keras while the
        # other layers come from keras — confirm the two stacks interoperate.
        right_ctx = Lambda(lambda t: K.reverse(t, axes=1))(right_ctx)

        merged = Concatenate(axis=2)([left_ctx, cur_in, right_ctx])
        feat = Conv1D(filters=64, kernel_size=3, activation='relu')(merged)
        feat = GlobalMaxPooling1D()(feat)
        feat = Dense(units=128, activation='relu')(feat)
        feat = Dropout(rate=0.4)(feat)
        feat = Dense(units=128, activation='relu')(feat)
        probs = Dense(self.class_num, activation=self.last_activation)(feat)

        model = Model(inputs=[cur_in, left_in, right_in], outputs=probs)
        model.summary()
        return model


# Naive_bayes
class NaiveBayes:
    """Bag-of-words Naive Bayes text classifier.

    Pairs a CountVectorizer (word 1-4 grams, 20k max features) with an
    sklearn estimator, exposing fit/predict/score/save/load helpers.
    """

    def __init__(self, classifier=None):
        # Bug fix: the original default `classifier=MultinomialNB()` is a
        # mutable default argument, evaluated once at definition time, so
        # every default-constructed instance shared ONE estimator and
        # fitting one instance silently refit the others.
        self.classifier = MultinomialNB() if classifier is None else classifier
        self.vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 4), max_features=20000)

    def features(self, x):
        """Vectorize an iterable of documents with the fitted vectorizer."""
        return self.vectorizer.transform(x)

    def fit(self, x, y):
        """Fit the vectorizer on x, then the classifier on the features."""
        self.vectorizer.fit(x)
        self.classifier.fit(self.features(x), y)

    def predict(self, x):
        """Predict the label of a single document x (a string)."""
        return self.classifier.predict(self.features([x]))

    def score(self, x, y):
        """Return mean accuracy on documents x with true labels y."""
        return self.classifier.score(self.features(x), y)

    def save_model(self, path):
        """Persist (classifier, vectorizer) to path via joblib."""
        joblib.dump((self.classifier, self.vectorizer), path)

    def load_model(self, path):
        """Restore (classifier, vectorizer) from a joblib dump at path."""
        self.classifier, self.vectorizer = joblib.load(path)

# Svm
class Svm():
    """TF-IDF + SVM text classifier.

    Pairs a TfidfVectorizer (word 1-4 grams, 20k max features) with an
    sklearn estimator, exposing fit/predict/score/save/load helpers.
    """

    def __init__(self, classifier=None):
        # Bug fix: the original default `classifier=SVC(kernel='linear')`
        # is a mutable default argument, evaluated once at definition time,
        # so every default-constructed instance shared ONE estimator and
        # fitting one instance silently refit the others.
        self.classifier = SVC(kernel='linear') if classifier is None else classifier
        self.vectorizer = TfidfVectorizer(analyzer='word', ngram_range=(1, 4), max_features=20000)

    def features(self, x):
        """Vectorize an iterable of documents with the fitted vectorizer."""
        return self.vectorizer.transform(x)

    def fit(self, x, y):
        """Fit the vectorizer on x, then the classifier on the features."""
        self.vectorizer.fit(x)
        self.classifier.fit(self.features(x), y)

    def predict(self, x):
        """Predict the label of a single document x (a string)."""
        return self.classifier.predict(self.features([x]))

    def score(self, X, y):
        """Return mean accuracy on documents X with true labels y."""
        return self.classifier.score(self.features(X), y)

    def save_model(self, path):
        """Persist (classifier, vectorizer) to path via joblib."""
        joblib.dump((self.classifier, self.vectorizer), path)

    def load_model(self, path):
        """Restore (classifier, vectorizer) from a joblib dump at path."""
        self.classifier, self.vectorizer = joblib.load(path)

# 逻辑回归
class Logistic():
    """TF-IDF + logistic-regression text classifier.

    Pairs a TfidfVectorizer (word 1-4 grams, 20k max features) with an
    sklearn estimator, exposing fit/predict/score/save/load helpers.
    """

    def __init__(self, classifier=None):
        # Bug fix: the original default `classifier=LogisticRegression()`
        # is a mutable default argument, evaluated once at definition time,
        # so every default-constructed instance shared ONE estimator and
        # fitting one instance silently refit the others.
        self.classifier = LogisticRegression() if classifier is None else classifier
        self.vectorizer = TfidfVectorizer(analyzer='word', ngram_range=(1, 4), max_features=20000)

    def features(self, x):
        """Vectorize an iterable of documents with the fitted vectorizer."""
        return self.vectorizer.transform(x)

    def fit(self, x, y):
        """Fit the vectorizer on x, then the classifier on the features."""
        self.vectorizer.fit(x)
        self.classifier.fit(self.features(x), y)

    def predict(self, x):
        """Predict the label of a single document x (a string)."""
        return self.classifier.predict(self.features([x]))

    def score(self, X, y):
        """Return mean accuracy on documents X with true labels y."""
        return self.classifier.score(self.features(X), y)

    def save_model(self, path):
        """Persist (classifier, vectorizer) to path via joblib."""
        joblib.dump((self.classifier, self.vectorizer), path)

    def load_model(self, path):
        """Restore (classifier, vectorizer) from a joblib dump at path."""
        self.classifier, self.vectorizer = joblib.load(path)




