import pandas as pd
import re
import json
import numpy as np
import nltk
from sklearn.metrics import accuracy_score, classification_report
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
import jieba as jb

# Load the labeled data.
comment_data = pd.read_excel('havelabel.xlsx')

# Preprocessing: drop rows containing NaN, then drop rows whose Notes are
# empty or whitespace-only.  (The original check `!= ' '` only filtered out
# notes that were exactly one space character.)
comment_data = comment_data.dropna()
comment_data = comment_data[comment_data['Notes'].str.strip() != '']

# Map each 'Lab Status' label to an integer id (0, 1, 2, ...), and keep a
# lookup table from label text to label_id for later reporting/prediction.
comment_data['label_id'] = comment_data['Lab Status'].factorize()[0]
comment_id = comment_data[['Lab Status', 'label_id']].drop_duplicates().sort_values('label_id').reset_index(drop=True)

# Load English stopwords.
stopwords = nltk.corpus.stopwords.words("english")

# Segment each note and drop stopwords.  Lower-case first so that stopword
# filtering behaves the same here as in predict(), which lower-cases its
# input before segmentation (the original only lower-cased at predict time).
comment_data['cNotes'] = comment_data['Notes'].apply(
    lambda x: " ".join(w for w in jb.cut(x.lower()) if w not in stopwords))
# print(comment_data)

# LSTM modeling hyperparameters.
MAX_NB_WORDS = 50000        # vocabulary cap for the tokenizer
MAX_SEQUENCE_LENGTH = 250   # maximum length of each tokenized note
EMBEDDING_DIM = 100         # dimensionality of the embedding layer

# Build the vocabulary over the cleaned notes.  The filters string is a raw
# literal: the original non-raw '...\]...' relied on Python preserving the
# invalid '\]' escape (a DeprecationWarning, and a SyntaxError in future
# versions); r'...' produces the identical characters without the warning.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS,
                      filters=r'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~',
                      lower=True)
tokenizer.fit_on_texts(comment_data['cNotes'].values)
word_index = tokenizer.word_index
# print('number of distinct words: %s' % len(word_index))

# Convert texts to integer sequences and pad so all rows of X share one length.
X = tokenizer.texts_to_sequences(comment_data['cNotes'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)

# One-hot encode the multi-class labels.
Y = pd.get_dummies(comment_data['label_id']).values

# Split into training and test sets.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.10, random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# Define the model.
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
# One output unit per class.  Y.shape[1] replaces the hard-coded 2: since Y
# comes from pd.get_dummies it has one column per distinct 'Lab Status'
# label, and a fixed Dense(2) would break whenever there are not exactly
# two labels in the data.
model.add(Dense(Y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()

# Train the model; early stopping halts training once validation loss
# stops improving by at least min_delta for `patience` epochs.
epochs = 5
batch_size = 64
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
accr = model.evaluate(X_test, Y_test)
print('Test set\n  Loss: {:0.3f}\n  Accuracy: {:0.3f}'.format(accr[0], accr[1]))

# Evaluate the LSTM model: collapse softmax outputs / one-hot labels
# to integer class ids.
y_pred = model.predict(X_test)
y_pred = y_pred.argmax(axis=1)
Y_test = Y_test.argmax(axis=1)

# accuracy_score's signature is (y_true, y_pred); the original passed them
# swapped, which happens to be harmless for accuracy (it is symmetric) but
# is misleading and would be wrong for asymmetric metrics.
print('accuracy %s' % accuracy_score(Y_test, y_pred))
print(classification_report(Y_test, y_pred, target_names=comment_id['Lab Status'].values))


def predict(text):
    """Classify *text* with the trained LSTM and return its 'Lab Status' label."""
    # Lower-case, segment, and strip stopwords — mirrors the training-time
    # preprocessing applied to the 'cNotes' column.
    cleaned = " ".join(w for w in jb.cut(text.lower()) if w not in stopwords)
    # Encode and pad to the sequence length the model was trained on.
    padded = pad_sequences(tokenizer.texts_to_sequences([cleaned]),
                           maxlen=MAX_SEQUENCE_LENGTH)
    # Take the highest-probability class from the softmax output.
    cat_id = model.predict(padded).argmax(axis=1)[0]
    # Map the numeric class id back to its human-readable label text.
    return comment_id[comment_id.label_id == cat_id]['Lab Status'].values[0]

# Classify the unlabeled records and save the result.
unpro = pd.read_excel('unpro.xlsx')
# Predict a label for every note.  Series.apply replaces the original
# index loop with chained-indexing assignment (unpro['nlplabel'][i] = ...),
# which triggers pandas' SettingWithCopyWarning and can silently write to
# a temporary copy instead of the DataFrame.  The original overwrote every
# row anyway, so this is behaviorally equivalent.
unpro['nlplabel'] = unpro['Notes'].apply(predict)
unpro.to_excel('unprolabelnlp.xlsx')





