# !pip install pandas==0.23.4 -i "https://mirrors.aliyun.com/pypi/simple/"
# !pip install numpy==1.19.0 -i "https://mirrors.aliyun.com/pypi/simple/"
# !pip install wrapt --ignore-installed -i "https://mirrors.aliyun.com/pypi/simple/"
# !pip install tensorflow==1.14.0 -i "https://mirrors.aliyun.com/pypi/simple/"
# !pip install keras==2.3.1 -i "https://mirrors.aliyun.com/pypi/simple/"
# !pip install bert4keras -i "https://mirrors.aliyun.com/pypi/simple/"


import numpy as np
import pandas as pd


# --- EDA: load the data and inspect basic statistics ---
# NOTE(review): hard-coded Windows paths; consider making these configurable.
df_train = pd.read_csv('D:/wk/data/personal-NLP-learning/train_set.csv', sep='\t')
df_test = pd.read_csv('D:/wk/data/personal-NLP-learning/test_a.csv', sep='\t')
print(df_train.head())
print(df_test.head())
# Class distribution
print(df_train['label'].value_counts())
# 'text' is a space-separated string of integer token ids; convert each row
# to a list of ints for downstream processing.
df_train['text'] = df_train['text'].apply(lambda x: [int(tok) for tok in x.split()])
df_test['text'] = df_test['text'].apply(lambda x: [int(tok) for tok in x.split()])

# Text length distribution.
# Fix: the original computed describe() and discarded the result; print it.
print(df_train['text'].map(len).describe())
print(df_test['text'].map(len).describe())

# --- Build a token frequency dictionary ---
# NOTE(review): the vocabulary is built from df_test only — presumably it
# should also (or instead) cover df_train; confirm against vocabulary_size.
vocab = dict()
for text in df_test['text']:
    # Fix: the original per-row/per-token debug prints crashed with
    # TypeError ("text:" + list, "wprd: " + int); they are removed.
    for word in text:
        # Single-lookup idiomatic counting (original used get()/if-else).
        vocab[word] = vocab.get(word, 0) + 1
print(len(vocab))
# Ten smallest and ten largest token ids present in the corpus.
chars = sorted(vocab.items(), key=lambda x: x[0])
print(chars[:10])
print(chars[-10:])
# Ten most frequent tokens.
chars = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
print(chars[:10])

from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.layers import *
from keras.models import Model
from keras.optimizers import Adam

from bert4keras.snippets import sequence_padding, DataGenerator


# Training
## Hyperparameters
SEED = 2020              # random seed for the train/validation split
num_classes = 14         # number of target labels
vocabulary_size = 7600   # embedding table size; assumes all token ids < 7600 — TODO confirm against the vocab built above

maxlen = 1024            # sequences are truncated/padded to this length
batch_size = 512
embedding_dim = 256      # embedding vector size per token
num_filters = 512        # conv filters per filter width
filter_sizes = [3, 4, 5] # parallel convolution window widths (TextCNN)
drop = 0.5               # dropout rate before the softmax layer
lr = 1e-4                # Adam learning rate
epochs = 20

## Load data
# Hold out 20% of the training set for validation (fixed seed for reproducibility).
df_train, df_valid = train_test_split(df_train, test_size=0.2, random_state=SEED)
def load_data(df):
    """Convert a DataFrame into a list of (token_ids, label) pairs.

    Args:
        df: DataFrame with a 'text' column (list of int token ids per row)
            and an integer-like 'label' column.

    Returns:
        List of (text, int(label)) tuples, in row order.
    """
    # zip over the two columns is equivalent to (and much faster than)
    # iterating with DataFrame.iterrows().
    return [(text, int(label)) for text, label in zip(df['text'], df['label'])]

# Materialize the train / validation example lists.
train_data = load_data(df_train)
valid_data = load_data(df_valid)

class data_generator(DataGenerator):
    """Batch generator: yields fixed-length token-id batches plus labels."""

    def __init__(self, data, batch_size=32, buffer_size=None, random=False):
        super().__init__(data, batch_size, buffer_size)
        self.random = random  # whether forfit() shuffles each pass

    def __iter__(self, random=False):
        batch_token_ids, batch_labels = [], []
        for is_end, (text, label) in self.sample(random):
            # Truncate to maxlen, or right-pad with zeros up to maxlen.
            if len(text) > maxlen:
                token_ids = text[:maxlen]
            else:
                token_ids = text + [0] * (maxlen - len(text))
            batch_token_ids.append(token_ids)
            batch_labels.append([label])
            if is_end or len(batch_token_ids) == self.batch_size:
                yield [sequence_padding(batch_token_ids)], sequence_padding(batch_labels)
                batch_token_ids, batch_labels = [], []

    def forfit(self):
        # Endless stream for model.fit; honours the shuffle flag set at init.
        while True:
            yield from self.__iter__(self.random)

# Shuffle only the training stream; keep validation order deterministic.
train_generator = data_generator(train_data, batch_size, random=True)
valid_generator = data_generator(valid_data, batch_size)

## Build the model (TextCNN: parallel conv widths + max-over-time pooling)
# Input: a padded sequence of integer token ids.
inputs = Input(shape=(maxlen,), dtype='int32')

# Embedding layer
embedding = Embedding(
    input_dim=vocabulary_size,
    output_dim=embedding_dim,
    input_length=maxlen
)(inputs)
# Add a channel axis so Conv2D can treat the sequence as a 1-channel image.
reshape = Reshape((maxlen, embedding_dim, 1))(embedding)

# One conv + max-over-time pooling branch per filter width.
# Fix: the original triplicated identical conv/pool code for each filter
# size; a loop over filter_sizes builds the same graph without duplication.
pooled_outputs = []
for filter_size in filter_sizes:
    conv = Conv2D(
        num_filters,
        kernel_size=(filter_size, embedding_dim),
        padding='valid',
        kernel_initializer='normal',
        activation='relu'
    )(reshape)
    pooled = MaxPool2D(
        pool_size=(maxlen - filter_size + 1, 1),
        strides=(1, 1),
        padding='valid'
    )(conv)
    pooled_outputs.append(pooled)

# Output layer: concatenate pooled features, dropout, then softmax.
concatenated_tensor = Concatenate(axis=1)(pooled_outputs)
flatten = Flatten()(concatenated_tensor)
dropout = Dropout(drop)(flatten)
output = Dense(units=num_classes, activation='softmax')(dropout)

model = Model(inputs=inputs, outputs=output)
model.summary()

# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(
    optimizer=Adam(lr=lr),
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

## Callbacks
class Evaluator(Callback):
    """Keras callback: compute macro-F1 on the validation set each epoch."""

    def __init__(self):
        super().__init__()
        self.best_val_f1 = 0.  # best macro-F1 observed so far

    def evaluate(self):
        """Run the model over valid_generator and return macro-F1."""
        y_true, y_pred = list(), list()
        for x, y in valid_generator:
            # Fix: y has shape (batch, 1); flatten so y_true and y_pred are
            # both 1-D before f1_score (avoids the column-vector warning).
            y_true.append(np.reshape(y, -1))
            y_pred.append(self.model.predict(x).argmax(axis=1))
        y_true = np.concatenate(y_true)
        y_pred = np.concatenate(y_pred)
        return f1_score(y_true, y_pred, average='macro')

    def on_epoch_end(self, epoch, logs=None):
        # Fix: the original indexed into logs unconditionally, crashing if
        # Keras ever passes logs=None (as the signature allows).
        logs = {} if logs is None else logs
        val_f1 = self.evaluate()
        if val_f1 > self.best_val_f1:
            self.best_val_f1 = val_f1
        # Expose val_f1 so ModelCheckpoint(monitor='val_f1') can see it.
        logs['val_f1'] = val_f1
        print(f'val_f1: {val_f1:.5f}, best_val_f1: {self.best_val_f1:.5f}')

callbacks = [
    # Evaluator comes first so logs['val_f1'] is already set when
    # ModelCheckpoint (which monitors it) runs for the same epoch.
    Evaluator(),
    EarlyStopping(
        monitor='val_loss',
        patience=1,
        verbose=1
    ),
    ModelCheckpoint(
        'best_model.weights',
        monitor='val_f1',
        save_weights_only=True,
        save_best_only=True,
        verbose=1,
        mode='max'
    ),
]
## Fit the model
model.fit(
    train_generator.forfit(),
    steps_per_epoch=len(train_generator),
    epochs=epochs,
    callbacks=callbacks,
    validation_data=valid_generator.forfit(),
    validation_steps=len(valid_generator)
)
# Prediction
## Load test data (dummy label so load_data's (text, label) schema holds)
df_test['label'] = 0
test_data = load_data(df_test)
test_generator = data_generator(test_data, batch_size)
## Predict with the model
# Fix: restore the best checkpoint saved by ModelCheckpoint — otherwise
# predictions come from the last epoch's weights, not the best epoch's.
model.load_weights('best_model.weights')
result = model.predict_generator(test_generator.forfit(), steps=len(test_generator))
result = result.argmax(axis=1)
# Format the result for submission
df_test['label'] = result
df_test.to_csv('D:/wk/data/personal-NLP-learning/save/text_cnn02.csv', index=False, columns=['label'])