import pandas as pd
import numpy as np
from bert4keras.optimizers import Adam
from bert4keras.snippets import DataGenerator, sequence_padding
from bert4keras.tokenizers import Tokenizer
import keras
from sklearn.metrics import classification_report
from bert_model import build_model

# Hyperparameters
maxlen = 70           # max token length per text; longer inputs are truncated
batch_size = 8
learning_rate = 1e-5  # small LR, typical for fine-tuning a pretrained BERT
epochs = 1
class_nums = 21       # number of target classes

# Path where the best checkpoint is saved (see ModelCheckpoint below)
best_model_filepath = 'model2'

# Pretrained BERT model paths
checkpoint_path = 'model/bert_model.ckpt'
config_path = 'model/bert_config.json'
dict_path = 'model/vocab.txt'

# Build the tokenizer from the BERT vocabulary
tokenizer = Tokenizer(dict_path, do_lower_case=True)


# Batching generator: turns (text, label) pairs into padded model inputs.
class data_generator(DataGenerator):
    def __iter__(self, random=False):
        """Yield ([token_ids, segment_ids], labels) batches.

        ``self.sample`` pairs every item with an ``is_end`` flag, which
        lets the final (possibly short) batch be emitted as well.
        """
        token_batch, segment_batch, label_batch = [], [], []
        for is_end, (text, label) in self.sample(random):
            token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
            token_batch.append(token_ids)
            segment_batch.append(segment_ids)
            label_batch.append([label])
            # Flush once the batch is full, or when the data runs out.
            if is_end or len(token_batch) == self.batch_size:
                yield (
                    [sequence_padding(token_batch), sequence_padding(segment_batch)],
                    sequence_padding(label_batch),
                )
                # Start collecting the next batch from scratch.
                token_batch, segment_batch, label_batch = [], [], []


if __name__ == '__main__':
    # Load the datasets as (text, label-id) pairs.
    train_data = pd.read_csv('data/train.csv', header=0)[['text', 'label']].values
    valid_data = pd.read_csv('data/valid.csv', header=0)[['text', 'label']].values
    test_data = pd.read_csv('data/test.csv', header=0)[['text', 'label']].values

    # Shuffle with a fixed seed so runs are reproducible.
    np.random.seed(200)
    np.random.shuffle(train_data)
    np.random.shuffle(valid_data)
    np.random.shuffle(test_data)

    # Wrap the arrays in batching generators.
    train_generator = data_generator(train_data, batch_size)
    valid_generator = data_generator(valid_data, batch_size)
    test_generator = data_generator(test_data, batch_size)

    # Build the BERT classifier from the pretrained checkpoint.
    model = build_model(config_path, checkpoint_path, class_nums)

    # Labels are integer ids, so use the sparse categorical loss.
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(learning_rate),
        metrics=['accuracy']
    )

    # Save only the checkpoint with the lowest validation loss.
    checkpoint = keras.callbacks.ModelCheckpoint(
        best_model_filepath,
        monitor='val_loss',
        save_best_only=True,
        mode='min'
    )

    # Stop early if validation loss has not improved for 4 epochs.
    earlystop = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=4,
        mode='min'
    )

    # Train on the training set while monitoring the validation set.
    model.fit_generator(train_generator.forfit(),
                        steps_per_epoch=len(train_generator),
                        epochs=epochs,
                        validation_data=valid_generator.forfit(),
                        validation_steps=len(valid_generator),
                        shuffle=True,
                        verbose=1,
                        callbacks=[earlystop, checkpoint])

    # Evaluate the best saved checkpoint on the test set.
    model.load_weights(best_model_filepath)
    test_pred = []
    # The generator iterates test_data in order (random=False), so the
    # prediction order lines up with test_data's label column below.
    for x, _ in test_generator:
        test_pred.extend(model.predict(x).argmax(axis=1))
    test_true = test_data[:, 1].tolist()
    # Read the human-readable class names; `with` guarantees the file
    # handle is closed (the original leaked it).
    with open('data/label', 'r', encoding='utf8') as f:
        label_names = [line.strip() for line in f]
    print(classification_report(test_true, test_pred, target_names=label_names))
