import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from hsq.data_processing.data import MedQA
from bert4keras.snippets import DataGenerator, sequence_padding
from bert4keras.tokenizers import Tokenizer
from bert4keras.optimizers import Adam
from sklearn.metrics import classification_report
from hsq.medqa.bert_textcnn_chinese_model import build_bert_model
from bert4keras.backend import keras
from tensorflow.keras.callbacks import ReduceLROnPlateau


# Paths to the pretrained ALBERT-tiny model files (config / checkpoint / vocab).
prefix = './BERT/albert_tiny'
config_path = prefix + '/albert_config_tiny_g.json'
chekpoint_path = prefix + '/albert_model.ckpt'  # NOTE(review): name is a typo of "checkpoint_path"; kept so existing references still work
dict_path = prefix + '/vocab.txt'
best_model_filepath = '../mymodel/medqa_model.h5'  # where best weights are saved/restored
maxlen = 128      # max token length per input text (longer inputs are truncated by the tokenizer)
batch_size = 128  # samples per batch
class_nums = 23   # number of department classes (must match the target_names lists below)


# Build the tokenizer from the pretrained vocabulary file.
tokenizer = Tokenizer(dict_path)


class data_generator(DataGenerator):
    """Batch generator: yields ([token_ids, segment_ids], labels) with padding.

    Each sample is a (label, text) pair; texts are tokenized up to `maxlen`
    and every batch is padded to its longest sequence.
    """

    def __iter__(self, random=False):
        tokens, segments, labels = [], [], []
        for is_end, (label, text) in self.sample(random):
            ids, seg = tokenizer.encode(text, maxlen=maxlen)
            tokens.append(ids)
            segments.append(seg)
            labels.append([label])
            # Emit a full batch, or flush the remainder when the data runs out.
            if is_end or len(tokens) == self.batch_size:
                yield (
                    [sequence_padding(tokens), sequence_padding(segments)],
                    sequence_padding(labels),
                )
                tokens, segments, labels = [], [], []


def train():
    """Train the BERT+TextCNN classifier on MedQA and print a test report.

    Loads (or resumes from) `best_model_filepath`, fits on the training
    split with early stopping / LR reduction / best-checkpointing, then
    evaluates on the test split with a per-class classification report.
    """
    medqa = MedQA()
    train_data, test_data, val_data = medqa.medqa_load_data()

    # Wrap each split in a batching generator.
    train_generator = data_generator(train_data, batch_size)
    val_generator = data_generator(val_data, batch_size)
    test_generator = data_generator(test_data, batch_size)

    model = build_bert_model(config_path=config_path, checkpoint_path=chekpoint_path,
                             class_nums=class_nums)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(5e-5),
        metrics=['accuracy']
    )

    earlystop = keras.callbacks.EarlyStopping(
        monitor='accuracy',
        patience=2,
        verbose=2,
        mode='max'
    )
    reduce = ReduceLROnPlateau(
        monitor='loss',
        verbose=1,
        mode='auto',  # BUG FIX: was `model='auto'` — a typo'd keyword, so mode was never set
        factor=0.5
    )

    # Resume from previously saved weights if they exist.
    if os.path.exists(best_model_filepath):
        print('---------------load the model---------------')
        model.load_weights(best_model_filepath)

    checkpoint = keras.callbacks.ModelCheckpoint(
        best_model_filepath,
        # BUG FIX: tf.keras names the metric 'val_accuracy'; the old 'val_acc'
        # never matched, so save_best_only could not track the best epoch.
        monitor='val_accuracy',
        verbose=1,
        save_best_only=True,
        mode='max'
    )
    # BUG FIX: fit on the training split — the previous code accidentally
    # trained on the test set (the train_generator lines were commented out).
    model.fit(
        train_generator.forfit(),
        steps_per_epoch=len(train_generator),
        epochs=10,
        validation_data=val_generator.forfit(),
        validation_steps=len(val_generator),
        shuffle=True,
        callbacks=[checkpoint, earlystop, reduce]
    )

    model.save_weights(best_model_filepath)
    model.load_weights(best_model_filepath)

    # Evaluate on the held-out test set.
    test_pred = []
    for x, y in test_generator:
        test_pred.extend(model.predict(x).argmax(axis=1))
    test_true = [sample[0] for sample in test_data]
    print(set(test_true))
    print(set(test_pred))

    target_names = ['内科',
                    '外科',
                    '妇产科',
                    '儿科',
                    '皮肤性病科',
                    '五官科',
                    '肿瘤科',
                    '心理健康科',
                    '中医科',
                    '传染科',
                    '整形美容科',
                    '美容',
                    '药品',
                    '辅助检查科',
                    '保健养生',
                    '康复医学科',
                    '家居环境',
                    '子女教育',
                    '营养保健科',
                    '运动瘦身',
                    '遗传',
                    '体检科',
                    '其他科室']
    # labels=range(class_nums) is required: the test split may not contain
    # every label, and classification_report would raise otherwise.
    print(classification_report(test_true, test_pred, labels=range(class_nums),
                                target_names=target_names, zero_division=0))


def test():
    """Evaluate the saved classifier on the MedQA test split.

    Rebuilds the model, loads weights from `best_model_filepath`, predicts
    over the test set, and prints a per-class classification report.
    """
    model = build_bert_model(config_path=config_path, checkpoint_path=chekpoint_path,
                             class_nums=class_nums)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=Adam(5e-5),
        metrics=['accuracy']
    )
    model.load_weights(best_model_filepath)

    medqa = MedQA()
    train_data, test_data, val_data = medqa.medqa_load_data()
    # BUG FIX: batch_size was omitted, so the generator silently fell back to
    # its default batch size instead of the module-wide setting.
    test_generator = data_generator(test_data, batch_size)

    test_pred = []
    for x, y in test_generator:
        test_pred.extend(model.predict(x).argmax(axis=1))
    test_true = [sample[0] for sample in test_data]
    print(set(test_true))
    print(set(test_pred))

    target_names = ['内科',
                    '外科',
                    '妇产科',
                    '儿科',
                    '皮肤性病科',
                    '五官科',
                    '肿瘤科',
                    '心理健康科',
                    '中医科',
                    '传染科',
                    '整形美容科',
                    '美容',
                    '药品',
                    '辅助检查科',
                    '保健养生',
                    '康复医学科',
                    '家居环境',
                    '子女教育',
                    '营养保健科',
                    '运动瘦身',
                    '遗传',
                    '体检科',
                    '其他科室']
    # labels=range(class_nums) is required: the test split may not contain
    # every label, and classification_report would raise otherwise.
    print(classification_report(test_true, test_pred, labels=range(class_nums),
                                target_names=target_names, zero_division=0))




