#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : train.py
# Author: anyongjin
# Date  : 2020/8/14
from DistillBert.utils import logger

def train_general():
    """Run the general (task-agnostic) distillation smoke test.

    Builds a :class:`GeneralDistiller` and exercises the light-BERT
    step test; the full general distillation run is kept around as a
    commented-out alternative.
    """
    from DistillBert.distiller import GeneralDistiller

    general_distiller = GeneralDistiller()
    # distiller.distill_general()
    general_distiller.test_light_bert_steps()


def quantize_trained_model():
    """Quantize an already-trained student model, evaluate it, and save it.

    Pipeline: convert the student to a quantization-aware model, run the
    quantization pass, evaluate on the dev set from the configured task
    directory, then persist the quantized model to ``out_model_dir``.
    """
    import os
    from DistillBert.model import StudentModel
    from DistillBert.data import DataLoader

    model = StudentModel()
    logger.warning('converting to quantize aware model...')
    model.convert_to_quantize_aware_model()
    logger.warning('quantizing the model...')
    model.quantize_aware_model()
    logger.warning('quantize complete, testing...')
    # Dev-set location comes from the model's own config (data_dir + task.dev_dir).
    dev_path = os.path.join(model.config['data_dir'],
                            model.config['task']['dev_dir'])
    loader = DataLoader(dev_path, col_processers=None,
                        batch_size=model.train_args['batch_size'])
    metrics = model.test(loader)
    logger.warning(f'test on quantized model:{metrics}')
    model.save()
    logger.warning(f'quantized model saved at {model.out_model_dir}')


def train_task():
    """Run task-specific distillation from a trained teacher to the student.

    Builds a :class:`TaskDistiller`, wires up per-column preprocessing
    (tokenized/cleaned text and vocab-mapped labels), then launches the
    task distillation without retraining the teacher. The commented-out
    calls are alternative manual steps (student-only training, stepwise
    testing, export) kept for convenience.
    """
    from DistillBert.distiller import TaskDistiller
    from DistillBert.utils import clean_text

    distiller = TaskDistiller(distill_from_teacher=True, do_quantize=False)
    tokenize = distiller.get_tokenize_func()

    # Per-column preprocessing: clean + tokenize the text column,
    # map label strings to ids via the distiller's label vocabulary.
    text_proc = {'name': 'text',
                 'processer': lambda text: tokenize(clean_text(text))}
    label_proc = {'name': 'label',
                  'processer': lambda label: distiller.label_vocab[label]}
    processors = [text_proc, label_proc]

    # distiller.test_student_steps(col_processers)
    # distiller.train_student(distiller.student, col_processers)
    distiller.distill_task(col_processers=processors, train_teacher=False)
    # distiller.student.save('D:/Data/bert_classify/serve_models/gen_classify/5', save_format='tf')
    # distiller.test(col_processers=col_processers)


if __name__ == '__main__':
    # Entry-point switch: uncomment exactly one of the three stages below.
    # train_general()
    train_task()
    # quantize_trained_model()
