import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.trainer import Trainer
import pytorch_lightning.callbacks as plc

from choose.src.data_loader.data_loader import get_iterator
from choose.src.model.deco_model import DecoModel
from choose.src.model.model import AttentiveReader, ImpatientReader
from choose.src.trainer.parser import get_parser_args


def load_callbacks(args):
    """Build the Lightning callback list.

    Includes early stopping and best-checkpoint saving on ``val_acc``,
    plus an optional learning-rate monitor.

    :param args: parsed arguments; ``args.lr_scheduler`` toggles the
        learning-rate monitor callback.
    :return: list of configured ``pytorch_lightning`` callbacks.
    """
    # NOTE: the monitored key must match the metric name passed to
    # self.log(...) inside the LightningModule.
    early_stopping = plc.EarlyStopping(
        monitor='val_acc',
        mode='max',
        patience=10,
        min_delta=0.001
    )

    checkpointing = plc.ModelCheckpoint(
        monitor='val_acc',
        filename='best-{epoch:02d}-{val_acc:.3f}',
        save_top_k=1,
        mode='max',
        save_last=True
    )

    callbacks = [early_stopping, checkpointing]
    if args.lr_scheduler:
        callbacks.append(plc.LearningRateMonitor(logging_interval='epoch'))
    return callbacks


def train(config):
    """Train an AttentiveReader wrapped in DecoModel with a Lightning Trainer.

    :param config: parsed argparse Namespace; also consumed by
        ``Trainer.from_argparse_args`` for trainer options (loggers,
        callbacks, devices, etc. are expected to be attributes on it).
    """
    print('loading data...')
    # The third value (embedding vectors) is not consumed here — the model
    # builds its own embedding; kept with a leading underscore to make the
    # intentional non-use explicit.
    train_iterator, dev_iterator, _embedding_vector = get_iterator(config)
    print('load data finished')
    model = AttentiveReader(config)
    # To load pretrained weights into an embedding layer, use:
    #   embedding.weight.data.copy_(weight_matrix)

    deco_model = DecoModel(config, model)
    trainer = Trainer.from_argparse_args(args=config)
    trainer.fit(deco_model, train_iterator, dev_iterator)
    # trainer.test()


def test_auto(config,
              checkpoint_path='./runs/default/version_0/checkpoints/epoch=6-step=199.ckpt'):
    """Restore a trained ImpatientReader from a checkpoint and run prediction
    over the dev set, printing each batch's result.

    :param config: parsed argparse Namespace with data/model options.
    :param checkpoint_path: path to the Lightning checkpoint to restore;
        defaults to the previously hard-coded location for backward
        compatibility.
    """
    # Only the dev iterator is needed here; the train iterator and the
    # embedding vectors are deliberately discarded.
    _, dev_iterator, _ = get_iterator(config)
    model = ImpatientReader.load_from_checkpoint(checkpoint_path,
                                                 config=config)
    deco_model = DecoModel(config, model)

    for idx, batch in enumerate(dev_iterator):
        result = deco_model.predict_step(batch, idx, 0)
        print(result)


def main():
    """Entry point: parse CLI args, seed RNGs, attach logger/callbacks,
    then run checkpoint-based evaluation (training call is disabled)."""
    config = get_parser_args()
    pl.seed_everything(config.seed)
    # Attach a custom TensorBoard logger and the callback list onto the
    # namespace so Trainer.from_argparse_args picks them up downstream.
    config.logger = TensorBoardLogger(save_dir=config.log_dir)
    config.callbacks = load_callbacks(config)

    print('training start!')
    # train(config)
    test_auto(config)
    print('training finished!')


# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    main()
