import os
import json
import argparse
import utils.data_loader as data_utils
from utils.metrics import run_rank_evaluate
from trainer.predictor import Predictor


def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case ``sys.argv[1:]`` is used (the previous behavior),
            so existing callers are unaffected. Passing a list makes the
            function usable programmatically and testable.

    Returns:
        argparse.Namespace with ``input_path``, ``expt_dir`` and
        ``ckpt_path`` attributes.
    """
    parser = argparse.ArgumentParser('Deep Text Match')

    # data
    parser.add_argument("--input_path", type=str, default="./data/stc/stc.train.tokens.txt", help="input path")

    # experiment artifacts: expt_dir holds the run outputs, ckpt_path is a
    # checkpoint filename resolved inside it (by Predictor).
    parser.add_argument("--expt_dir", type=str, default="./outputs/stc_lstm/",
                        help="experiment output directory")
    parser.add_argument('--ckpt_path', type=str, default="2018_11_11_00_30_06_step_45000_score_0.8308.tar",
                        help='last checkpoint path')
    return parser.parse_args(argv)


def test(args):
    """Run pointwise ranking evaluation on a test set.

    Loads (label, query, candidate) samples plus their group ids, scores
    every pair with a checkpointed Predictor, and prints the ranking
    metrics returned by ``run_rank_evaluate``.

    Args:
        args: Namespace with ``input_path`` (test data file),
            ``expt_dir`` (experiment directory) and ``ckpt_path``
            (checkpoint file name) — as produced by ``parse_args``.
    """
    samples, groups = data_utils.load_pointwise_raw_data(args.input_path)
    print("test samples: {}".format(len(samples)))

    predictor = Predictor(args.expt_dir, args.ckpt_path)
    labels, predictions = [], []
    for label, query, candidate in samples:
        # Sentences are pre-tokenized; whitespace split recovers the tokens.
        score = predictor.predict(query_sent=query.split(),
                                  candidate_sent=candidate.split())
        predictions.append(score)
        labels.append(label)
    # labels/predictions are appended in lockstep, so their lengths match
    # by construction; no runtime assertion needed.
    res_dict = run_rank_evaluate(labels=labels,
                                 predictions=predictions,
                                 groups=groups)
    print("="*100)
    for key in res_dict:
        print("...{}:{}".format(key, res_dict[key]))


if __name__ == '__main__':
    # Script entry point: parse CLI args and run the evaluation.
    test(parse_args())
