import testData  
from flags import parse_args

# Parse command-line arguments (image lists, description/feature files, model path).
FLAGS, unparsed = parse_args()

# --- Prepare tokenizer on the train set ---

# Load the training image-identifier set (~6K images).
filename = FLAGS.trainImages
train = testData.load_set(filename)
print('Dataset: %d' % len(train))
# Load cleaned descriptions restricted to the training identifiers.
train_descriptions = testData.load_clean_descriptions(FLAGS.descriptions, train)
print('Descriptions: train=%d' % len(train_descriptions))
# Fit the tokenizer on the training descriptions only (test text must not leak in).
tokenizer = testData.create_tokenizer(train_descriptions)
# +1 because Keras-style word indices start at 1; index 0 is reserved for padding.
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary Size: %d' % vocab_size)
# Longest training description, used to pad/truncate sequences at inference time.
max_length = testData.max_length(train_descriptions)
print('Description Length: %d' % max_length)

# --- Prepare the test set ---

# Load the test image-identifier set.
# FIX: was `FLAGS..testImages` (double dot) — a SyntaxError.
filename = FLAGS.testImages
test = testData.load_set(filename)
print('Dataset: %d' % len(test))
# Load cleaned descriptions restricted to the test identifiers.
test_descriptions = testData.load_clean_descriptions(FLAGS.descriptions, test)
print('Descriptions: test=%d' % len(test_descriptions))
# Pre-extracted photo features for the test images.
test_features = testData.load_photo_features(FLAGS.features, test)
print('Photos: test=%d' % len(test_features))

# --- Evaluate ---

# Load the trained captioning model from disk.
filename = FLAGS.model
model = testData.load_model(filename)
# FIX: bare `evaluate_model` was never imported or defined; every other helper
# comes from testData, so qualify it. NOTE(review): confirm evaluate_model
# lives in testData and was not meant to come from another module.
testData.evaluate_model(model, test_descriptions, test_features, tokenizer, max_length)