from models.bert_attention import BertSentiEntity, BertSentiEntityMultilayer
from data_utils.bert_attention_data import get_train_val_data_loader, get_test_loader, token_CLSE
import torch
from config import conf
import pandas as pd
from os.path import join
from glob import glob

# GPU device index (string) read from the [gpu] section of the project config;
# used below to build the torch.device string "cuda:<id>".
DEVICE_ID = conf.get('gpu', 'device_id')
# Directory for inference outputs from the [dir] section.
# NOTE(review): not referenced anywhere in this file — results are saved under
# the model's own save dir instead; confirm whether this constant is still needed.
INFERENCE_DIR = conf.get('dir', 'inference_result_dir')


def train(model_clss, batch_size=4, maxlen=400, lr=5e-5, num_epoch=50, early_stop=10):
    """Train a sentiment-entity model from scratch.

    Args:
        model_clss: model class to instantiate (e.g. BertSentiEntity or
            BertSentiEntityMultilayer); must accept (tokenizer, token_CLSE, lr).
        batch_size: mini-batch size passed to the data loader factory.
        maxlen: maximum token sequence length for the tokenizer/loader.
        lr: learning rate handed to the model constructor.
        num_epoch: maximum number of training epochs.
        early_stop: patience (in epochs) before early stopping.
    """
    # Fall back to CPU when no CUDA device is available.
    device = torch.device(f"cuda:{DEVICE_ID}" if torch.cuda.is_available() else "cpu")
    train_dataloader, val_dataloader, tokenizer = get_train_val_data_loader(
        device, batch_size, shuffle=True, maxlen=maxlen)
    bert_attn_model = model_clss(tokenizer, token_CLSE, lr).to(device)
    bert_attn_model.train_epochs(train_dataloader, val_dataloader,
                                 num_epoch=num_epoch, early_stop=early_stop)


def test(model_class, batch_size=4, maxlen=400, lr=5e-5):
    """Run inference with the latest trained model version and save raw results.

    Loads the best checkpoint of the newest version found under the model's
    root directory, runs it over the test set, and writes a CSV of
    (id, negative, predict, entity_list) rows under the model's save dir.

    Args:
        model_class: model class to instantiate; must accept
            (tokenizer, token_CLSE, lr, version_id=...).
        batch_size: mini-batch size for the data loaders.
        maxlen: maximum token sequence length.
        lr: learning rate passed to the constructor (unused for inference but
            required by the model interface).
    """
    device = torch.device(f"cuda:{DEVICE_ID}" if torch.cuda.is_available() else "cpu")
    test_loader = get_test_loader(device, batch_size, maxlen=maxlen)
    # The loader factory is only called here to obtain the fitted tokenizer;
    # the train/val loaders themselves are discarded.
    _, _, tokenizer = get_train_val_data_loader(device, batch_size, shuffle=True, maxlen=maxlen)
    # A throwaway instance (version_id=1) is built solely to discover
    # model_root, from which the latest version number is counted.
    # NOTE(review): assumes model_root is independent of version_id — confirm.
    probe_model = model_class(tokenizer, token_CLSE, lr, version_id=1).to(device)
    version_id = len(glob(probe_model.model_root + '/*version*'))
    bert_attn_model = model_class(tokenizer, token_CLSE, lr, version_id=version_id).to(device)
    bert_attn_model.load_best_model()
    results = list(bert_attn_model.inference(test_loader))

    save_path = join(bert_attn_model.model_save_dir, 'data', 'raw_rs.csv')
    pd.DataFrame(results, columns=['id', 'negative', 'predict', 'entity_list']).to_csv(save_path, index=False)
    print('saved ', save_path)


if __name__ == '__main__':
    # Training is disabled; switch the comment to retrain instead of evaluating.
    # train(BertSentiEntityMultilayer)
    test(BertSentiEntityMultilayer)