import torch


def eval_model(model, tokenizer, batch_size, tasks, logger):
    """Run a quick generation smoke test, then lm-eval-harness benchmarks.

    Args:
        model: Causal LM exposing ``generate``, ``eval`` and ``device``
            (presumably a HuggingFace model — TODO confirm with callers).
        tokenizer: Tokenizer paired with ``model``.
        batch_size: Batch size forwarded to the lm-eval harness.
        tasks: Collection of task names; only the supported ones
            ("boolq", "ceval-valid", "gsm8k") are actually run.
        logger: Logger receiving the sample generation and all results.
    """
    SEQ_LEN_OUT = 100  # max new tokens for the smoke-test generation
    logger.info("Evaluating model...")
    test_prompt = "what is deep learning?"
    test_input = tokenizer(test_prompt, return_tensors="pt").to(model.device)
    model.eval()
    with torch.no_grad():
        generated_ids = model.generate(
            test_input.input_ids,
            attention_mask=test_input.attention_mask,
            max_new_tokens=SEQ_LEN_OUT,
        )
        res = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        logger.info(f"test_prompt: {test_prompt}")
        logger.info(f"res: {res}")

    # Imported lazily so the module loads even when lm_eval is not installed.
    import lm_eval
    from lm_eval.models.huggingface import HFLM
    hflm = HFLM(model, tokenizer=tokenizer, batch_size=batch_size)
    # Data-driven replacement for three copy-pasted if-blocks.  Insertion
    # order fixes the run order (boolq, ceval-valid, gsm8k) exactly as the
    # original branches did; ceval-valid keeps its 5-shot setting while the
    # others stay zero-shot.
    task_fewshot = {"boolq": 0, "ceval-valid": 5, "gsm8k": 0}
    for task, num_fewshot in task_fewshot.items():
        if task in tasks:
            results = lm_eval.simple_evaluate(
                hflm,
                tasks=[task],
                num_fewshot=num_fewshot,
                log_samples=False,
                batch_size=batch_size,
            )
            logger.info(results)
