from vllm import LLM, SamplingParams


def is_overlength(vllm_engine: LLM,
                  text: str,
                  max_length: int = 32768) -> bool:
    """Return True if *text* tokenizes to at least *max_length* tokens.

    Args:
        vllm_engine: vLLM engine whose tokenizer is used for counting.
        text: The raw text to measure.
        max_length: Token-count threshold (inclusive); defaults to 32768.

    Returns:
        True when the tokenized length is >= max_length, else False.
    """
    tokenizer = vllm_engine.get_tokenizer()
    # NOTE(review): encode() may or may not add special tokens depending on
    # the tokenizer config — confirm this matches the intended length budget.
    tokenize_length = len(tokenizer.encode(text))
    # Return the comparison directly instead of an if/else returning booleans.
    return tokenize_length >= max_length
    
def is_underlength(vllm_engine: LLM,
                   text: str,
                   max_length: int = 32768) -> bool:
    """Return True if *text* tokenizes to fewer than *max_length* tokens.

    Logical complement of :func:`is_overlength`, using the same tokenizer
    and threshold semantics.
    """
    over = is_overlength(vllm_engine, text, max_length)
    return not over
    