import torch

def gen_mid(items, model, tokenizer, torch_type):
    """Batch fill-in-the-middle (FIM) generation.

    Builds a ``<FILL_ME>`` prompt from each item's truncated prefix/suffix,
    runs one batched ``generate`` call under autocast, and returns only the
    newly generated text (prompt tokens stripped).

    Args:
        items: Iterable of dicts with string values under ``'prefix'`` and
            ``'fim_suffix'``. (NOTE(review): key names are asymmetric —
            ``prefix`` vs ``fim_suffix`` — confirm against the producer.)
        model: Causal LM exposing ``.generate(...)`` and ``.device``
            (HuggingFace-style interface).
        tokenizer: Matching tokenizer supporting batched encode and
            ``batch_decode``.
        torch_type: torch dtype for autocast (e.g. ``torch.float16``).

    Returns:
        list[str]: One generated infill string per input item.
    """
    # Keep at most 512 chars of context on each side of the infill marker.
    prompts = [
        f"{item['prefix'][-512:]}<FILL_ME>{item['fim_suffix'][:512]}"
        for item in items
    ]

    encoded = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt")
    input_ids = encoded["input_ids"].to(model.device)
    # BUG FIX: with padding=True the attention mask must be forwarded to
    # generate(); otherwise pad tokens are attended to and batched results
    # differ from single-prompt results.
    attention_mask = encoded["attention_mask"].to(model.device)

    # Use the model's actual device type instead of hard-coding 'cuda',
    # so the same code runs on CPU/MPS as well.
    with torch.autocast(device_type=model.device.type, dtype=torch_type):
        generated_ids = model.generate(
            input_ids, attention_mask=attention_mask, max_new_tokens=512
        )

    # Slice off the prompt tokens; decode only the generated continuation.
    return tokenizer.batch_decode(
        generated_ids[:, input_ids.shape[1]:], skip_special_tokens=True
    )