import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline


class LargeLanguageModel:
    """Thin wrapper around a Hugging Face ``text-generation`` pipeline."""

    def __init__(self, model_path):
        """Load model and tokenizer from *model_path*.

        Args:
            model_path: Local directory or hub model id accepted by
                ``transformers.pipeline``.
        """
        # float16 + device_map="auto" lets accelerate shard a large model
        # across whatever GPUs/CPU memory are available.
        self.generator = pipeline(
            "text-generation",
            model=model_path,
            tokenizer=model_path,
            device_map="auto",
            torch_dtype=torch.float16,
        )

    def process(self, context, max_length=2048, top_p=0.85, temperature=0.35):
        """Generate a continuation for *context* and return only the new text.

        Args:
            context: Prompt string fed to the model.
            max_length: Maximum number of NEW tokens to generate
                (forwarded as ``max_new_tokens``, not total length).
            top_p: Nucleus-sampling probability mass.
            temperature: Sampling temperature.

        Returns:
            The generated continuation as a string; the prompt is excluded
            via ``return_full_text=False``.
        """
        # Fix: top_p and temperature were accepted but never forwarded, so
        # sampling silently ran with the pipeline's generation defaults.
        outputs = self.generator(
            context,
            # Explicit pad_token_id silences the "pad_token_id not set"
            # warning for GPT-style models that lack a pad token.
            pad_token_id=self.generator.tokenizer.eos_token_id,
            max_new_tokens=max_length,
            top_p=top_p,
            temperature=temperature,
            return_full_text=False,
            do_sample=True,
            clean_up_tokenization_spaces=True,
        )
        return outputs[0]["generated_text"]
