
from typing import (Any, Callable, ClassVar, Dict, List, Optional, Sequence,
                    Tuple, Type, Union, cast, overload)

class LLM:
    """Entry point for offline, synchronous inference with a language model.

    Thin convenience wrapper that translates its constructor arguments into
    an engine configuration, instantiates an ``LLMEngine`` from it, and keeps
    a counter for assigning request ids.
    """

    def __init__(
            self,
            model: str,
            skip_tokenizer_init: bool = False,
            trust_remote_code: bool = False,
            allowed_local_media_path: str = "",
            tensor_parallel_size: int = 1,
            dtype: str = "auto",
            quantization: Optional[str] = None,
            revision: Optional[str] = None,
            tokenizer_revision: Optional[str] = None,
            seed: int = 0,
            gpu_memory_utilization: float = 0.9,
            swap_space: float = 4,
            cpu_offload_gb: float = 0,
            enforce_eager: Optional[bool] = None,
            max_seq_len_to_capture: int = 8192,
            disable_async_output_proc: bool = False,
            mm_processor_kwargs: Optional[Dict[str, Any]] = None,
            # After positional args are removed, move this right below `model`
            compilation_config: Optional[Union[int, Dict[str, Any]]] = None,
            **kwargs,
    ):
        """Build the engine configuration and construct the underlying engine.

        Args:
            model: Name or path of the model to load.
            **kwargs: Additional keyword arguments forwarded verbatim to the
                engine-args constructor. Remaining parameters mirror the
                engine configuration one-to-one; see the engine documentation
                for their semantics.
        """
        # BUG FIX: the original never constructed `engine_args` (it was an
        # undefined name, raising NameError) and silently dropped every
        # constructor parameter. Forward them all to the engine configuration.
        # NOTE(review): assumes `EngineArgs` is imported elsewhere in this
        # file alongside `LLMEngine` — confirm against the module's imports.
        engine_args = EngineArgs(
            model=model,
            skip_tokenizer_init=skip_tokenizer_init,
            trust_remote_code=trust_remote_code,
            allowed_local_media_path=allowed_local_media_path,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            quantization=quantization,
            revision=revision,
            tokenizer_revision=tokenizer_revision,
            seed=seed,
            gpu_memory_utilization=gpu_memory_utilization,
            swap_space=swap_space,
            cpu_offload_gb=cpu_offload_gb,
            enforce_eager=enforce_eager,
            max_seq_len_to_capture=max_seq_len_to_capture,
            disable_async_output_proc=disable_async_output_proc,
            mm_processor_kwargs=mm_processor_kwargs,
            compilation_config=compilation_config,
            **kwargs,
        )

        # BUG FIX: the original assigned the typing construct `Type[LLMEngine]`
        # (a `typing` generic-alias object) rather than the class itself, so
        # `self.engine_class.from_engine_args(...)` would raise AttributeError.
        self.engine_class = LLMEngine
        self.llm_engine = self.engine_class.from_engine_args(
            engine_args, usage_context=UsageContext.LLM_CLASS)

        # Counter used to hand out unique ids for submitted requests.
        self.request_counter = Counter()

    def generate(self):
        """Generate completions for submitted prompts.

        Stub — intentionally does nothing yet and returns ``None``.
        TODO: implement by driving ``self.llm_engine`` to completion.
        """
        pass