"""
This script produces completions for roughly any AutoModelForCausalLM.
Use Instruct Mode, will generate full function code, including the function signature.
"""
import torch
from typing import List
from multipl_e.multipl_e.completions import make_main, partial_arg_parser
from vllm import LLM, SamplingParams
from methods.k_cot.k_cot import K_COT
from methods.cot.cot import COT
from methods.raw.raw import RAW
import hydra
from omegaconf import DictConfig


class VLLM:
    """Wrapper around a vLLM engine that adapts it to the MultiPL-E completion
    interface, delegating prompt construction and answer extraction to a
    `method` object (RAW / COT / K_COT).
    """

    def __init__(self, name, revision, tokenizer_name=None, tokenizer_revision=None, num_gpus=1):
        """Load the model into a vLLM engine.

        Args:
            name: HuggingFace model id or local path.
            revision: model revision, or None for the default branch.
            tokenizer_name: optional tokenizer override; None uses the model's own.
            tokenizer_revision: optional tokenizer revision.
            num_gpus: tensor-parallel degree.
        """
        # float16 is used unconditionally for broad GPU support; switch to
        # bfloat16 via torch.cuda.is_bf16_supported() if the hardware allows.
        dtype = "float16"
        self.model = LLM(
            model=name,
            tokenizer=tokenizer_name,
            dtype=dtype,
            revision=revision,
            max_model_len=16384,
            tokenizer_revision=tokenizer_revision,
            trust_remote_code=True,
            tensor_parallel_size=num_gpus,
            gpu_memory_utilization=0.85,
        )

    def completions(
        self, prompts: List[str], max_tokens: int, temperature: float, top_p, stop, method
    ):
        """Run `method` over `prompts` and return (codes, full_completions).

        Returns:
            A pair of parallel lists: the extracted code for each prompt and
            the full raw completion for each prompt.
        """
        # Fix: the caller-supplied max_tokens was previously ignored in favor
        # of a hard-coded 2048; the requested sampling budget now takes effect.
        params = SamplingParams(
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            min_tokens=8,
            stop=stop,
            include_stop_str_in_output=True,
        )
        base_outputs = method.run(prompts, self.completion_fn, params)
        codes = [out.code for out in base_outputs]
        full_completions = [out.full_completion for out in base_outputs]
        return codes, full_completions

    def completion_fn(self, prompts: List[str], params: SamplingParams):
        """Generate raw vLLM outputs for `prompts` (progress bar disabled)."""
        return self.model.generate(prompts, params, use_tqdm=False)


def do_name_override(args):
    """Return the explicit --name-override value when provided; otherwise
    sanitize the model name by replacing '/' and '-' (which the rest of the
    toolchain rejects) with '_'.
    """
    override = args.name_override
    sanitized = args.name.replace("/", "_").replace("-", "_")
    return override if override else sanitized

@hydra.main(config_path="config", config_name="inference", version_base="1.3")
def main(cfg: DictConfig):
    """Entry point: build the prompting method and the model from the Hydra
    config, then hand both to the MultiPL-E completion driver.
    """
    method_classes = {"raw": RAW, "cot": COT, "k_cot": K_COT}
    method_cls = method_classes.get(cfg.method)
    if method_cls is None:
        raise ValueError(f"Unknown method: {cfg.method}")
    method = method_cls(cfg.lang)

    # Load with the original HF model name first, then sanitize cfg.name for
    # downstream output naming.
    model = VLLM(cfg.name, None, None, None, cfg.num_gpus)
    cfg.name = do_name_override(cfg)
    make_main(cfg, cfg.name, model.completions, method)


if __name__ == "__main__":
    main()
