"""
This script produces completions for roughly any AutoModelForCausalLM.
Use Instruct Mode, will generate full function code, including the function signature.
"""
import importlib
import re
from typing import List
from multipl_e.multipl_e.completions import make_main, partial_arg_parser
from vllm import LLM, SamplingParams
import torch
from utils.process_utils import extract_content, load_shot
from prompts.gen_prompts import t_problem, t_cot, t_code


class VLLM:
    """Thin wrapper around a vLLM engine that produces few-shot CoT code completions.

    The wrapper loads the model once in ``__init__`` and exposes a
    ``completions`` method with the signature the MultiPL-E harness expects.
    """

    def __init__(self, name, revision, tokenizer_name=None, tokenizer_revision=None, num_gpus=1, args=None):
        # Prefer bfloat16 when the GPU supports it; otherwise fall back to float16.
        preferred_dtype = "bfloat16" if torch.cuda.is_bf16_supported() else "float16"
        self.model = LLM(
            model=name,
            tokenizer=tokenizer_name,
            dtype=preferred_dtype,
            revision=revision,
            max_model_len=4096,
            tokenizer_revision=tokenizer_revision,
            trust_remote_code=True,
            tensor_parallel_size=num_gpus,
            gpu_memory_utilization=0.95,
        )
        # Parsed CLI namespace; completions() reads args.lang to pick the shot.
        self.args = args

    def completions(
        self, prompts: List[str], max_tokens: int, temperature: float, top_p, stop, template_name: str
    ):
        """Generate code for each prompt using a one-shot problem/CoT/code prefix.

        Returns a pair ``(codes, full_completions)`` where ``codes`` is the
        text extracted from the ``<code>`` tags and ``full_completions`` is the
        prompt plus raw model output.

        NOTE(review): ``max_tokens``, ``stop`` and ``template_name`` are
        ignored here — generation always uses a fixed 2048-token budget and a
        ``"</code>"`` stop string; confirm this override is intentional.
        """
        # Build the shared one-shot prefix from the example for this language.
        e_p, e_cot, _, _, e_res = load_shot(self.args.lang)
        shot_prefix = t_problem(e_p) + t_cot(e_cot) + t_code(e_res)

        sampling = SamplingParams(
            temperature=temperature,
            top_p=top_p,
            max_tokens=2048,
            min_tokens=8,
            stop=["</code>"],
            include_stop_str_in_output=True,
        )
        full_prompts = [shot_prefix + t_problem(p.strip()) for p in prompts]
        outputs = self.model.generate(full_prompts, sampling, use_tqdm=False)

        full_completions = [
            prompt + out.outputs[0].text for prompt, out in zip(full_prompts, outputs)
        ]
        return extract_content(full_completions, "code"), full_completions


def automodel_partial_arg_parser():
    """Extend the shared MultiPL-E parser with model-loading flags and return it."""
    parser = partial_arg_parser()
    # (flag, add_argument keyword arguments) — registered in one pass below.
    flag_specs = (
        ("--name", dict(type=str, required=True)),
        ("--revision", dict(type=str)),
        ("--tokenizer_name", dict(type=str)),
        ("--tokenizer_revision", dict(type=str)),
        ("--name-override", dict(type=str)),
        ("--num_gpus", dict(type=int, default=1)),
        ("--template-name", dict(type=str, default="cot_rkt")),
    )
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    return parser


def do_name_override(args):
    """Return the output model name.

    Uses --name-override verbatim when provided (non-empty); otherwise takes
    --name with every '/' and '-' replaced by '_', since the rest of the
    toolchain does not accept those characters.
    """
    if args.name_override:
        return args.name_override
    return re.sub(r"[/-]", "_", args.name)


def main():
    """CLI entry point: parse arguments, build the model wrapper, run the harness."""
    parser = automodel_partial_arg_parser()
    args = parser.parse_args()
    wrapper = VLLM(
        args.name,
        args.revision,
        args.tokenizer_name,
        args.tokenizer_revision,
        args.num_gpus,
        args,
    )
    make_main(args, do_name_override(args), wrapper.completions)


if __name__ == "__main__":
    main()
