import os
import sys

import fire
import torch

# Make the project root importable when this file is run directly as a
# script (it lives two directories below the repository root).
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from llmpt.model import GPTInference, GPTInferenceHparams, GPTModel


def inference(
    model_path: str,
):
    """Interactively generate text with a trained GPT model.

    Loads the model checkpoint from *model_path*, then repeatedly prompts
    the user for a prefix on stdin and streams the generated continuation
    to stdout, until stdin closes (Ctrl-D) or the user interrupts (Ctrl-C).

    Args:
        model_path: Path to a saved ``GPTModel`` checkpoint.
    """
    print("Preparing model...", file=sys.stderr, flush=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GPTModel.load(model_path, device=device)
    inferencing = GPTInference(
        hparams=GPTInferenceHparams(
            context_size=512,
            generation_size=512,
            temperature=1.0),
        vocab=model.vocab,
        nn=model.nn,
        device=device,
        compile=False)

    while True:
        try:
            prefix = input("Input the prefix: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly instead of dumping a traceback when stdin is
            # closed (Ctrl-D / exhausted pipe) or Ctrl-C is pressed at
            # the prompt.
            print(file=sys.stderr)
            break
        print(prefix, end="")
        for token in inferencing.generate(prefix):
            # Normalize every CR/LF variant the model may emit to a
            # plain "\n" so the streamed output renders consistently.
            token = token.replace("\r\n", "\n")
            token = token.replace("\n\r", "\n")
            token = token.replace("\r", "\n")
            print(token, end="", flush=True)
        print("\n" * 3)


if __name__ == '__main__':
    # Expose `inference` as a CLI: fire maps its parameters
    # (e.g. --model_path) to command-line flags.
    fire.Fire(inference)
