#include <iostream>
#include <fstream>
#include <chrono>
#include "opt_infer_engine.h"
#include "tokenization.h"

/// Demo driver: feeds a fixed prompt through the OPT-125M inference engine
/// and prints the generated continuation.
///
/// Usage: ./demo [model_path]
///   model_path — optional path to the traced TorchScript model file;
///                defaults to the original hard-coded development path.
int main(int argc, char* argv[]) {
    // NOTE(review): this tokenizer and `ids` are never consumed below — the
    // engine is handed the same vocab/merges files and presumably tokenizes
    // internally. Kept (not deleted) because the constructors read files and
    // their absence would change observable behavior; confirm before removing.
    auto gpt2Tokenizer = tokenizer::GPT2Tokenizer("../vocab.json", "../merges.txt", 50272, 0, L"<unk>");
    std::string text = "Large language models (LLMs) trained on massive text collections have shown surprising "
                       "emergent capabilities to ";
    [[maybe_unused]] auto ids = gpt2Tokenizer.convertTokensToIds(gpt2Tokenizer.tokenize(text));

    // Allow the model path to be overridden on the command line so the demo
    // is not tied to one developer's home directory.
    const std::string modelPath =
        (argc > 1) ? argv[1] : "/home/tb1844/TorchCpp/traced_opt125_model.pt";

    auto opt_infer_engine = OPT::OptInferEngine(modelPath, "../vocab.json", "../merges.txt");
    const auto output = opt_infer_engine.getReturnString(text);

    std::cout << "Prompt:\n" << text << std::endl;
    std::cout << "\nResult:\n" << output << std::endl;
    return 0;
}
