#include "llm.h"

namespace tc = triton::client;

// Evaluates Triton client call X; on failure prints MSG plus the client error
// to stderr and terminates the process with exit(1). Every Triton client call
// in this translation unit is wrapped in this macro, so any client-side error
// is fatal. (Kept as a macro so X is evaluated exactly once in-place.)
#define FAIL_IF_ERR(X, MSG)                                        \
  {                                                                \
    tc::Error err = (X);                                           \
    if (!err.IsOk()) {                                             \
      std::cerr << "error: " << (MSG) << ": " << err << std::endl; \
      exit(1);                                                     \
    }                                                              \
  }

namespace {

void
ValidateShapeAndDatatype(
    const std::string& name, std::shared_ptr<tc::InferResult> result)
{
  std::vector<int64_t> shape;
  FAIL_IF_ERR(
      result->Shape(name, &shape), "unable to get shape for '" + name + "'");
  // Validate shape
  if ((shape.size() != 1) || shape[0] != 512) {
    std::cerr << "error: received incorrect shapes for '" << name << "'"
              << std::endl;
    // exit(1);
  }
  std::string datatype;
  FAIL_IF_ERR(
      result->Datatype(name, &datatype),
      "unable to get datatype for '" + name + "'");
  // Validate datatype
  if (datatype.compare("FP32") != 0) {
    std::cerr << "error: received incorrect datatype for '" << name
              << "': " << datatype << std::endl;
    // exit(1);
  }
}

}  // namespace

LLM::LLM()
{
    // Build the HTTP client used for all inference requests against the
    // Triton server at httpServerUrl_. Client-side verbose logging is
    // switched on, presumably for development tracing — TODO confirm this
    // is intended for production builds.
    static constexpr bool kVerbose = true;
    FAIL_IF_ERR(
        tc::InferenceServerHttpClient::Create(
            &httpClient_, httpServerUrl_, kVerbose),
        "unable to create http client");
}

std::string LLM::chat(const std::string &prompt)
{
  tc::Headers http_headers;
    // input: request_type
	int32_t request_type_data = 0;
	tc::InferInput* request_type;
	std::vector<int64_t> request_type_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&request_type, "request_type", request_type_shape, "INT32"),
		"unable to create 'request_type'");
	std::shared_ptr<tc::InferInput> request_type_ptr(request_type);
	FAIL_IF_ERR(request_type_ptr->Reset(), "unable to reset 'request_type'");
	FAIL_IF_ERR(
		request_type_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&request_type_data), sizeof(int32_t)),
		"unable to set data for 'request_type'");


    // input: text_input
    std::vector<std::string> text_input_data(1);
    std::vector<int64_t> text_input_shape{ 1 };
    text_input_data[0] = prompt;
    tc::InferInput* text_input;
    FAIL_IF_ERR(
        tc::InferInput::Create(&text_input, "text_input", text_input_shape, "BYTES"),
        "unable to get text_input");
    std::shared_ptr<tc::InferInput> text_input_ptr;
    text_input_ptr.reset(text_input);
    text_input_ptr->SetBinaryData(true);
    FAIL_IF_ERR(
        text_input_ptr->AppendFromString(text_input_data),
        "unable to set data for text_input");

	// input: temperature_input
	int32_t temperature_input_data = 0.2;
	tc::InferInput* temperature_input;
	std::vector<int64_t> temperature_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&temperature_input, "temperature_input", temperature_input_shape, "FP32"),
		"unable to create 'temperature_input'");
	std::shared_ptr<tc::InferInput> temperature_input_ptr(temperature_input);
	FAIL_IF_ERR(temperature_input_ptr->Reset(), "unable to reset 'temperature_input'");
	FAIL_IF_ERR(
		temperature_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&temperature_input_data), sizeof(int32_t)),
		"unable to set data for 'temperature_input'");


	// input: top_k_input
	int32_t top_k_input_data = 40;
	tc::InferInput* top_k_input;
	std::vector<int64_t> top_k_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&top_k_input, "top_k_input", top_k_input_shape, "INT32"),
		"unable to create 'top_k_input'");
	std::shared_ptr<tc::InferInput> top_k_input_ptr(top_k_input);
	FAIL_IF_ERR(top_k_input_ptr->Reset(), "unable to reset 'top_k_input'");
	FAIL_IF_ERR(
		top_k_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&top_k_input_data), sizeof(int32_t)),
		"unable to set data for 'top_k_input'");


	// input: top_p_input
	int32_t top_p_input_data = 0.9;
	tc::InferInput* top_p_input;
	std::vector<int64_t> top_p_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&top_p_input, "top_p_input", top_p_input_shape, "FP32"),
		"unable to create 'top_p_input'");
	std::shared_ptr<tc::InferInput> top_p_input_ptr(top_p_input);
	FAIL_IF_ERR(top_p_input_ptr->Reset(), "unable to reset 'top_p_input'");
	FAIL_IF_ERR(
		top_p_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&top_p_input_data), sizeof(int32_t)),
		"unable to set data for 'top_p_input'");
	

	// input: n_keep_input
	// TODO n_keep
	int32_t n_keep_input_data = 0;
	tc::InferInput* n_keep_input;
	std::vector<int64_t> n_keep_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&n_keep_input, "n_keep_input", n_keep_input_shape, "INT32"),
		"unable to create 'n_keep_input'");
	std::shared_ptr<tc::InferInput> n_keep_input_ptr(n_keep_input);
	FAIL_IF_ERR(n_keep_input_ptr->Reset(), "unable to reset 'n_keep_input'");
	FAIL_IF_ERR(
		n_keep_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&n_keep_input_data), sizeof(int32_t)),
		"unable to set data for 'n_keep_input'");


	// input: n_predict_input 256
  int32_t n_predict_input_data = 256;
	tc::InferInput* n_predict_input;
	std::vector<int64_t> n_predict_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&n_predict_input, "n_predict_input", n_predict_input_shape, "INT32"),
		"unable to create 'n_predict_input'");
	std::shared_ptr<tc::InferInput> n_predict_input_ptr(n_predict_input);
	FAIL_IF_ERR(n_predict_input_ptr->Reset(), "unable to reset 'n_predict_input'");
	FAIL_IF_ERR(
		n_predict_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&n_predict_input_data), sizeof(int32_t)),
		"unable to set data for 'n_predict_input'");



	// input: cache_prompt_input True
  bool cache_prompt_input_data = true;
	tc::InferInput* cache_prompt_input;
	std::vector<int64_t> cache_prompt_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&cache_prompt_input, "cache_prompt_input", cache_prompt_input_shape, "BOOL"),
		"unable to create 'cache_prompt_input'");
	std::shared_ptr<tc::InferInput> cache_prompt_input_ptr(cache_prompt_input);
	FAIL_IF_ERR(cache_prompt_input_ptr->Reset(), "unable to reset 'cache_prompt_input'");
	FAIL_IF_ERR(
		cache_prompt_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&cache_prompt_input_data), sizeof(bool)),
		"unable to set data for 'cache_prompt_input'");


	// input: stop_input "\n### Human"
  std::vector<std::string> stop_input_data(1);
    std::vector<int64_t> stop_input_shape{ 1 };
    stop_input_data[0] = "";
    tc::InferInput* stop_input;
    FAIL_IF_ERR(
        tc::InferInput::Create(&stop_input, "stop_input", stop_input_shape, "BYTES"),
        "unable to get stop_input");
    std::shared_ptr<tc::InferInput> stop_input_ptr;
    stop_input_ptr.reset(stop_input);
    stop_input_ptr->SetBinaryData(true);
    FAIL_IF_ERR(
        stop_input_ptr->AppendFromString(stop_input_data),
        "unable to set data for stop_input");



	// input: stream_input True
  bool stream_input_data = true;
	tc::InferInput* stream_input;
	std::vector<int64_t> stream_input_shape { 1 };
	FAIL_IF_ERR(
		tc::InferInput::Create(&stream_input, "stream_input", stream_input_shape, "BOOL"),
		"unable to create 'stream_input'");
	std::shared_ptr<tc::InferInput> stream_input_ptr(stream_input);
	FAIL_IF_ERR(stream_input_ptr->Reset(), "unable to reset 'stream_input'");
	FAIL_IF_ERR(
		stream_input_ptr->AppendRaw(reinterpret_cast<uint8_t*>(&stream_input_data), sizeof(bool)),
		"unable to set data for 'stream_input'");

    // Generate the outputs to be requested.
    tc::InferRequestedOutput* text_output;
    FAIL_IF_ERR(
        tc::InferRequestedOutput::Create(&text_output, "text_output"),
        "unable to get text_output");
    std::shared_ptr<tc::InferRequestedOutput> text_output_ptr;
    text_output_ptr.reset(text_output);
    text_output_ptr->SetBinaryData(true);

    tc::InferRequestedOutput* token_output;
    FAIL_IF_ERR(
        tc::InferRequestedOutput::Create(&token_output, "token_output"),
        "unable to get token_output");
    std::shared_ptr<tc::InferRequestedOutput> token_output_ptr;
    token_output_ptr.reset(token_output);
    token_output_ptr->SetBinaryData(true);


    // The inference settings. Will be using default for now.
    tc::InferOptions options(modelName_);
    options.model_version_ = modelVersion_;

    std::vector<tc::InferInput*> inputs = {request_type_ptr.get(), text_input_ptr.get(), temperature_input_ptr.get(), 
                                          top_k_input_ptr.get(), top_p_input_ptr.get(),
                                          n_keep_input_ptr.get(), n_predict_input_ptr.get(),
                                          cache_prompt_input_ptr.get(), stop_input_ptr.get(),
                                          stream_input_ptr.get()};
    std::vector<const tc::InferRequestedOutput*> outputs = { text_output_ptr.get(), token_output_ptr.get() };

    tc::InferResult* results;
    FAIL_IF_ERR(
        httpClient_->Infer(&results, options, inputs, outputs, http_headers),
        "unable to run model");
    std::shared_ptr<tc::InferResult> results_ptr;
    results_ptr.reset(results);

    // Validate the results...
    ValidateShapeAndDatatype("text_output", results_ptr);
    ValidateShapeAndDatatype("token_output", results_ptr);

    // Get the result data
  std::vector<std::string> text_output_data;
  FAIL_IF_ERR(
      results_ptr->StringData("text_output", &text_output_data),
      "unable to get data for text_output");
  if (text_output_data.size() != 16) {
    std::cerr << "error: received incorrect number of strings for text_output: "
              << text_output_data.size() << std::endl;
    // exit(1);
  }
  std::cout << "text_output: " << text_output_data[0]  << std::endl;


    int32_t* token_output_data_ptr;
    size_t token_output_byte_size = 512 * 4;
    size_t recv_token_output_byte_size;

    // Get the result data
    std::vector<std::string> result0_data;

    FAIL_IF_ERR(
        results_ptr->RawData(
            "token_output", (const uint8_t**)&token_output_data_ptr,
            &recv_token_output_byte_size),
        "unable to get result data for 'token_output'");
    if (recv_token_output_byte_size != token_output_byte_size) {
      std::cerr << "error: received incorrect byte size for 'token_output': "
                << recv_token_output_byte_size << std::endl;
      // exit(1);
    }
    std::cout << "token_output: ";
    for (size_t i = 0; i < 512; ++i) {
      std::cout << token_output_data_ptr[i] << " ";
    }
    std::cout << std::endl;

    // Get full response
    std::cout << results_ptr->DebugString() << std::endl;

    std::cout << "PASS : String Infer" << std::endl;

    return text_output_data[0];
}