/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_
#define FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
             "Non-compatible flatbuffers version included");

namespace tflite {

struct ComputeSettings;
struct ComputeSettingsBuilder;
struct ComputeSettingsT;

struct NNAPISettings;
struct NNAPISettingsBuilder;
struct NNAPISettingsT;

struct GPUSettings;
struct GPUSettingsBuilder;
struct GPUSettingsT;

struct HexagonSettings;
struct HexagonSettingsBuilder;
struct HexagonSettingsT;

struct XNNPackSettings;
struct XNNPackSettingsBuilder;
struct XNNPackSettingsT;

struct CoreMLSettings;
struct CoreMLSettingsBuilder;
struct CoreMLSettingsT;

struct StableDelegateLoaderSettings;
struct StableDelegateLoaderSettingsBuilder;
struct StableDelegateLoaderSettingsT;

struct CompilationCachingSettings;
struct CompilationCachingSettingsBuilder;
struct CompilationCachingSettingsT;

struct EdgeTpuDeviceSpec;
struct EdgeTpuDeviceSpecBuilder;
struct EdgeTpuDeviceSpecT;

struct EdgeTpuInactivePowerConfig;
struct EdgeTpuInactivePowerConfigBuilder;
struct EdgeTpuInactivePowerConfigT;

struct EdgeTpuSettings;
struct EdgeTpuSettingsBuilder;
struct EdgeTpuSettingsT;

struct GoogleEdgeTpuSettings;
struct GoogleEdgeTpuSettingsBuilder;
struct GoogleEdgeTpuSettingsT;

struct CoralSettings;
struct CoralSettingsBuilder;
struct CoralSettingsT;

struct CPUSettings;
struct CPUSettingsBuilder;
struct CPUSettingsT;

struct ArmNNSettings;
struct ArmNNSettingsBuilder;
struct ArmNNSettingsT;

struct MtkNeuronSettings;
struct MtkNeuronSettingsBuilder;
struct MtkNeuronSettingsT;

struct TFLiteSettings;
struct TFLiteSettingsBuilder;
struct TFLiteSettingsT;

struct FallbackSettings;
struct FallbackSettingsBuilder;
struct FallbackSettingsT;

struct BenchmarkMetric;
struct BenchmarkMetricBuilder;
struct BenchmarkMetricT;

struct BenchmarkResult;
struct BenchmarkResultBuilder;
struct BenchmarkResultT;

namespace BenchmarkResult_ {

struct InferenceOutput;
struct InferenceOutputBuilder;
struct InferenceOutputT;

}  // namespace BenchmarkResult_

struct ErrorCode;
struct ErrorCodeBuilder;
struct ErrorCodeT;

struct BenchmarkError;
struct BenchmarkErrorBuilder;
struct BenchmarkErrorT;

struct BenchmarkEvent;
struct BenchmarkEventBuilder;
struct BenchmarkEventT;

struct BestAccelerationDecision;
struct BestAccelerationDecisionBuilder;
struct BestAccelerationDecisionT;

struct BenchmarkInitializationFailure;
struct BenchmarkInitializationFailureBuilder;
struct BenchmarkInitializationFailureT;

struct MiniBenchmarkEvent;
struct MiniBenchmarkEventBuilder;
struct MiniBenchmarkEventT;

struct ModelFile;
struct ModelFileBuilder;
struct ModelFileT;

struct ModelIdGroup;
struct ModelIdGroupBuilder;
struct ModelIdGroupT;

struct BenchmarkStoragePaths;
struct BenchmarkStoragePathsBuilder;
struct BenchmarkStoragePathsT;

struct ValidationSettings;
struct ValidationSettingsBuilder;
struct ValidationSettingsT;

struct MinibenchmarkSettings;
struct MinibenchmarkSettingsBuilder;
struct MinibenchmarkSettingsT;

struct BenchmarkEventStorage;
struct BenchmarkEventStorageBuilder;
struct BenchmarkEventStorageT;

bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator==(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs);
bool operator!=(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs);
bool operator==(const StableDelegateLoaderSettingsT &lhs, const StableDelegateLoaderSettingsT &rhs);
bool operator!=(const StableDelegateLoaderSettingsT &lhs, const StableDelegateLoaderSettingsT &rhs);
bool operator==(const CompilationCachingSettingsT &lhs, const CompilationCachingSettingsT &rhs);
bool operator!=(const CompilationCachingSettingsT &lhs, const CompilationCachingSettingsT &rhs);
bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator==(const GoogleEdgeTpuSettingsT &lhs, const GoogleEdgeTpuSettingsT &rhs);
bool operator!=(const GoogleEdgeTpuSettingsT &lhs, const GoogleEdgeTpuSettingsT &rhs);
bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator==(const ArmNNSettingsT &lhs, const ArmNNSettingsT &rhs);
bool operator!=(const ArmNNSettingsT &lhs, const ArmNNSettingsT &rhs);
bool operator==(const MtkNeuronSettingsT &lhs, const MtkNeuronSettingsT &rhs);
bool operator!=(const MtkNeuronSettingsT &lhs, const MtkNeuronSettingsT &rhs);
bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
namespace BenchmarkResult_ {

bool operator==(const InferenceOutputT &lhs, const InferenceOutputT &rhs);
bool operator!=(const InferenceOutputT &lhs, const InferenceOutputT &rhs);
}  // namespace BenchmarkResult_

bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs);
bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs);
bool operator==(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs);
bool operator!=(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs);
bool operator==(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs);
bool operator!=(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs);
bool operator==(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs);
bool operator!=(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs);
bool operator==(const ModelFileT &lhs, const ModelFileT &rhs);
bool operator!=(const ModelFileT &lhs, const ModelFileT &rhs);
bool operator==(const ModelIdGroupT &lhs, const ModelIdGroupT &rhs);
bool operator!=(const ModelIdGroupT &lhs, const ModelIdGroupT &rhs);
bool operator==(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs);
bool operator!=(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs);
bool operator==(const ValidationSettingsT &lhs, const ValidationSettingsT &rhs);
bool operator!=(const ValidationSettingsT &lhs, const ValidationSettingsT &rhs);
bool operator==(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs);
bool operator!=(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs);
bool operator==(const BenchmarkEventStorageT &lhs, const BenchmarkEventStorageT &rhs);
bool operator!=(const BenchmarkEventStorageT &lhs, const BenchmarkEventStorageT &rhs);

// Overall execution preference used when selecting acceleration settings.
// Generated from the ExecutionPreference enum in the configuration schema.
enum ExecutionPreference : int32_t {
  ExecutionPreference_ANY = 0,
  ExecutionPreference_LOW_LATENCY = 1,
  ExecutionPreference_LOW_POWER = 2,
  ExecutionPreference_FORCE_CPU = 3,
  ExecutionPreference_MIN = ExecutionPreference_ANY,
  ExecutionPreference_MAX = ExecutionPreference_FORCE_CPU
};

// Returns a reference to a static array of all ExecutionPreference values,
// in declaration order.
inline const ExecutionPreference (&EnumValuesExecutionPreference())[4] {
  static const ExecutionPreference values[] = {
    ExecutionPreference_ANY,
    ExecutionPreference_LOW_LATENCY,
    ExecutionPreference_LOW_POWER,
    ExecutionPreference_FORCE_CPU
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (valid for this enum because its values are contiguous from 0).
inline const char * const *EnumNamesExecutionPreference() {
  static const char * const names[5] = {
    "ANY",
    "LOW_LATENCY",
    "LOW_POWER",
    "FORCE_CPU",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [ANY, FORCE_CPU].
inline const char *EnumNameExecutionPreference(ExecutionPreference e) {
  if (::flatbuffers::IsOutRange(e, ExecutionPreference_ANY, ExecutionPreference_FORCE_CPU)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesExecutionPreference()[index];
}

// TFLite delegate (accelerator backend) selector. Generated from the
// Delegate enum in the configuration schema.
enum Delegate : int32_t {
  Delegate_NONE = 0,
  Delegate_NNAPI = 1,
  Delegate_GPU = 2,
  Delegate_HEXAGON = 3,
  Delegate_XNNPACK = 4,
  Delegate_EDGETPU = 5,
  Delegate_EDGETPU_CORAL = 6,
  Delegate_CORE_ML = 7,
  Delegate_ARMNN = 8,
  Delegate_MTK_NEURON = 9,
  Delegate_MIN = Delegate_NONE,
  Delegate_MAX = Delegate_MTK_NEURON
};

// Returns a reference to a static array of all Delegate values, in
// declaration order.
inline const Delegate (&EnumValuesDelegate())[10] {
  static const Delegate values[] = {
    Delegate_NONE,
    Delegate_NNAPI,
    Delegate_GPU,
    Delegate_HEXAGON,
    Delegate_XNNPACK,
    Delegate_EDGETPU,
    Delegate_EDGETPU_CORAL,
    Delegate_CORE_ML,
    Delegate_ARMNN,
    Delegate_MTK_NEURON
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesDelegate() {
  static const char * const names[11] = {
    "NONE",
    "NNAPI",
    "GPU",
    "HEXAGON",
    "XNNPACK",
    "EDGETPU",
    "EDGETPU_CORAL",
    "CORE_ML",
    "ARMNN",
    "MTK_NEURON",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [NONE, MTK_NEURON].
inline const char *EnumNameDelegate(Delegate e) {
  if (::flatbuffers::IsOutRange(e, Delegate_NONE, Delegate_MTK_NEURON)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesDelegate()[index];
}

// Execution preference passed to the NNAPI delegate. Generated from the
// NNAPIExecutionPreference enum in the configuration schema.
enum NNAPIExecutionPreference : int32_t {
  NNAPIExecutionPreference_UNDEFINED = 0,
  NNAPIExecutionPreference_NNAPI_LOW_POWER = 1,
  NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER = 2,
  NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED = 3,
  NNAPIExecutionPreference_MIN = NNAPIExecutionPreference_UNDEFINED,
  NNAPIExecutionPreference_MAX = NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED
};

// Returns a reference to a static array of all NNAPIExecutionPreference
// values, in declaration order.
inline const NNAPIExecutionPreference (&EnumValuesNNAPIExecutionPreference())[4] {
  static const NNAPIExecutionPreference values[] = {
    NNAPIExecutionPreference_UNDEFINED,
    NNAPIExecutionPreference_NNAPI_LOW_POWER,
    NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
    NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesNNAPIExecutionPreference() {
  static const char * const names[5] = {
    "UNDEFINED",
    "NNAPI_LOW_POWER",
    "NNAPI_FAST_SINGLE_ANSWER",
    "NNAPI_SUSTAINED_SPEED",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameNNAPIExecutionPreference(NNAPIExecutionPreference e) {
  if (::flatbuffers::IsOutRange(e, NNAPIExecutionPreference_UNDEFINED, NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesNNAPIExecutionPreference()[index];
}

// Execution priority passed to the NNAPI delegate. Generated from the
// NNAPIExecutionPriority enum in the configuration schema.
enum NNAPIExecutionPriority : int32_t {
  NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED = 0,
  NNAPIExecutionPriority_NNAPI_PRIORITY_LOW = 1,
  NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM = 2,
  NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH = 3,
  NNAPIExecutionPriority_MIN = NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
  NNAPIExecutionPriority_MAX = NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH
};

// Returns a reference to a static array of all NNAPIExecutionPriority
// values, in declaration order.
inline const NNAPIExecutionPriority (&EnumValuesNNAPIExecutionPriority())[4] {
  static const NNAPIExecutionPriority values[] = {
    NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
    NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
    NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesNNAPIExecutionPriority() {
  static const char * const names[5] = {
    "NNAPI_PRIORITY_UNDEFINED",
    "NNAPI_PRIORITY_LOW",
    "NNAPI_PRIORITY_MEDIUM",
    "NNAPI_PRIORITY_HIGH",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameNNAPIExecutionPriority(NNAPIExecutionPriority e) {
  if (::flatbuffers::IsOutRange(e, NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesNNAPIExecutionPriority()[index];
}

// Which GPU API the GPU delegate should use. Generated from the GPUBackend
// enum in the configuration schema.
enum GPUBackend : int32_t {
  GPUBackend_UNSET = 0,
  GPUBackend_OPENCL = 1,
  GPUBackend_OPENGL = 2,
  GPUBackend_MIN = GPUBackend_UNSET,
  GPUBackend_MAX = GPUBackend_OPENGL
};

// Returns a reference to a static array of all GPUBackend values, in
// declaration order.
inline const GPUBackend (&EnumValuesGPUBackend())[3] {
  static const GPUBackend values[] = {
    GPUBackend_UNSET,
    GPUBackend_OPENCL,
    GPUBackend_OPENGL
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesGPUBackend() {
  static const char * const names[4] = {
    "UNSET",
    "OPENCL",
    "OPENGL",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [UNSET, OPENGL].
inline const char *EnumNameGPUBackend(GPUBackend e) {
  if (::flatbuffers::IsOutRange(e, GPUBackend_UNSET, GPUBackend_OPENGL)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesGPUBackend()[index];
}

// Inference priority hint for the GPU delegate (precision / latency /
// memory trade-off). Generated from the GPUInferencePriority enum in the
// configuration schema.
enum GPUInferencePriority : int32_t {
  GPUInferencePriority_GPU_PRIORITY_AUTO = 0,
  GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION = 1,
  GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY = 2,
  GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE = 3,
  GPUInferencePriority_MIN = GPUInferencePriority_GPU_PRIORITY_AUTO,
  GPUInferencePriority_MAX = GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE
};

// Returns a reference to a static array of all GPUInferencePriority values,
// in declaration order.
inline const GPUInferencePriority (&EnumValuesGPUInferencePriority())[4] {
  static const GPUInferencePriority values[] = {
    GPUInferencePriority_GPU_PRIORITY_AUTO,
    GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION,
    GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY,
    GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesGPUInferencePriority() {
  static const char * const names[5] = {
    "GPU_PRIORITY_AUTO",
    "GPU_PRIORITY_MAX_PRECISION",
    "GPU_PRIORITY_MIN_LATENCY",
    "GPU_PRIORITY_MIN_MEMORY_USAGE",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameGPUInferencePriority(GPUInferencePriority e) {
  if (::flatbuffers::IsOutRange(e, GPUInferencePriority_GPU_PRIORITY_AUTO, GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesGPUInferencePriority()[index];
}

// Expected usage pattern hint for the GPU delegate. Generated from the
// GPUInferenceUsage enum in the configuration schema.
enum GPUInferenceUsage : int32_t {
  GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER = 0,
  GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED = 1,
  GPUInferenceUsage_MIN = GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
  GPUInferenceUsage_MAX = GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED
};

// Returns a reference to a static array of all GPUInferenceUsage values,
// in declaration order.
inline const GPUInferenceUsage (&EnumValuesGPUInferenceUsage())[2] {
  static const GPUInferenceUsage values[] = {
    GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
    GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesGPUInferenceUsage() {
  static const char * const names[3] = {
    "GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER",
    "GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameGPUInferenceUsage(GPUInferenceUsage e) {
  if (::flatbuffers::IsOutRange(e, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesGPUInferenceUsage()[index];
}

// Flags forwarded to the XNNPACK delegate. Values are bitmask-style
// (powers of two, except QS8_QU8 == QS8 | QU8), so they are NOT contiguous;
// name lookup below uses a switch rather than an indexed table.
// Generated from the XNNPackFlags enum in the configuration schema.
enum XNNPackFlags : int32_t {
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS = 0,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8 = 1,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8 = 2,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8 = 3,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16 = 4,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_DYNAMIC_FULLY_CONNECTED = 8,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_VARIABLE_OPERATORS = 16,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER = 32,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_LATEST_OPERATORS = 64,
  XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING = 128,
  XNNPackFlags_MIN = XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS,
  XNNPackFlags_MAX = XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING
};

// Returns a reference to a static array of all XNNPackFlags values, in
// declaration order. NOTE: array index does not equal numeric value here.
inline const XNNPackFlags (&EnumValuesXNNPackFlags())[10] {
  static const XNNPackFlags values[] = {
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_DYNAMIC_FULLY_CONNECTED,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_VARIABLE_OPERATORS,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_LATEST_OPERATORS,
    XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING
  };
  return values;
}

// Returns the name of `e`, or "" for any value that is not one of the
// declared enumerators (combined flag bits other than QS8_QU8 yield "").
inline const char *EnumNameXNNPackFlags(XNNPackFlags e) {
  switch (e) {
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS: return "TFLITE_XNNPACK_DELEGATE_NO_FLAGS";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8: return "TFLITE_XNNPACK_DELEGATE_FLAG_QS8";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8: return "TFLITE_XNNPACK_DELEGATE_FLAG_QU8";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8: return "TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16: return "TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_DYNAMIC_FULLY_CONNECTED: return "TFLITE_XNNPACK_DELEGATE_FLAG_DYNAMIC_FULLY_CONNECTED";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_VARIABLE_OPERATORS: return "TFLITE_XNNPACK_DELEGATE_FLAG_VARIABLE_OPERATORS";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER: return "TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_LATEST_OPERATORS: return "TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_LATEST_OPERATORS";
    case XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING: return "TFLITE_XNNPACK_DELEGATE_FLAG_ENABLE_SUBGRAPH_RESHAPING";
    default: return "";
  }
}

// Nested-enum namespace for the CoreMLSettings table.
namespace CoreMLSettings_ {

// Which devices the Core ML delegate may be enabled on.
enum EnabledDevices : int32_t {
  EnabledDevices_DEVICES_ALL = 0,
  EnabledDevices_DEVICES_WITH_NEURAL_ENGINE = 1,
  EnabledDevices_MIN = EnabledDevices_DEVICES_ALL,
  EnabledDevices_MAX = EnabledDevices_DEVICES_WITH_NEURAL_ENGINE
};

// Returns a reference to a static array of all EnabledDevices values,
// in declaration order.
inline const EnabledDevices (&EnumValuesEnabledDevices())[2] {
  static const EnabledDevices values[] = {
    EnabledDevices_DEVICES_ALL,
    EnabledDevices_DEVICES_WITH_NEURAL_ENGINE
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesEnabledDevices() {
  static const char * const names[3] = {
    "DEVICES_ALL",
    "DEVICES_WITH_NEURAL_ENGINE",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameEnabledDevices(EnabledDevices e) {
  if (::flatbuffers::IsOutRange(e, EnabledDevices_DEVICES_ALL, EnabledDevices_DEVICES_WITH_NEURAL_ENGINE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesEnabledDevices()[index];
}

}  // namespace CoreMLSettings_

// Nested-enum namespace for the EdgeTpuDeviceSpec table.
namespace EdgeTpuDeviceSpec_ {

// Platform the Edge TPU device runs on (hardware MMIO vs. various
// reference/simulator environments).
enum PlatformType : int32_t {
  PlatformType_MMIO = 0,
  PlatformType_REFERENCE = 1,
  PlatformType_SIMULATOR = 2,
  PlatformType_REMOTE_SIMULATOR = 3,
  PlatformType_MIN = PlatformType_MMIO,
  PlatformType_MAX = PlatformType_REMOTE_SIMULATOR
};

// Returns a reference to a static array of all PlatformType values, in
// declaration order.
inline const PlatformType (&EnumValuesPlatformType())[4] {
  static const PlatformType values[] = {
    PlatformType_MMIO,
    PlatformType_REFERENCE,
    PlatformType_SIMULATOR,
    PlatformType_REMOTE_SIMULATOR
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesPlatformType() {
  static const char * const names[5] = {
    "MMIO",
    "REFERENCE",
    "SIMULATOR",
    "REMOTE_SIMULATOR",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [MMIO, REMOTE_SIMULATOR].
inline const char *EnumNamePlatformType(PlatformType e) {
  if (::flatbuffers::IsOutRange(e, PlatformType_MMIO, PlatformType_REMOTE_SIMULATOR)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesPlatformType()[index];
}

}  // namespace EdgeTpuDeviceSpec_

// Power states of the Edge TPU, ordered from off to over-drive. Generated
// from the EdgeTpuPowerState enum in the configuration schema.
enum EdgeTpuPowerState : int32_t {
  EdgeTpuPowerState_UNDEFINED_POWERSTATE = 0,
  EdgeTpuPowerState_TPU_CORE_OFF = 1,
  EdgeTpuPowerState_READY = 2,
  EdgeTpuPowerState_ACTIVE_MIN_POWER = 3,
  EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER = 4,
  EdgeTpuPowerState_ACTIVE_LOW_POWER = 5,
  EdgeTpuPowerState_ACTIVE = 6,
  EdgeTpuPowerState_OVER_DRIVE = 7,
  EdgeTpuPowerState_MIN = EdgeTpuPowerState_UNDEFINED_POWERSTATE,
  EdgeTpuPowerState_MAX = EdgeTpuPowerState_OVER_DRIVE
};

// Returns a reference to a static array of all EdgeTpuPowerState values,
// in declaration order.
inline const EdgeTpuPowerState (&EnumValuesEdgeTpuPowerState())[8] {
  static const EdgeTpuPowerState values[] = {
    EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    EdgeTpuPowerState_TPU_CORE_OFF,
    EdgeTpuPowerState_READY,
    EdgeTpuPowerState_ACTIVE_MIN_POWER,
    EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER,
    EdgeTpuPowerState_ACTIVE_LOW_POWER,
    EdgeTpuPowerState_ACTIVE,
    EdgeTpuPowerState_OVER_DRIVE
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesEdgeTpuPowerState() {
  static const char * const names[9] = {
    "UNDEFINED_POWERSTATE",
    "TPU_CORE_OFF",
    "READY",
    "ACTIVE_MIN_POWER",
    "ACTIVE_VERY_LOW_POWER",
    "ACTIVE_LOW_POWER",
    "ACTIVE",
    "OVER_DRIVE",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameEdgeTpuPowerState(EdgeTpuPowerState e) {
  if (::flatbuffers::IsOutRange(e, EdgeTpuPowerState_UNDEFINED_POWERSTATE, EdgeTpuPowerState_OVER_DRIVE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesEdgeTpuPowerState()[index];
}

// Nested-enum namespace for the EdgeTpuSettings table.
namespace EdgeTpuSettings_ {

// How float tensors may be truncated for the Edge TPU.
enum FloatTruncationType : int32_t {
  FloatTruncationType_UNSPECIFIED = 0,
  FloatTruncationType_NO_TRUNCATION = 1,
  FloatTruncationType_BFLOAT16 = 2,
  FloatTruncationType_HALF = 3,
  FloatTruncationType_MIN = FloatTruncationType_UNSPECIFIED,
  FloatTruncationType_MAX = FloatTruncationType_HALF
};

// Returns a reference to a static array of all FloatTruncationType values,
// in declaration order.
inline const FloatTruncationType (&EnumValuesFloatTruncationType())[4] {
  static const FloatTruncationType values[] = {
    FloatTruncationType_UNSPECIFIED,
    FloatTruncationType_NO_TRUNCATION,
    FloatTruncationType_BFLOAT16,
    FloatTruncationType_HALF
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesFloatTruncationType() {
  static const char * const names[5] = {
    "UNSPECIFIED",
    "NO_TRUNCATION",
    "BFLOAT16",
    "HALF",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameFloatTruncationType(FloatTruncationType e) {
  if (::flatbuffers::IsOutRange(e, FloatTruncationType_UNSPECIFIED, FloatTruncationType_HALF)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesFloatTruncationType()[index];
}

// Quality-of-service class for Edge TPU work.
enum QosClass : int32_t {
  QosClass_QOS_UNDEFINED = 0,
  QosClass_BEST_EFFORT = 1,
  QosClass_REALTIME = 2,
  QosClass_MIN = QosClass_QOS_UNDEFINED,
  QosClass_MAX = QosClass_REALTIME
};

// Returns a reference to a static array of all QosClass values, in
// declaration order.
inline const QosClass (&EnumValuesQosClass())[3] {
  static const QosClass values[] = {
    QosClass_QOS_UNDEFINED,
    QosClass_BEST_EFFORT,
    QosClass_REALTIME
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesQosClass() {
  static const char * const names[4] = {
    "QOS_UNDEFINED",
    "BEST_EFFORT",
    "REALTIME",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [QOS_UNDEFINED, REALTIME].
inline const char *EnumNameQosClass(QosClass e) {
  if (::flatbuffers::IsOutRange(e, QosClass_QOS_UNDEFINED, QosClass_REALTIME)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesQosClass()[index];
}

// Tri-state-plus-auto selector for the layer-IR TGC backend.
enum UseLayerIrTgcBackend : int32_t {
  UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED = 0,
  UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_NO = 1,
  UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES = 2,
  UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_AUTO = 3,
  UseLayerIrTgcBackend_MIN = UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED,
  UseLayerIrTgcBackend_MAX = UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_AUTO
};

// Returns a reference to a static array of all UseLayerIrTgcBackend values,
// in declaration order.
inline const UseLayerIrTgcBackend (&EnumValuesUseLayerIrTgcBackend())[4] {
  static const UseLayerIrTgcBackend values[] = {
    UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED,
    UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_NO,
    UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_YES,
    UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_AUTO
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesUseLayerIrTgcBackend() {
  static const char * const names[5] = {
    "USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED",
    "USE_LAYER_IR_TGC_BACKEND_NO",
    "USE_LAYER_IR_TGC_BACKEND_YES",
    "USE_LAYER_IR_TGC_BACKEND_AUTO",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameUseLayerIrTgcBackend(UseLayerIrTgcBackend e) {
  if (::flatbuffers::IsOutRange(e, UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED, UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_AUTO)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesUseLayerIrTgcBackend()[index];
}

}  // namespace EdgeTpuSettings_

// Nested-enum namespace for the GoogleEdgeTpuSettings table.
namespace GoogleEdgeTpuSettings_ {

// Scheduling priority for Google Edge TPU work.
enum Priority : int32_t {
  Priority_PRIORITY_UNDEFINED = 0,
  Priority_PRIORITY_LOW = 1,
  Priority_PRIORITY_MEDIUM = 2,
  Priority_PRIORITY_HIGH = 3,
  Priority_MIN = Priority_PRIORITY_UNDEFINED,
  Priority_MAX = Priority_PRIORITY_HIGH
};

// Returns a reference to a static array of all Priority values, in
// declaration order.
inline const Priority (&EnumValuesPriority())[4] {
  static const Priority values[] = {
    Priority_PRIORITY_UNDEFINED,
    Priority_PRIORITY_LOW,
    Priority_PRIORITY_MEDIUM,
    Priority_PRIORITY_HIGH
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesPriority() {
  static const char * const names[5] = {
    "PRIORITY_UNDEFINED",
    "PRIORITY_LOW",
    "PRIORITY_MEDIUM",
    "PRIORITY_HIGH",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNamePriority(Priority e) {
  if (::flatbuffers::IsOutRange(e, Priority_PRIORITY_UNDEFINED, Priority_PRIORITY_HIGH)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesPriority()[index];
}

// Three-valued boolean: undefined / false / true.
enum TriState : int32_t {
  TriState_TRISTATE_UNDEFINED = 0,
  TriState_TRISTATE_FALSE = 1,
  TriState_TRISTATE_TRUE = 2,
  TriState_MIN = TriState_TRISTATE_UNDEFINED,
  TriState_MAX = TriState_TRISTATE_TRUE
};

// Returns a reference to a static array of all TriState values, in
// declaration order.
inline const TriState (&EnumValuesTriState())[3] {
  static const TriState values[] = {
    TriState_TRISTATE_UNDEFINED,
    TriState_TRISTATE_FALSE,
    TriState_TRISTATE_TRUE
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesTriState() {
  static const char * const names[4] = {
    "TRISTATE_UNDEFINED",
    "TRISTATE_FALSE",
    "TRISTATE_TRUE",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameTriState(TriState e) {
  if (::flatbuffers::IsOutRange(e, TriState_TRISTATE_UNDEFINED, TriState_TRISTATE_TRUE)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesTriState()[index];
}

}  // namespace GoogleEdgeTpuSettings_

// Nested-enum namespace for the CoralSettings table.
namespace CoralSettings_ {

// Performance/clock-rate level requested from a Coral device.
enum Performance : int32_t {
  Performance_UNDEFINED = 0,
  Performance_MAXIMUM = 1,
  Performance_HIGH = 2,
  Performance_MEDIUM = 3,
  Performance_LOW = 4,
  Performance_MIN = Performance_UNDEFINED,
  Performance_MAX = Performance_LOW
};

// Returns a reference to a static array of all Performance values, in
// declaration order.
inline const Performance (&EnumValuesPerformance())[5] {
  static const Performance values[] = {
    Performance_UNDEFINED,
    Performance_MAXIMUM,
    Performance_HIGH,
    Performance_MEDIUM,
    Performance_LOW
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesPerformance() {
  static const char * const names[6] = {
    "UNDEFINED",
    "MAXIMUM",
    "HIGH",
    "MEDIUM",
    "LOW",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside [UNDEFINED, LOW].
inline const char *EnumNamePerformance(Performance e) {
  if (::flatbuffers::IsOutRange(e, Performance_UNDEFINED, Performance_LOW)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesPerformance()[index];
}

}  // namespace CoralSettings_

namespace MtkNeuronSettings_ {

// Execution preference for the MTK Neuron delegate (distinct from the
// top-level tflite::ExecutionPreference).
enum ExecutionPreference : int32_t {
  ExecutionPreference_PREFERENCE_UNDEFINED = 0,
  ExecutionPreference_PREFERENCE_LOW_POWER = 1,
  ExecutionPreference_PREFERENCE_FAST_SINGLE_ANSWER = 2,
  ExecutionPreference_PREFERENCE_SUSTAINED_SPEED = 3,
  ExecutionPreference_PREFERENCE_TURBO_BOOST = 4,
  ExecutionPreference_MIN = ExecutionPreference_PREFERENCE_UNDEFINED,
  ExecutionPreference_MAX = ExecutionPreference_PREFERENCE_TURBO_BOOST
};

// Returns a reference to a static array of all ExecutionPreference values,
// in declaration order.
inline const ExecutionPreference (&EnumValuesExecutionPreference())[5] {
  static const ExecutionPreference values[] = {
    ExecutionPreference_PREFERENCE_UNDEFINED,
    ExecutionPreference_PREFERENCE_LOW_POWER,
    ExecutionPreference_PREFERENCE_FAST_SINGLE_ANSWER,
    ExecutionPreference_PREFERENCE_SUSTAINED_SPEED,
    ExecutionPreference_PREFERENCE_TURBO_BOOST
  };
  return values;
}

// Returns a nullptr-terminated table of value names, indexed by the numeric
// enum value (values are contiguous from 0).
inline const char * const *EnumNamesExecutionPreference() {
  static const char * const names[6] = {
    "PREFERENCE_UNDEFINED",
    "PREFERENCE_LOW_POWER",
    "PREFERENCE_FAST_SINGLE_ANSWER",
    "PREFERENCE_SUSTAINED_SPEED",
    "PREFERENCE_TURBO_BOOST",
    nullptr
  };
  return names;
}

// Returns the name of `e`, or "" if `e` is outside the declared range.
inline const char *EnumNameExecutionPreference(ExecutionPreference e) {
  if (::flatbuffers::IsOutRange(e, ExecutionPreference_PREFERENCE_UNDEFINED, ExecutionPreference_PREFERENCE_TURBO_BOOST)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesExecutionPreference()[index];
}

// Scheduling priority levels for the MediaTek Neuron delegate. Note the
// non-UNDEFINED values are non-contiguous (90/100/110), so name lookup below
// uses explicit comparisons rather than table indexing.
enum ExecutionPriority : int32_t {
  ExecutionPriority_PRIORITY_UNDEFINED = 0,
  ExecutionPriority_PRIORITY_LOW = 90,
  ExecutionPriority_PRIORITY_MEDIUM = 100,
  ExecutionPriority_PRIORITY_HIGH = 110,
  ExecutionPriority_MIN = ExecutionPriority_PRIORITY_UNDEFINED,
  ExecutionPriority_MAX = ExecutionPriority_PRIORITY_HIGH
};

// All declared ExecutionPriority values, in declaration order.
inline const ExecutionPriority (&EnumValuesExecutionPriority())[4] {
  static const ExecutionPriority kValues[] = {
      ExecutionPriority_PRIORITY_UNDEFINED, ExecutionPriority_PRIORITY_LOW,
      ExecutionPriority_PRIORITY_MEDIUM, ExecutionPriority_PRIORITY_HIGH};
  return kValues;
}

// Maps an ExecutionPriority value to its symbolic name; unknown values map
// to "".
inline const char *EnumNameExecutionPriority(ExecutionPriority e) {
  if (e == ExecutionPriority_PRIORITY_UNDEFINED) return "PRIORITY_UNDEFINED";
  if (e == ExecutionPriority_PRIORITY_LOW) return "PRIORITY_LOW";
  if (e == ExecutionPriority_PRIORITY_MEDIUM) return "PRIORITY_MEDIUM";
  if (e == ExecutionPriority_PRIORITY_HIGH) return "PRIORITY_HIGH";
  return "";
}

// Optimization strategies for the MediaTek Neuron delegate. Values are
// contiguous from 0 and index the name table below.
enum OptimizationHint : int32_t {
  OptimizationHint_OPTIMIZATION_NONE = 0,
  OptimizationHint_OPTIMIZATION_LOW_LATENCY = 1,
  OptimizationHint_OPTIMIZATION_DEEP_FUSION = 2,
  OptimizationHint_OPTIMIZATION_BATCH_PROCESSING = 3,
  OptimizationHint_MIN = OptimizationHint_OPTIMIZATION_NONE,
  OptimizationHint_MAX = OptimizationHint_OPTIMIZATION_BATCH_PROCESSING
};

// All declared OptimizationHint values, in declaration order.
inline const OptimizationHint (&EnumValuesOptimizationHint())[4] {
  static const OptimizationHint kValues[] = {
      OptimizationHint_OPTIMIZATION_NONE,
      OptimizationHint_OPTIMIZATION_LOW_LATENCY,
      OptimizationHint_OPTIMIZATION_DEEP_FUSION,
      OptimizationHint_OPTIMIZATION_BATCH_PROCESSING};
  return kValues;
}

// Null-terminated name table, indexed by enum value.
inline const char * const *EnumNamesOptimizationHint() {
  static const char * const kNames[5] = {
      "OPTIMIZATION_NONE", "OPTIMIZATION_LOW_LATENCY",
      "OPTIMIZATION_DEEP_FUSION", "OPTIMIZATION_BATCH_PROCESSING", nullptr};
  return kNames;
}

// Maps an OptimizationHint value to its symbolic name; any value not declared
// in the enum maps to "".
inline const char *EnumNameOptimizationHint(OptimizationHint e) {
  switch (e) {
    case OptimizationHint_OPTIMIZATION_NONE: return "OPTIMIZATION_NONE";
    case OptimizationHint_OPTIMIZATION_LOW_LATENCY: return "OPTIMIZATION_LOW_LATENCY";
    case OptimizationHint_OPTIMIZATION_DEEP_FUSION: return "OPTIMIZATION_DEEP_FUSION";
    case OptimizationHint_OPTIMIZATION_BATCH_PROCESSING: return "OPTIMIZATION_BATCH_PROCESSING";
    default: return "";
  }
}

// Operation-compatibility checking modes for the MediaTek Neuron delegate.
// Values are contiguous from 0 and index the name table below.
enum OperationCheckMode : int32_t {
  OperationCheckMode_NO_OPERATION_CHECK = 0,
  OperationCheckMode_PER_NODE_OPERATION_CHECK = 1,
  OperationCheckMode_PRE_OPERATION_CHECK = 2,
  OperationCheckMode_MIN = OperationCheckMode_NO_OPERATION_CHECK,
  OperationCheckMode_MAX = OperationCheckMode_PRE_OPERATION_CHECK
};

// All declared OperationCheckMode values, in declaration order.
inline const OperationCheckMode (&EnumValuesOperationCheckMode())[3] {
  static const OperationCheckMode kValues[] = {
      OperationCheckMode_NO_OPERATION_CHECK,
      OperationCheckMode_PER_NODE_OPERATION_CHECK,
      OperationCheckMode_PRE_OPERATION_CHECK};
  return kValues;
}

// Null-terminated name table, indexed by enum value.
inline const char * const *EnumNamesOperationCheckMode() {
  static const char * const kNames[4] = {
      "NO_OPERATION_CHECK", "PER_NODE_OPERATION_CHECK", "PRE_OPERATION_CHECK",
      nullptr};
  return kNames;
}

// Maps an OperationCheckMode value to its symbolic name; any value not
// declared in the enum maps to "".
inline const char *EnumNameOperationCheckMode(OperationCheckMode e) {
  switch (e) {
    case OperationCheckMode_NO_OPERATION_CHECK: return "NO_OPERATION_CHECK";
    case OperationCheckMode_PER_NODE_OPERATION_CHECK: return "PER_NODE_OPERATION_CHECK";
    case OperationCheckMode_PRE_OPERATION_CHECK: return "PRE_OPERATION_CHECK";
    default: return "";
  }
}

}  // namespace MtkNeuronSettings_

// Lifecycle event types emitted by the mini-benchmark. Values are contiguous
// from 0 and index the name table below.
enum BenchmarkEventType : int32_t {
  BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE = 0,
  BenchmarkEventType_START = 1,
  BenchmarkEventType_END = 2,
  BenchmarkEventType_ERROR = 3,
  BenchmarkEventType_LOGGED = 4,
  BenchmarkEventType_RECOVERED_ERROR = 5,
  BenchmarkEventType_MIN = BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
  BenchmarkEventType_MAX = BenchmarkEventType_RECOVERED_ERROR
};

// All declared BenchmarkEventType values, in declaration order.
inline const BenchmarkEventType (&EnumValuesBenchmarkEventType())[6] {
  static const BenchmarkEventType kValues[] = {
      BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
      BenchmarkEventType_START,
      BenchmarkEventType_END,
      BenchmarkEventType_ERROR,
      BenchmarkEventType_LOGGED,
      BenchmarkEventType_RECOVERED_ERROR};
  return kValues;
}

// Null-terminated name table, indexed by enum value.
inline const char * const *EnumNamesBenchmarkEventType() {
  static const char * const kNames[7] = {
      "UNDEFINED_BENCHMARK_EVENT_TYPE", "START", "END", "ERROR", "LOGGED",
      "RECOVERED_ERROR", nullptr};
  return kNames;
}

// Maps a BenchmarkEventType value to its symbolic name; any value not
// declared in the enum maps to "".
inline const char *EnumNameBenchmarkEventType(BenchmarkEventType e) {
  switch (e) {
    case BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE: return "UNDEFINED_BENCHMARK_EVENT_TYPE";
    case BenchmarkEventType_START: return "START";
    case BenchmarkEventType_END: return "END";
    case BenchmarkEventType_ERROR: return "ERROR";
    case BenchmarkEventType_LOGGED: return "LOGGED";
    case BenchmarkEventType_RECOVERED_ERROR: return "RECOVERED_ERROR";
    default: return "";
  }
}

// Stage of the benchmark run an event belongs to. Values are contiguous from
// 0 and index the name table below.
enum BenchmarkStage : int32_t {
  BenchmarkStage_UNKNOWN = 0,
  BenchmarkStage_INITIALIZATION = 1,
  BenchmarkStage_INFERENCE = 2,
  BenchmarkStage_MIN = BenchmarkStage_UNKNOWN,
  BenchmarkStage_MAX = BenchmarkStage_INFERENCE
};

// All declared BenchmarkStage values, in declaration order.
inline const BenchmarkStage (&EnumValuesBenchmarkStage())[3] {
  static const BenchmarkStage kValues[] = {
      BenchmarkStage_UNKNOWN, BenchmarkStage_INITIALIZATION,
      BenchmarkStage_INFERENCE};
  return kValues;
}

// Null-terminated name table, indexed by enum value.
inline const char * const *EnumNamesBenchmarkStage() {
  static const char * const kNames[4] = {
      "UNKNOWN", "INITIALIZATION", "INFERENCE", nullptr};
  return kNames;
}

// Maps a BenchmarkStage value to its symbolic name; any value not declared in
// the enum maps to "".
inline const char *EnumNameBenchmarkStage(BenchmarkStage e) {
  switch (e) {
    case BenchmarkStage_UNKNOWN: return "UNKNOWN";
    case BenchmarkStage_INITIALIZATION: return "INITIALIZATION";
    case BenchmarkStage_INFERENCE: return "INFERENCE";
    default: return "";
  }
}

// Object ("native") API mirror of the ComputeSettings table: owns all field
// storage so instances can be mutated freely and packed back into a
// flatbuffer via ComputeSettings::Pack.
struct ComputeSettingsT : public ::flatbuffers::NativeTable {
  typedef ComputeSettings TableType;
  tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY;
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings{};
  std::string model_namespace_for_statistics{};
  std::string model_identifier_for_statistics{};
  std::unique_ptr<tflite::MinibenchmarkSettingsT> settings_to_test_locally{};
  ComputeSettingsT() = default;
  // Copy ctor is declared only (defined out of line): the unique_ptr members
  // require deep-copying the pointed-to tables.
  ComputeSettingsT(const ComputeSettingsT &o);
  ComputeSettingsT(ComputeSettingsT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style assignment (argument taken by value).
  ComputeSettingsT &operator=(ComputeSettingsT o) FLATBUFFERS_NOEXCEPT;
};

// Read-only flatbuffer accessor for the ComputeSettings table. Getters read
// directly from the underlying buffer; a missing scalar field yields the
// default shown in its GetField call, a missing pointer field yields nullptr.
struct ComputeSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ComputeSettingsT NativeTableType;
  typedef ComputeSettingsBuilder Builder;
  // Vtable slot offsets; fixed by the schema's field order, do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PREFERENCE = 4,
    VT_TFLITE_SETTINGS = 6,
    VT_MODEL_NAMESPACE_FOR_STATISTICS = 8,
    VT_MODEL_IDENTIFIER_FOR_STATISTICS = 10,
    VT_SETTINGS_TO_TEST_LOCALLY = 12
  };
  // Requested execution preference; 0 (ExecutionPreference_ANY) when unset.
  tflite::ExecutionPreference preference() const {
    return static_cast<tflite::ExecutionPreference>(GetField<int32_t>(VT_PREFERENCE, 0));
  }
  // Nested TFLite/delegate configuration; nullptr when absent.
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  const ::flatbuffers::String *model_namespace_for_statistics() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_NAMESPACE_FOR_STATISTICS);
  }
  const ::flatbuffers::String *model_identifier_for_statistics() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_IDENTIFIER_FOR_STATISTICS);
  }
  // Mini-benchmark configuration to evaluate locally; nullptr when absent.
  const tflite::MinibenchmarkSettings *settings_to_test_locally() const {
    return GetPointer<const tflite::MinibenchmarkSettings *>(VT_SETTINGS_TO_TEST_LOCALLY);
  }
  // Structural validation of an untrusted buffer: checks each field's bounds
  // (the trailing literal in VerifyField is the field's size/alignment in
  // bytes) and recursively verifies nested tables and strings.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PREFERENCE, 4) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyOffset(verifier, VT_MODEL_NAMESPACE_FOR_STATISTICS) &&
           verifier.VerifyString(model_namespace_for_statistics()) &&
           VerifyOffset(verifier, VT_MODEL_IDENTIFIER_FOR_STATISTICS) &&
           verifier.VerifyString(model_identifier_for_statistics()) &&
           VerifyOffset(verifier, VT_SETTINGS_TO_TEST_LOCALLY) &&
           verifier.VerifyTable(settings_to_test_locally()) &&
           verifier.EndTable();
  }
  // Conversions between this buffer view and the owning object-API type.
  ComputeSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ComputeSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ComputeSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for ComputeSettings tables: construct, call any subset
// of the add_* setters (each writes one field into fbb_), then Finish().
struct ComputeSettingsBuilder {
  typedef ComputeSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // Table start position returned by StartTable() in the constructor.
  ::flatbuffers::uoffset_t start_;
  void add_preference(tflite::ExecutionPreference preference) {
    // Stored as int32; a value equal to the default (0) is omitted from the buffer.
    fbb_.AddElement<int32_t>(ComputeSettings::VT_PREFERENCE, static_cast<int32_t>(preference), 0);
  }
  void add_tflite_settings(::flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(ComputeSettings::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_model_namespace_for_statistics(::flatbuffers::Offset<::flatbuffers::String> model_namespace_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_NAMESPACE_FOR_STATISTICS, model_namespace_for_statistics);
  }
  void add_model_identifier_for_statistics(::flatbuffers::Offset<::flatbuffers::String> model_identifier_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_IDENTIFIER_FOR_STATISTICS, model_identifier_for_statistics);
  }
  void add_settings_to_test_locally(::flatbuffers::Offset<tflite::MinibenchmarkSettings> settings_to_test_locally) {
    fbb_.AddOffset(ComputeSettings::VT_SETTINGS_TO_TEST_LOCALLY, settings_to_test_locally);
  }
  explicit ComputeSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Ends the table and returns its offset within the buffer.
  ::flatbuffers::Offset<ComputeSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ComputeSettings>(end);
    return o;
  }
};

// One-shot helper that builds a complete ComputeSettings table. Fields are
// added in the generator-chosen order (this affects only buffer layout, not
// the logical table contents).
inline ::flatbuffers::Offset<ComputeSettings> CreateComputeSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    ::flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_namespace_for_statistics = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_identifier_for_statistics = 0,
    ::flatbuffers::Offset<tflite::MinibenchmarkSettings> settings_to_test_locally = 0) {
  ComputeSettingsBuilder builder_(_fbb);
  builder_.add_settings_to_test_locally(settings_to_test_locally);
  builder_.add_model_identifier_for_statistics(model_identifier_for_statistics);
  builder_.add_model_namespace_for_statistics(model_namespace_for_statistics);
  builder_.add_tflite_settings(tflite_settings);
  builder_.add_preference(preference);
  return builder_.Finish();
}

// As CreateComputeSettings, but takes C strings and copies them into the
// buffer first; a nullptr string leaves the corresponding field absent.
inline ::flatbuffers::Offset<ComputeSettings> CreateComputeSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    ::flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    const char *model_namespace_for_statistics = nullptr,
    const char *model_identifier_for_statistics = nullptr,
    ::flatbuffers::Offset<tflite::MinibenchmarkSettings> settings_to_test_locally = 0) {
  auto model_namespace_for_statistics__ = model_namespace_for_statistics ? _fbb.CreateString(model_namespace_for_statistics) : 0;
  auto model_identifier_for_statistics__ = model_identifier_for_statistics ? _fbb.CreateString(model_identifier_for_statistics) : 0;
  return tflite::CreateComputeSettings(
      _fbb,
      preference,
      tflite_settings,
      model_namespace_for_statistics__,
      model_identifier_for_statistics__,
      settings_to_test_locally);
}

::flatbuffers::Offset<ComputeSettings> CreateComputeSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object ("native") API mirror of the NNAPISettings table: owns all field
// storage and can be packed back into a flatbuffer via NNAPISettings::Pack.
struct NNAPISettingsT : public ::flatbuffers::NativeTable {
  typedef NNAPISettings TableType;
  std::string accelerator_name{};
  std::string cache_directory{};
  std::string model_token{};
  tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED;
  int32_t no_of_nnapi_instances_to_cache = 0;
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings{};
  bool allow_nnapi_cpu_on_android_10_plus = false;
  tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
  bool allow_dynamic_dimensions = false;
  bool allow_fp16_precision_for_fp32 = false;
  bool use_burst_computation = false;
  // Opaque 64-bit handle; semantics defined by the NNAPI support library
  // integration (not visible here).
  int64_t support_library_handle = 0;
  NNAPISettingsT() = default;
  // Copy ctor is declared only (defined out of line): fallback_settings is a
  // unique_ptr and needs a deep copy.
  NNAPISettingsT(const NNAPISettingsT &o);
  NNAPISettingsT(NNAPISettingsT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style assignment (argument taken by value).
  NNAPISettingsT &operator=(NNAPISettingsT o) FLATBUFFERS_NOEXCEPT;
};

// Read-only flatbuffer accessor for the NNAPISettings table. Getters read
// directly from the buffer; missing scalars yield the defaults in their
// GetField calls, missing strings/tables yield nullptr. Booleans are stored
// as uint8_t per the flatbuffers wire format.
struct NNAPISettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef NNAPISettingsT NativeTableType;
  typedef NNAPISettingsBuilder Builder;
  // Vtable slot offsets; fixed by the schema's field order, do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ACCELERATOR_NAME = 4,
    VT_CACHE_DIRECTORY = 6,
    VT_MODEL_TOKEN = 8,
    VT_EXECUTION_PREFERENCE = 10,
    VT_NO_OF_NNAPI_INSTANCES_TO_CACHE = 12,
    VT_FALLBACK_SETTINGS = 14,
    VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS = 16,
    VT_EXECUTION_PRIORITY = 18,
    VT_ALLOW_DYNAMIC_DIMENSIONS = 20,
    VT_ALLOW_FP16_PRECISION_FOR_FP32 = 22,
    VT_USE_BURST_COMPUTATION = 24,
    VT_SUPPORT_LIBRARY_HANDLE = 26
  };
  const ::flatbuffers::String *accelerator_name() const {
    return GetPointer<const ::flatbuffers::String *>(VT_ACCELERATOR_NAME);
  }
  const ::flatbuffers::String *cache_directory() const {
    return GetPointer<const ::flatbuffers::String *>(VT_CACHE_DIRECTORY);
  }
  const ::flatbuffers::String *model_token() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  tflite::NNAPIExecutionPreference execution_preference() const {
    return static_cast<tflite::NNAPIExecutionPreference>(GetField<int32_t>(VT_EXECUTION_PREFERENCE, 0));
  }
  int32_t no_of_nnapi_instances_to_cache() const {
    return GetField<int32_t>(VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 0);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  bool allow_nnapi_cpu_on_android_10_plus() const {
    return GetField<uint8_t>(VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 0) != 0;
  }
  tflite::NNAPIExecutionPriority execution_priority() const {
    return static_cast<tflite::NNAPIExecutionPriority>(GetField<int32_t>(VT_EXECUTION_PRIORITY, 0));
  }
  bool allow_dynamic_dimensions() const {
    return GetField<uint8_t>(VT_ALLOW_DYNAMIC_DIMENSIONS, 0) != 0;
  }
  bool allow_fp16_precision_for_fp32() const {
    return GetField<uint8_t>(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0;
  }
  bool use_burst_computation() const {
    return GetField<uint8_t>(VT_USE_BURST_COMPUTATION, 0) != 0;
  }
  // Opaque 64-bit handle; semantics defined by the NNAPI support library
  // integration (not visible here).
  int64_t support_library_handle() const {
    return GetField<int64_t>(VT_SUPPORT_LIBRARY_HANDLE, 0);
  }
  // Structural validation of an untrusted buffer: checks each field's bounds
  // (the trailing literal in VerifyField is the field's size/alignment in
  // bytes) and recursively verifies nested tables and strings.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ACCELERATOR_NAME) &&
           verifier.VerifyString(accelerator_name()) &&
           VerifyOffset(verifier, VT_CACHE_DIRECTORY) &&
           verifier.VerifyString(cache_directory()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PREFERENCE, 4) &&
           VerifyField<int32_t>(verifier, VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 4) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 1) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PRIORITY, 4) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_DYNAMIC_DIMENSIONS, 1) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32, 1) &&
           VerifyField<uint8_t>(verifier, VT_USE_BURST_COMPUTATION, 1) &&
           VerifyField<int64_t>(verifier, VT_SUPPORT_LIBRARY_HANDLE, 8) &&
           verifier.EndTable();
  }
  // Conversions between this buffer view and the owning object-API type.
  NNAPISettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(NNAPISettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<NNAPISettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for NNAPISettings tables: construct, call any subset of
// the add_* setters (each writes one field into fbb_), then Finish(). Scalars
// equal to their schema default (the last AddElement argument) are omitted
// from the buffer.
struct NNAPISettingsBuilder {
  typedef NNAPISettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // Table start position returned by StartTable() in the constructor.
  ::flatbuffers::uoffset_t start_;
  void add_accelerator_name(::flatbuffers::Offset<::flatbuffers::String> accelerator_name) {
    fbb_.AddOffset(NNAPISettings::VT_ACCELERATOR_NAME, accelerator_name);
  }
  void add_cache_directory(::flatbuffers::Offset<::flatbuffers::String> cache_directory) {
    fbb_.AddOffset(NNAPISettings::VT_CACHE_DIRECTORY, cache_directory);
  }
  void add_model_token(::flatbuffers::Offset<::flatbuffers::String> model_token) {
    fbb_.AddOffset(NNAPISettings::VT_MODEL_TOKEN, model_token);
  }
  void add_execution_preference(tflite::NNAPIExecutionPreference execution_preference) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PREFERENCE, static_cast<int32_t>(execution_preference), 0);
  }
  void add_no_of_nnapi_instances_to_cache(int32_t no_of_nnapi_instances_to_cache) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, no_of_nnapi_instances_to_cache, 0);
  }
  void add_fallback_settings(::flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(NNAPISettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  void add_allow_nnapi_cpu_on_android_10_plus(bool allow_nnapi_cpu_on_android_10_plus) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, static_cast<uint8_t>(allow_nnapi_cpu_on_android_10_plus), 0);
  }
  void add_execution_priority(tflite::NNAPIExecutionPriority execution_priority) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PRIORITY, static_cast<int32_t>(execution_priority), 0);
  }
  void add_allow_dynamic_dimensions(bool allow_dynamic_dimensions) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_DYNAMIC_DIMENSIONS, static_cast<uint8_t>(allow_dynamic_dimensions), 0);
  }
  void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast<uint8_t>(allow_fp16_precision_for_fp32), 0);
  }
  void add_use_burst_computation(bool use_burst_computation) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_USE_BURST_COMPUTATION, static_cast<uint8_t>(use_burst_computation), 0);
  }
  void add_support_library_handle(int64_t support_library_handle) {
    fbb_.AddElement<int64_t>(NNAPISettings::VT_SUPPORT_LIBRARY_HANDLE, support_library_handle, 0);
  }
  explicit NNAPISettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Ends the table and returns its offset within the buffer.
  ::flatbuffers::Offset<NNAPISettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<NNAPISettings>(end);
    return o;
  }
};

// One-shot helper that builds a complete NNAPISettings table. Fields are
// added in decreasing element size (int64, then 32-bit scalars/offsets, then
// 8-bit flags) — this affects only buffer layout, not the logical contents.
inline ::flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> accelerator_name = 0,
    ::flatbuffers::Offset<::flatbuffers::String> cache_directory = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_token = 0,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    ::flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false,
    bool use_burst_computation = false,
    int64_t support_library_handle = 0) {
  NNAPISettingsBuilder builder_(_fbb);
  builder_.add_support_library_handle(support_library_handle);
  builder_.add_execution_priority(execution_priority);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_no_of_nnapi_instances_to_cache(no_of_nnapi_instances_to_cache);
  builder_.add_execution_preference(execution_preference);
  builder_.add_model_token(model_token);
  builder_.add_cache_directory(cache_directory);
  builder_.add_accelerator_name(accelerator_name);
  builder_.add_use_burst_computation(use_burst_computation);
  builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32);
  builder_.add_allow_dynamic_dimensions(allow_dynamic_dimensions);
  builder_.add_allow_nnapi_cpu_on_android_10_plus(allow_nnapi_cpu_on_android_10_plus);
  return builder_.Finish();
}

// As CreateNNAPISettings, but takes C strings and copies them into the buffer
// first; a nullptr string leaves the corresponding field absent.
inline ::flatbuffers::Offset<NNAPISettings> CreateNNAPISettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *accelerator_name = nullptr,
    const char *cache_directory = nullptr,
    const char *model_token = nullptr,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    ::flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false,
    bool use_burst_computation = false,
    int64_t support_library_handle = 0) {
  auto accelerator_name__ = accelerator_name ? _fbb.CreateString(accelerator_name) : 0;
  auto cache_directory__ = cache_directory ? _fbb.CreateString(cache_directory) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateNNAPISettings(
      _fbb,
      accelerator_name__,
      cache_directory__,
      model_token__,
      execution_preference,
      no_of_nnapi_instances_to_cache,
      fallback_settings,
      allow_nnapi_cpu_on_android_10_plus,
      execution_priority,
      allow_dynamic_dimensions,
      allow_fp16_precision_for_fp32,
      use_burst_computation,
      support_library_handle);
}

::flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(::flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object ("native") API mirror of the GPUSettings table. Plain aggregate
// (no owned table pointers), so the implicitly-defaulted special members
// suffice. Note enable_quantized_inference defaults to true, unlike the
// other flags.
struct GPUSettingsT : public ::flatbuffers::NativeTable {
  typedef GPUSettings TableType;
  bool is_precision_loss_allowed = false;
  bool enable_quantized_inference = true;
  tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET;
  tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO;
  tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO;
  tflite::GPUInferencePriority inference_priority3 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO;
  tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
  std::string cache_directory{};
  std::string model_token{};
};

// Read-only flatbuffer accessor for the GPUSettings table. Getters read
// directly from the buffer; missing scalars yield the defaults in their
// GetField calls, missing strings yield nullptr.
struct GPUSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef GPUSettingsT NativeTableType;
  typedef GPUSettingsBuilder Builder;
  // Vtable slot offsets; fixed by the schema's field order, do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IS_PRECISION_LOSS_ALLOWED = 4,
    VT_ENABLE_QUANTIZED_INFERENCE = 6,
    VT_FORCE_BACKEND = 8,
    VT_INFERENCE_PRIORITY1 = 10,
    VT_INFERENCE_PRIORITY2 = 12,
    VT_INFERENCE_PRIORITY3 = 14,
    VT_INFERENCE_PREFERENCE = 16,
    VT_CACHE_DIRECTORY = 18,
    VT_MODEL_TOKEN = 20
  };
  bool is_precision_loss_allowed() const {
    return GetField<uint8_t>(VT_IS_PRECISION_LOSS_ALLOWED, 0) != 0;
  }
  // Note the non-zero default: this flag is true when absent from the buffer.
  bool enable_quantized_inference() const {
    return GetField<uint8_t>(VT_ENABLE_QUANTIZED_INFERENCE, 1) != 0;
  }
  tflite::GPUBackend force_backend() const {
    return static_cast<tflite::GPUBackend>(GetField<int32_t>(VT_FORCE_BACKEND, 0));
  }
  tflite::GPUInferencePriority inference_priority1() const {
    return static_cast<tflite::GPUInferencePriority>(GetField<int32_t>(VT_INFERENCE_PRIORITY1, 0));
  }
  tflite::GPUInferencePriority inference_priority2() const {
    return static_cast<tflite::GPUInferencePriority>(GetField<int32_t>(VT_INFERENCE_PRIORITY2, 0));
  }
  tflite::GPUInferencePriority inference_priority3() const {
    return static_cast<tflite::GPUInferencePriority>(GetField<int32_t>(VT_INFERENCE_PRIORITY3, 0));
  }
  tflite::GPUInferenceUsage inference_preference() const {
    return static_cast<tflite::GPUInferenceUsage>(GetField<int32_t>(VT_INFERENCE_PREFERENCE, 0));
  }
  const ::flatbuffers::String *cache_directory() const {
    return GetPointer<const ::flatbuffers::String *>(VT_CACHE_DIRECTORY);
  }
  const ::flatbuffers::String *model_token() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  // Structural validation of an untrusted buffer: checks each field's bounds
  // (the trailing literal in VerifyField is the field's size/alignment in
  // bytes) and verifies the string fields.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_IS_PRECISION_LOSS_ALLOWED, 1) &&
           VerifyField<uint8_t>(verifier, VT_ENABLE_QUANTIZED_INFERENCE, 1) &&
           VerifyField<int32_t>(verifier, VT_FORCE_BACKEND, 4) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY1, 4) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY2, 4) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY3, 4) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PREFERENCE, 4) &&
           VerifyOffset(verifier, VT_CACHE_DIRECTORY) &&
           verifier.VerifyString(cache_directory()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           verifier.EndTable();
  }
  // Conversions between this buffer view and the owning object-API type.
  GPUSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(GPUSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<GPUSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for GPUSettings tables: construct, call any subset of
// the add_* setters (each writes one field into fbb_), then Finish(). Scalars
// equal to their schema default (the last AddElement argument) are omitted
// from the buffer; note enable_quantized_inference defaults to 1 (true).
struct GPUSettingsBuilder {
  typedef GPUSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // Table start position returned by StartTable() in the constructor.
  ::flatbuffers::uoffset_t start_;
  void add_is_precision_loss_allowed(bool is_precision_loss_allowed) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_IS_PRECISION_LOSS_ALLOWED, static_cast<uint8_t>(is_precision_loss_allowed), 0);
  }
  void add_enable_quantized_inference(bool enable_quantized_inference) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_ENABLE_QUANTIZED_INFERENCE, static_cast<uint8_t>(enable_quantized_inference), 1);
  }
  void add_force_backend(tflite::GPUBackend force_backend) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_FORCE_BACKEND, static_cast<int32_t>(force_backend), 0);
  }
  void add_inference_priority1(tflite::GPUInferencePriority inference_priority1) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_INFERENCE_PRIORITY1, static_cast<int32_t>(inference_priority1), 0);
  }
  void add_inference_priority2(tflite::GPUInferencePriority inference_priority2) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_INFERENCE_PRIORITY2, static_cast<int32_t>(inference_priority2), 0);
  }
  void add_inference_priority3(tflite::GPUInferencePriority inference_priority3) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_INFERENCE_PRIORITY3, static_cast<int32_t>(inference_priority3), 0);
  }
  void add_inference_preference(tflite::GPUInferenceUsage inference_preference) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_INFERENCE_PREFERENCE, static_cast<int32_t>(inference_preference), 0);
  }
  void add_cache_directory(::flatbuffers::Offset<::flatbuffers::String> cache_directory) {
    fbb_.AddOffset(GPUSettings::VT_CACHE_DIRECTORY, cache_directory);
  }
  void add_model_token(::flatbuffers::Offset<::flatbuffers::String> model_token) {
    fbb_.AddOffset(GPUSettings::VT_MODEL_TOKEN, model_token);
  }
  explicit GPUSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Ends the table and returns its offset within the buffer.
  ::flatbuffers::Offset<GPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<GPUSettings>(end);
    return o;
  }
};

// One-shot helper that builds a complete GPUSettings table. Fields are added
// in the generator-chosen, size-grouped order (offsets and 32-bit scalars
// before 8-bit flags); this affects only buffer layout, not the logical
// contents.
inline ::flatbuffers::Offset<GPUSettings> CreateGPUSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    bool is_precision_loss_allowed = false,
    bool enable_quantized_inference = true,
    tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET,
    tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferencePriority inference_priority3 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
    ::flatbuffers::Offset<::flatbuffers::String> cache_directory = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_token = 0) {
  GPUSettingsBuilder builder_(_fbb);
  builder_.add_model_token(model_token);
  builder_.add_cache_directory(cache_directory);
  builder_.add_inference_preference(inference_preference);
  builder_.add_inference_priority3(inference_priority3);
  builder_.add_inference_priority2(inference_priority2);
  builder_.add_inference_priority1(inference_priority1);
  builder_.add_force_backend(force_backend);
  builder_.add_enable_quantized_inference(enable_quantized_inference);
  builder_.add_is_precision_loss_allowed(is_precision_loss_allowed);
  return builder_.Finish();
}

// As CreateGPUSettings, but takes C strings and copies them into the buffer
// first; a nullptr string leaves the corresponding field absent.
inline ::flatbuffers::Offset<GPUSettings> CreateGPUSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    bool is_precision_loss_allowed = false,
    bool enable_quantized_inference = true,
    tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET,
    tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferencePriority inference_priority3 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO,
    tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
    const char *cache_directory = nullptr,
    const char *model_token = nullptr) {
  auto cache_directory__ = cache_directory ? _fbb.CreateString(cache_directory) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateGPUSettings(
      _fbb,
      is_precision_loss_allowed,
      enable_quantized_inference,
      force_backend,
      inference_priority1,
      inference_priority2,
      inference_priority3,
      inference_preference,
      cache_directory__,
      model_token__);
}

::flatbuffers::Offset<GPUSettings> CreateGPUSettings(::flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object ("native") API mirror of the HexagonSettings table. All-scalar
// aggregate, so the implicitly-defaulted special members suffice.
struct HexagonSettingsT : public ::flatbuffers::NativeTable {
  typedef HexagonSettings TableType;
  int32_t debug_level = 0;
  int32_t powersave_level = 0;
  bool print_graph_profile = false;
  bool print_graph_debug = false;
};

struct HexagonSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef HexagonSettingsT NativeTableType;
  typedef HexagonSettingsBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEBUG_LEVEL = 4,
    VT_POWERSAVE_LEVEL = 6,
    VT_PRINT_GRAPH_PROFILE = 8,
    VT_PRINT_GRAPH_DEBUG = 10
  };
  int32_t debug_level() const {
    return GetField<int32_t>(VT_DEBUG_LEVEL, 0);
  }
  int32_t powersave_level() const {
    return GetField<int32_t>(VT_POWERSAVE_LEVEL, 0);
  }
  bool print_graph_profile() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_PROFILE, 0) != 0;
  }
  bool print_graph_debug() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_DEBUG, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DEBUG_LEVEL, 4) &&
           VerifyField<int32_t>(verifier, VT_POWERSAVE_LEVEL, 4) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_PROFILE, 1) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_DEBUG, 1) &&
           verifier.EndTable();
  }
  HexagonSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(HexagonSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<HexagonSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

struct HexagonSettingsBuilder {
  typedef HexagonSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_debug_level(int32_t debug_level) {
    fbb_.AddElement<int32_t>(HexagonSettings::VT_DEBUG_LEVEL, debug_level, 0);
  }
  void add_powersave_level(int32_t powersave_level) {
    fbb_.AddElement<int32_t>(HexagonSettings::VT_POWERSAVE_LEVEL, powersave_level, 0);
  }
  void add_print_graph_profile(bool print_graph_profile) {
    fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_PROFILE, static_cast<uint8_t>(print_graph_profile), 0);
  }
  void add_print_graph_debug(bool print_graph_debug) {
    fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_DEBUG, static_cast<uint8_t>(print_graph_debug), 0);
  }
  explicit HexagonSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<HexagonSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<HexagonSettings>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t debug_level = 0,
    int32_t powersave_level = 0,
    bool print_graph_profile = false,
    bool print_graph_debug = false) {
  HexagonSettingsBuilder builder_(_fbb);
  builder_.add_powersave_level(powersave_level);
  builder_.add_debug_level(debug_level);
  builder_.add_print_graph_debug(print_graph_debug);
  builder_.add_print_graph_profile(print_graph_profile);
  return builder_.Finish();
}

::flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(::flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the XNNPackSettings table.
struct XNNPackSettingsT : public ::flatbuffers::NativeTable {
  typedef XNNPackSettings TableType;
  int32_t num_threads = 0;
  tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS;
  std::string weight_cache_file_path{};
};

// Flatbuffer table accessor for XNNPack delegate settings.
struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef XNNPackSettingsT NativeTableType;
  typedef XNNPackSettingsBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4,
    VT_FLAGS = 6,
    VT_WEIGHT_CACHE_FILE_PATH = 8
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, 0);
  }
  // Enums are stored as their int32_t underlying value.
  tflite::XNNPackFlags flags() const {
    return static_cast<tflite::XNNPackFlags>(GetField<int32_t>(VT_FLAGS, 0));
  }
  // Returns nullptr when the field is absent from the buffer.
  const ::flatbuffers::String *weight_cache_file_path() const {
    return GetPointer<const ::flatbuffers::String *>(VT_WEIGHT_CACHE_FILE_PATH);
  }
  // Structural validation of an untrusted buffer (VerifyString accepts null).
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS, 4) &&
           VerifyField<int32_t>(verifier, VT_FLAGS, 4) &&
           VerifyOffset(verifier, VT_WEIGHT_CACHE_FILE_PATH) &&
           verifier.VerifyString(weight_cache_file_path()) &&
           verifier.EndTable();
  }
  XNNPackSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(XNNPackSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<XNNPackSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one XNNPackSettings table into a FlatBufferBuilder.
struct XNNPackSettingsBuilder {
  typedef XNNPackSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(XNNPackSettings::VT_NUM_THREADS, num_threads, 0);
  }
  void add_flags(tflite::XNNPackFlags flags) {
    fbb_.AddElement<int32_t>(XNNPackSettings::VT_FLAGS, static_cast<int32_t>(flags), 0);
  }
  void add_weight_cache_file_path(::flatbuffers::Offset<::flatbuffers::String> weight_cache_file_path) {
    fbb_.AddOffset(XNNPackSettings::VT_WEIGHT_CACHE_FILE_PATH, weight_cache_file_path);
  }
  explicit XNNPackSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<XNNPackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<XNNPackSettings>(end);
    return o;
  }
};

// One-shot helper; the string must already have been serialized into _fbb.
inline ::flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t num_threads = 0,
    tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS,
    ::flatbuffers::Offset<::flatbuffers::String> weight_cache_file_path = 0) {
  XNNPackSettingsBuilder builder_(_fbb);
  builder_.add_weight_cache_file_path(weight_cache_file_path);
  builder_.add_flags(flags);
  builder_.add_num_threads(num_threads);
  return builder_.Finish();
}

// Direct variant taking a C string; nullptr omits the field (offset 0).
inline ::flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t num_threads = 0,
    tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS,
    const char *weight_cache_file_path = nullptr) {
  auto weight_cache_file_path__ = weight_cache_file_path ? _fbb.CreateString(weight_cache_file_path) : 0;
  return tflite::CreateXNNPackSettings(
      _fbb,
      num_threads,
      flags,
      weight_cache_file_path__);
}

// Packs a native XNNPackSettingsT (object API); definition is out-of-line.
::flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the CoreMLSettings table. Note the non-zero
// schema default for min_nodes_per_partition (2).
struct CoreMLSettingsT : public ::flatbuffers::NativeTable {
  typedef CoreMLSettings TableType;
  tflite::CoreMLSettings_::EnabledDevices enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL;
  int32_t coreml_version = 0;
  int32_t max_delegated_partitions = 0;
  int32_t min_nodes_per_partition = 2;
};

// Flatbuffer table accessor for Core ML delegate settings.
struct CoreMLSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef CoreMLSettingsT NativeTableType;
  typedef CoreMLSettingsBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ENABLED_DEVICES = 4,
    VT_COREML_VERSION = 6,
    VT_MAX_DELEGATED_PARTITIONS = 8,
    VT_MIN_NODES_PER_PARTITION = 10
  };
  tflite::CoreMLSettings_::EnabledDevices enabled_devices() const {
    return static_cast<tflite::CoreMLSettings_::EnabledDevices>(GetField<int32_t>(VT_ENABLED_DEVICES, 0));
  }
  int32_t coreml_version() const {
    return GetField<int32_t>(VT_COREML_VERSION, 0);
  }
  int32_t max_delegated_partitions() const {
    return GetField<int32_t>(VT_MAX_DELEGATED_PARTITIONS, 0);
  }
  // Default of 2 applies when the field was not written to the buffer.
  int32_t min_nodes_per_partition() const {
    return GetField<int32_t>(VT_MIN_NODES_PER_PARTITION, 2);
  }
  // Structural validation of an untrusted buffer.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_ENABLED_DEVICES, 4) &&
           VerifyField<int32_t>(verifier, VT_COREML_VERSION, 4) &&
           VerifyField<int32_t>(verifier, VT_MAX_DELEGATED_PARTITIONS, 4) &&
           VerifyField<int32_t>(verifier, VT_MIN_NODES_PER_PARTITION, 4) &&
           verifier.EndTable();
  }
  CoreMLSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CoreMLSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<CoreMLSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one CoreMLSettings table into a FlatBufferBuilder.
struct CoreMLSettingsBuilder {
  typedef CoreMLSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_enabled_devices(tflite::CoreMLSettings_::EnabledDevices enabled_devices) {
    fbb_.AddElement<int32_t>(CoreMLSettings::VT_ENABLED_DEVICES, static_cast<int32_t>(enabled_devices), 0);
  }
  void add_coreml_version(int32_t coreml_version) {
    fbb_.AddElement<int32_t>(CoreMLSettings::VT_COREML_VERSION, coreml_version, 0);
  }
  void add_max_delegated_partitions(int32_t max_delegated_partitions) {
    fbb_.AddElement<int32_t>(CoreMLSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0);
  }
  // Values equal to the default (2) are elided from the buffer by AddElement.
  void add_min_nodes_per_partition(int32_t min_nodes_per_partition) {
    fbb_.AddElement<int32_t>(CoreMLSettings::VT_MIN_NODES_PER_PARTITION, min_nodes_per_partition, 2);
  }
  explicit CoreMLSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<CoreMLSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<CoreMLSettings>(end);
    return o;
  }
};

// One-shot helper constructing a complete CoreMLSettings table.
inline ::flatbuffers::Offset<CoreMLSettings> CreateCoreMLSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::CoreMLSettings_::EnabledDevices enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL,
    int32_t coreml_version = 0,
    int32_t max_delegated_partitions = 0,
    int32_t min_nodes_per_partition = 2) {
  CoreMLSettingsBuilder builder_(_fbb);
  builder_.add_min_nodes_per_partition(min_nodes_per_partition);
  builder_.add_max_delegated_partitions(max_delegated_partitions);
  builder_.add_coreml_version(coreml_version);
  builder_.add_enabled_devices(enabled_devices);
  return builder_.Finish();
}

// Packs a native CoreMLSettingsT (object API); definition is out-of-line.
::flatbuffers::Offset<CoreMLSettings> CreateCoreMLSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the StableDelegateLoaderSettings table.
struct StableDelegateLoaderSettingsT : public ::flatbuffers::NativeTable {
  typedef StableDelegateLoaderSettings TableType;
  std::string delegate_path{};
  std::string delegate_name{};
};

// Flatbuffer table accessor for stable-delegate loader settings
// (path to the delegate shared library and the delegate's name).
struct StableDelegateLoaderSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef StableDelegateLoaderSettingsT NativeTableType;
  typedef StableDelegateLoaderSettingsBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DELEGATE_PATH = 4,
    VT_DELEGATE_NAME = 6
  };
  // Accessors return nullptr when the field is absent from the buffer.
  const ::flatbuffers::String *delegate_path() const {
    return GetPointer<const ::flatbuffers::String *>(VT_DELEGATE_PATH);
  }
  const ::flatbuffers::String *delegate_name() const {
    return GetPointer<const ::flatbuffers::String *>(VT_DELEGATE_NAME);
  }
  // Structural validation of an untrusted buffer.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DELEGATE_PATH) &&
           verifier.VerifyString(delegate_path()) &&
           VerifyOffset(verifier, VT_DELEGATE_NAME) &&
           verifier.VerifyString(delegate_name()) &&
           verifier.EndTable();
  }
  StableDelegateLoaderSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(StableDelegateLoaderSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<StableDelegateLoaderSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableDelegateLoaderSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one StableDelegateLoaderSettings table.
struct StableDelegateLoaderSettingsBuilder {
  typedef StableDelegateLoaderSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_delegate_path(::flatbuffers::Offset<::flatbuffers::String> delegate_path) {
    fbb_.AddOffset(StableDelegateLoaderSettings::VT_DELEGATE_PATH, delegate_path);
  }
  void add_delegate_name(::flatbuffers::Offset<::flatbuffers::String> delegate_name) {
    fbb_.AddOffset(StableDelegateLoaderSettings::VT_DELEGATE_NAME, delegate_name);
  }
  explicit StableDelegateLoaderSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<StableDelegateLoaderSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<StableDelegateLoaderSettings>(end);
    return o;
  }
};

// One-shot helper; string offsets must already be serialized into _fbb.
inline ::flatbuffers::Offset<StableDelegateLoaderSettings> CreateStableDelegateLoaderSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> delegate_path = 0,
    ::flatbuffers::Offset<::flatbuffers::String> delegate_name = 0) {
  StableDelegateLoaderSettingsBuilder builder_(_fbb);
  builder_.add_delegate_name(delegate_name);
  builder_.add_delegate_path(delegate_path);
  return builder_.Finish();
}

// Direct variant taking C strings; nullptr omits the field (offset 0).
inline ::flatbuffers::Offset<StableDelegateLoaderSettings> CreateStableDelegateLoaderSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *delegate_path = nullptr,
    const char *delegate_name = nullptr) {
  auto delegate_path__ = delegate_path ? _fbb.CreateString(delegate_path) : 0;
  auto delegate_name__ = delegate_name ? _fbb.CreateString(delegate_name) : 0;
  return tflite::CreateStableDelegateLoaderSettings(
      _fbb,
      delegate_path__,
      delegate_name__);
}

// Packs a native StableDelegateLoaderSettingsT; definition is out-of-line.
::flatbuffers::Offset<StableDelegateLoaderSettings> CreateStableDelegateLoaderSettings(::flatbuffers::FlatBufferBuilder &_fbb, const StableDelegateLoaderSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the CompilationCachingSettings table.
struct CompilationCachingSettingsT : public ::flatbuffers::NativeTable {
  typedef CompilationCachingSettings TableType;
  std::string cache_dir{};
  std::string model_token{};
};

// Flatbuffer table accessor for compilation-caching settings
// (cache directory plus a token identifying the model).
struct CompilationCachingSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef CompilationCachingSettingsT NativeTableType;
  typedef CompilationCachingSettingsBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_CACHE_DIR = 4,
    VT_MODEL_TOKEN = 6
  };
  // Accessors return nullptr when the field is absent from the buffer.
  const ::flatbuffers::String *cache_dir() const {
    return GetPointer<const ::flatbuffers::String *>(VT_CACHE_DIR);
  }
  const ::flatbuffers::String *model_token() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  // Structural validation of an untrusted buffer.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_CACHE_DIR) &&
           verifier.VerifyString(cache_dir()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           verifier.EndTable();
  }
  CompilationCachingSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CompilationCachingSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<CompilationCachingSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CompilationCachingSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one CompilationCachingSettings table.
struct CompilationCachingSettingsBuilder {
  typedef CompilationCachingSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_cache_dir(::flatbuffers::Offset<::flatbuffers::String> cache_dir) {
    fbb_.AddOffset(CompilationCachingSettings::VT_CACHE_DIR, cache_dir);
  }
  void add_model_token(::flatbuffers::Offset<::flatbuffers::String> model_token) {
    fbb_.AddOffset(CompilationCachingSettings::VT_MODEL_TOKEN, model_token);
  }
  explicit CompilationCachingSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<CompilationCachingSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<CompilationCachingSettings>(end);
    return o;
  }
};

// One-shot helper; string offsets must already be serialized into _fbb.
inline ::flatbuffers::Offset<CompilationCachingSettings> CreateCompilationCachingSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> cache_dir = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_token = 0) {
  CompilationCachingSettingsBuilder builder_(_fbb);
  builder_.add_model_token(model_token);
  builder_.add_cache_dir(cache_dir);
  return builder_.Finish();
}

// Direct variant taking C strings; nullptr omits the field (offset 0).
inline ::flatbuffers::Offset<CompilationCachingSettings> CreateCompilationCachingSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *cache_dir = nullptr,
    const char *model_token = nullptr) {
  auto cache_dir__ = cache_dir ? _fbb.CreateString(cache_dir) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateCompilationCachingSettings(
      _fbb,
      cache_dir__,
      model_token__);
}

// Packs a native CompilationCachingSettingsT; definition is out-of-line.
::flatbuffers::Offset<CompilationCachingSettings> CreateCompilationCachingSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CompilationCachingSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the EdgeTpuDeviceSpec table.
struct EdgeTpuDeviceSpecT : public ::flatbuffers::NativeTable {
  typedef EdgeTpuDeviceSpec TableType;
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO;
  int32_t num_chips = 0;
  std::vector<std::string> device_paths{};
  int32_t chip_family = 0;
};

// Flatbuffer table accessor describing an EdgeTPU device (platform type,
// chip count, device paths, chip family).
struct EdgeTpuDeviceSpec FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef EdgeTpuDeviceSpecT NativeTableType;
  typedef EdgeTpuDeviceSpecBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PLATFORM_TYPE = 4,
    VT_NUM_CHIPS = 6,
    VT_DEVICE_PATHS = 8,
    VT_CHIP_FAMILY = 10
  };
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type() const {
    return static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(GetField<int32_t>(VT_PLATFORM_TYPE, 0));
  }
  int32_t num_chips() const {
    return GetField<int32_t>(VT_NUM_CHIPS, 0);
  }
  // Returns nullptr when the vector field is absent from the buffer.
  const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *device_paths() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_DEVICE_PATHS);
  }
  int32_t chip_family() const {
    return GetField<int32_t>(VT_CHIP_FAMILY, 0);
  }
  // Structural validation: vector of strings is checked element by element.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PLATFORM_TYPE, 4) &&
           VerifyField<int32_t>(verifier, VT_NUM_CHIPS, 4) &&
           VerifyOffset(verifier, VT_DEVICE_PATHS) &&
           verifier.VerifyVector(device_paths()) &&
           verifier.VerifyVectorOfStrings(device_paths()) &&
           VerifyField<int32_t>(verifier, VT_CHIP_FAMILY, 4) &&
           verifier.EndTable();
  }
  EdgeTpuDeviceSpecT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuDeviceSpecT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<EdgeTpuDeviceSpec> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one EdgeTpuDeviceSpec table.
struct EdgeTpuDeviceSpecBuilder {
  typedef EdgeTpuDeviceSpec Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType platform_type) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_PLATFORM_TYPE, static_cast<int32_t>(platform_type), 0);
  }
  void add_num_chips(int32_t num_chips) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_NUM_CHIPS, num_chips, 0);
  }
  void add_device_paths(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> device_paths) {
    fbb_.AddOffset(EdgeTpuDeviceSpec::VT_DEVICE_PATHS, device_paths);
  }
  void add_chip_family(int32_t chip_family) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_CHIP_FAMILY, chip_family, 0);
  }
  explicit EdgeTpuDeviceSpecBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<EdgeTpuDeviceSpec> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<EdgeTpuDeviceSpec>(end);
    return o;
  }
};

// One-shot helper; the device_paths vector must already be serialized.
inline ::flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> device_paths = 0,
    int32_t chip_family = 0) {
  EdgeTpuDeviceSpecBuilder builder_(_fbb);
  builder_.add_chip_family(chip_family);
  builder_.add_device_paths(device_paths);
  builder_.add_num_chips(num_chips);
  builder_.add_platform_type(platform_type);
  return builder_.Finish();
}

// Direct variant taking a std::vector of already-serialized string offsets;
// nullptr omits the vector field (offset 0).
inline ::flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpecDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *device_paths = nullptr,
    int32_t chip_family = 0) {
  auto device_paths__ = device_paths ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*device_paths) : 0;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      platform_type,
      num_chips,
      device_paths__,
      chip_family);
}

// Packs a native EdgeTpuDeviceSpecT (object API); definition is out-of-line.
::flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfigT : public ::flatbuffers::NativeTable {
  typedef EdgeTpuInactivePowerConfig TableType;
  tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE;
  int64_t inactive_timeout_us = 0;
};

// Flatbuffer table accessor pairing an inactive power state with the
// timeout (in microseconds) after which it applies.
struct EdgeTpuInactivePowerConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef EdgeTpuInactivePowerConfigT NativeTableType;
  typedef EdgeTpuInactivePowerConfigBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INACTIVE_POWER_STATE = 4,
    VT_INACTIVE_TIMEOUT_US = 6
  };
  tflite::EdgeTpuPowerState inactive_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INACTIVE_POWER_STATE, 0));
  }
  int64_t inactive_timeout_us() const {
    return GetField<int64_t>(VT_INACTIVE_TIMEOUT_US, 0);
  }
  // Structural validation; the int64 field requires 8-byte alignment.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INACTIVE_POWER_STATE, 4) &&
           VerifyField<int64_t>(verifier, VT_INACTIVE_TIMEOUT_US, 8) &&
           verifier.EndTable();
  }
  EdgeTpuInactivePowerConfigT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuInactivePowerConfigT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<EdgeTpuInactivePowerConfig> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incrementally writes one EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfigBuilder {
  typedef EdgeTpuInactivePowerConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_inactive_power_state(tflite::EdgeTpuPowerState inactive_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_POWER_STATE, static_cast<int32_t>(inactive_power_state), 0);
  }
  void add_inactive_timeout_us(int64_t inactive_timeout_us) {
    fbb_.AddElement<int64_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_TIMEOUT_US, inactive_timeout_us, 0);
  }
  explicit EdgeTpuInactivePowerConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<EdgeTpuInactivePowerConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<EdgeTpuInactivePowerConfig>(end);
    return o;
  }
};

// One-shot helper; the wider (int64) field is added first per generator
// convention.
inline ::flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    int64_t inactive_timeout_us = 0) {
  EdgeTpuInactivePowerConfigBuilder builder_(_fbb);
  builder_.add_inactive_timeout_us(inactive_timeout_us);
  builder_.add_inactive_power_state(inactive_power_state);
  return builder_.Finish();
}

// Packs a native EdgeTpuInactivePowerConfigT; definition is out-of-line.
::flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the EdgeTpuSettings table. Copy operations
// are user-declared (and defined out-of-line) because the std::unique_ptr
// members suppress the implicit copy constructor/assignment; a deep copy is
// required. Move operations remain defaulted.
struct EdgeTpuSettingsT : public ::flatbuffers::NativeTable {
  typedef EdgeTpuSettings TableType;
  tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE;
  std::vector<std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>> inactive_power_configs{};
  int32_t inference_priority = -1;
  std::unique_ptr<tflite::EdgeTpuDeviceSpecT> edgetpu_device_spec{};
  std::string model_token{};
  tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED;
  tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED;
  std::vector<int32_t> hardware_cluster_ids{};
  std::string public_model_id{};
  tflite::EdgeTpuSettings_::UseLayerIrTgcBackend use_layer_ir_tgc_backend = tflite::EdgeTpuSettings_::UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED;
  EdgeTpuSettingsT() = default;
  EdgeTpuSettingsT(const EdgeTpuSettingsT &o);
  EdgeTpuSettingsT(EdgeTpuSettingsT&&) FLATBUFFERS_NOEXCEPT = default;
  // Pass-by-value assignment implements both copy- and move-assignment.
  EdgeTpuSettingsT &operator=(EdgeTpuSettingsT o) FLATBUFFERS_NOEXCEPT;
};

// Flatbuffer table accessor for EdgeTPU delegate settings.
struct EdgeTpuSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef EdgeTpuSettingsT NativeTableType;
  typedef EdgeTpuSettingsBuilder Builder;
  // Byte offsets of each field's slot within the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INFERENCE_POWER_STATE = 4,
    VT_INACTIVE_POWER_CONFIGS = 6,
    VT_INFERENCE_PRIORITY = 8,
    VT_EDGETPU_DEVICE_SPEC = 10,
    VT_MODEL_TOKEN = 12,
    VT_FLOAT_TRUNCATION_TYPE = 14,
    VT_QOS_CLASS = 16,
    VT_HARDWARE_CLUSTER_IDS = 18,
    VT_PUBLIC_MODEL_ID = 20,
    VT_USE_LAYER_IR_TGC_BACKEND = 22
  };
  tflite::EdgeTpuPowerState inference_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INFERENCE_POWER_STATE, 0));
  }
  // Pointer accessors below return nullptr when the field is absent.
  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *>(VT_INACTIVE_POWER_CONFIGS);
  }
  // Schema default is -1 (i.e. priority unset).
  int32_t inference_priority() const {
    return GetField<int32_t>(VT_INFERENCE_PRIORITY, -1);
  }
  const tflite::EdgeTpuDeviceSpec *edgetpu_device_spec() const {
    return GetPointer<const tflite::EdgeTpuDeviceSpec *>(VT_EDGETPU_DEVICE_SPEC);
  }
  const ::flatbuffers::String *model_token() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type() const {
    return static_cast<tflite::EdgeTpuSettings_::FloatTruncationType>(GetField<int32_t>(VT_FLOAT_TRUNCATION_TYPE, 0));
  }
  tflite::EdgeTpuSettings_::QosClass qos_class() const {
    return static_cast<tflite::EdgeTpuSettings_::QosClass>(GetField<int32_t>(VT_QOS_CLASS, 0));
  }
  const ::flatbuffers::Vector<int32_t> *hardware_cluster_ids() const {
    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_HARDWARE_CLUSTER_IDS);
  }
  const ::flatbuffers::String *public_model_id() const {
    return GetPointer<const ::flatbuffers::String *>(VT_PUBLIC_MODEL_ID);
  }
  tflite::EdgeTpuSettings_::UseLayerIrTgcBackend use_layer_ir_tgc_backend() const {
    return static_cast<tflite::EdgeTpuSettings_::UseLayerIrTgcBackend>(GetField<int32_t>(VT_USE_LAYER_IR_TGC_BACKEND, 0));
  }
  // Structural validation of an untrusted buffer, including recursive
  // verification of the nested device spec and power-config tables.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_POWER_STATE, 4) &&
           VerifyOffset(verifier, VT_INACTIVE_POWER_CONFIGS) &&
           verifier.VerifyVector(inactive_power_configs()) &&
           verifier.VerifyVectorOfTables(inactive_power_configs()) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY, 4) &&
           VerifyOffset(verifier, VT_EDGETPU_DEVICE_SPEC) &&
           verifier.VerifyTable(edgetpu_device_spec()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           VerifyField<int32_t>(verifier, VT_FLOAT_TRUNCATION_TYPE, 4) &&
           VerifyField<int32_t>(verifier, VT_QOS_CLASS, 4) &&
           VerifyOffset(verifier, VT_HARDWARE_CLUSTER_IDS) &&
           verifier.VerifyVector(hardware_cluster_ids()) &&
           VerifyOffset(verifier, VT_PUBLIC_MODEL_ID) &&
           verifier.VerifyString(public_model_id()) &&
           VerifyField<int32_t>(verifier, VT_USE_LAYER_IR_TGC_BACKEND, 4) &&
           verifier.EndTable();
  }
  EdgeTpuSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<EdgeTpuSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder for an EdgeTpuSettings table: StartTable() is called at
// construction, each add_* writes one field (scalars are elided when equal to
// their schema default), and Finish() ends the table and returns its offset.
struct EdgeTpuSettingsBuilder {
  typedef EdgeTpuSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_inference_power_state(tflite::EdgeTpuPowerState inference_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_POWER_STATE, static_cast<int32_t>(inference_power_state), 0);
  }
  void add_inactive_power_configs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs) {
    fbb_.AddOffset(EdgeTpuSettings::VT_INACTIVE_POWER_CONFIGS, inactive_power_configs);
  }
  void add_inference_priority(int32_t inference_priority) {
    // Schema default is -1, so -1 is not serialized.
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_PRIORITY, inference_priority, -1);
  }
  void add_edgetpu_device_spec(::flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec) {
    fbb_.AddOffset(EdgeTpuSettings::VT_EDGETPU_DEVICE_SPEC, edgetpu_device_spec);
  }
  void add_model_token(::flatbuffers::Offset<::flatbuffers::String> model_token) {
    fbb_.AddOffset(EdgeTpuSettings::VT_MODEL_TOKEN, model_token);
  }
  void add_float_truncation_type(tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_FLOAT_TRUNCATION_TYPE, static_cast<int32_t>(float_truncation_type), 0);
  }
  void add_qos_class(tflite::EdgeTpuSettings_::QosClass qos_class) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_QOS_CLASS, static_cast<int32_t>(qos_class), 0);
  }
  void add_hardware_cluster_ids(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> hardware_cluster_ids) {
    fbb_.AddOffset(EdgeTpuSettings::VT_HARDWARE_CLUSTER_IDS, hardware_cluster_ids);
  }
  void add_public_model_id(::flatbuffers::Offset<::flatbuffers::String> public_model_id) {
    fbb_.AddOffset(EdgeTpuSettings::VT_PUBLIC_MODEL_ID, public_model_id);
  }
  void add_use_layer_ir_tgc_backend(tflite::EdgeTpuSettings_::UseLayerIrTgcBackend use_layer_ir_tgc_backend) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_USE_LAYER_IR_TGC_BACKEND, static_cast<int32_t>(use_layer_ir_tgc_backend), 0);
  }
  explicit EdgeTpuSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<EdgeTpuSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<EdgeTpuSettings>(end);
    return o;
  }
};

// Convenience one-shot constructor for an EdgeTpuSettings table. Defaults
// mirror the schema; fields left at their default are omitted from the
// buffer. NOTE: the add_* call order below is chosen by the flatc generator
// (it groups fields for packing) — do not reorder by hand.
inline ::flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs = 0,
    int32_t inference_priority = -1,
    ::flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_token = 0,
    tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED,
    tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED,
    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> hardware_cluster_ids = 0,
    ::flatbuffers::Offset<::flatbuffers::String> public_model_id = 0,
    tflite::EdgeTpuSettings_::UseLayerIrTgcBackend use_layer_ir_tgc_backend = tflite::EdgeTpuSettings_::UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED) {
  EdgeTpuSettingsBuilder builder_(_fbb);
  builder_.add_use_layer_ir_tgc_backend(use_layer_ir_tgc_backend);
  builder_.add_public_model_id(public_model_id);
  builder_.add_hardware_cluster_ids(hardware_cluster_ids);
  builder_.add_qos_class(qos_class);
  builder_.add_float_truncation_type(float_truncation_type);
  builder_.add_model_token(model_token);
  builder_.add_edgetpu_device_spec(edgetpu_device_spec);
  builder_.add_inference_priority(inference_priority);
  builder_.add_inactive_power_configs(inactive_power_configs);
  builder_.add_inference_power_state(inference_power_state);
  return builder_.Finish();
}

// Variant of CreateEdgeTpuSettings that accepts C++-native arguments
// (std::vector / C strings). Nested strings and vectors must be serialized
// before the table is started, so they are created first; null pointers map
// to offset 0, i.e. "field absent".
inline ::flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    const std::vector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs = nullptr,
    int32_t inference_priority = -1,
    ::flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0,
    const char *model_token = nullptr,
    tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED,
    tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED,
    const std::vector<int32_t> *hardware_cluster_ids = nullptr,
    const char *public_model_id = nullptr,
    tflite::EdgeTpuSettings_::UseLayerIrTgcBackend use_layer_ir_tgc_backend = tflite::EdgeTpuSettings_::UseLayerIrTgcBackend_USE_LAYER_IR_TGC_BACKEND_UNSPECIFIED) {
  auto inactive_power_configs__ = inactive_power_configs ? _fbb.CreateVector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>(*inactive_power_configs) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  auto hardware_cluster_ids__ = hardware_cluster_ids ? _fbb.CreateVector<int32_t>(*hardware_cluster_ids) : 0;
  auto public_model_id__ = public_model_id ? _fbb.CreateString(public_model_id) : 0;
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      inference_power_state,
      inactive_power_configs__,
      inference_priority,
      edgetpu_device_spec,
      model_token__,
      float_truncation_type,
      qos_class,
      hardware_cluster_ids__,
      public_model_id__,
      use_layer_ir_tgc_backend);
}

::flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the GoogleEdgeTpuSettings table. Field
// initializers reproduce the schema defaults; note the two cache-management
// flags default to true while every other bool defaults to false.
struct GoogleEdgeTpuSettingsT : public ::flatbuffers::NativeTable {
  typedef GoogleEdgeTpuSettings TableType;
  int32_t log_verbosity = -1;
  bool enable_tracing = false;
  tflite::GoogleEdgeTpuSettings_::Priority priority = tflite::GoogleEdgeTpuSettings_::Priority_PRIORITY_UNDEFINED;
  std::vector<uint8_t> extension_data{};
  std::string model_identifier{};
  bool use_async_api = false;
  bool delegate_should_manage_cache_for_inputs = true;
  bool delegate_should_manage_cache_for_outputs = true;
  tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_inputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED;
  tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_outputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED;
  bool allow_fp16_precision_for_fp32 = false;
};

// Flat (in-place) accessor for a serialized GoogleEdgeTpuSettings table.
// Accessors read directly from the buffer; scalar getters fall back to the
// schema default when the field was not serialized, and pointer getters
// return nullptr for absent fields.
struct GoogleEdgeTpuSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef GoogleEdgeTpuSettingsT NativeTableType;
  typedef GoogleEdgeTpuSettingsBuilder Builder;
  // Byte offsets of each field's slot in the table vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_LOG_VERBOSITY = 4,
    VT_ENABLE_TRACING = 6,
    VT_PRIORITY = 8,
    VT_EXTENSION_DATA = 10,
    VT_MODEL_IDENTIFIER = 12,
    VT_USE_ASYNC_API = 14,
    VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_INPUTS = 16,
    VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_OUTPUTS = 18,
    VT_PREFER_CACHE_COHERENCY_FOR_INPUTS = 20,
    VT_PREFER_CACHE_COHERENCY_FOR_OUTPUTS = 22,
    VT_ALLOW_FP16_PRECISION_FOR_FP32 = 24
  };
  int32_t log_verbosity() const {
    return GetField<int32_t>(VT_LOG_VERBOSITY, -1);  // default -1
  }
  bool enable_tracing() const {
    return GetField<uint8_t>(VT_ENABLE_TRACING, 0) != 0;
  }
  tflite::GoogleEdgeTpuSettings_::Priority priority() const {
    return static_cast<tflite::GoogleEdgeTpuSettings_::Priority>(GetField<int32_t>(VT_PRIORITY, 0));
  }
  const ::flatbuffers::Vector<uint8_t> *extension_data() const {
    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_EXTENSION_DATA);
  }
  const ::flatbuffers::String *model_identifier() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_IDENTIFIER);
  }
  bool use_async_api() const {
    return GetField<uint8_t>(VT_USE_ASYNC_API, 0) != 0;
  }
  bool delegate_should_manage_cache_for_inputs() const {
    return GetField<uint8_t>(VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_INPUTS, 1) != 0;  // default true
  }
  bool delegate_should_manage_cache_for_outputs() const {
    return GetField<uint8_t>(VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_OUTPUTS, 1) != 0;  // default true
  }
  tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_inputs() const {
    return static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(GetField<int32_t>(VT_PREFER_CACHE_COHERENCY_FOR_INPUTS, 0));
  }
  tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_outputs() const {
    return static_cast<tflite::GoogleEdgeTpuSettings_::TriState>(GetField<int32_t>(VT_PREFER_CACHE_COHERENCY_FOR_OUTPUTS, 0));
  }
  bool allow_fp16_precision_for_fp32() const {
    return GetField<uint8_t>(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0;
  }
  // Bounds-checks every field; offset fields additionally have their string /
  // vector targets verified.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_LOG_VERBOSITY, 4) &&
           VerifyField<uint8_t>(verifier, VT_ENABLE_TRACING, 1) &&
           VerifyField<int32_t>(verifier, VT_PRIORITY, 4) &&
           VerifyOffset(verifier, VT_EXTENSION_DATA) &&
           verifier.VerifyVector(extension_data()) &&
           VerifyOffset(verifier, VT_MODEL_IDENTIFIER) &&
           verifier.VerifyString(model_identifier()) &&
           VerifyField<uint8_t>(verifier, VT_USE_ASYNC_API, 1) &&
           VerifyField<uint8_t>(verifier, VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_INPUTS, 1) &&
           VerifyField<uint8_t>(verifier, VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_OUTPUTS, 1) &&
           VerifyField<int32_t>(verifier, VT_PREFER_CACHE_COHERENCY_FOR_INPUTS, 4) &&
           VerifyField<int32_t>(verifier, VT_PREFER_CACHE_COHERENCY_FOR_OUTPUTS, 4) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32, 1) &&
           verifier.EndTable();
  }
  GoogleEdgeTpuSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(GoogleEdgeTpuSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<GoogleEdgeTpuSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GoogleEdgeTpuSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder for a GoogleEdgeTpuSettings table. Scalar add_* calls
// pass the schema default as the third argument so default-valued fields are
// omitted from the serialized table.
struct GoogleEdgeTpuSettingsBuilder {
  typedef GoogleEdgeTpuSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_log_verbosity(int32_t log_verbosity) {
    fbb_.AddElement<int32_t>(GoogleEdgeTpuSettings::VT_LOG_VERBOSITY, log_verbosity, -1);
  }
  void add_enable_tracing(bool enable_tracing) {
    fbb_.AddElement<uint8_t>(GoogleEdgeTpuSettings::VT_ENABLE_TRACING, static_cast<uint8_t>(enable_tracing), 0);
  }
  void add_priority(tflite::GoogleEdgeTpuSettings_::Priority priority) {
    fbb_.AddElement<int32_t>(GoogleEdgeTpuSettings::VT_PRIORITY, static_cast<int32_t>(priority), 0);
  }
  void add_extension_data(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> extension_data) {
    fbb_.AddOffset(GoogleEdgeTpuSettings::VT_EXTENSION_DATA, extension_data);
  }
  void add_model_identifier(::flatbuffers::Offset<::flatbuffers::String> model_identifier) {
    fbb_.AddOffset(GoogleEdgeTpuSettings::VT_MODEL_IDENTIFIER, model_identifier);
  }
  void add_use_async_api(bool use_async_api) {
    fbb_.AddElement<uint8_t>(GoogleEdgeTpuSettings::VT_USE_ASYNC_API, static_cast<uint8_t>(use_async_api), 0);
  }
  void add_delegate_should_manage_cache_for_inputs(bool delegate_should_manage_cache_for_inputs) {
    // Default is true (1), so `true` is not serialized.
    fbb_.AddElement<uint8_t>(GoogleEdgeTpuSettings::VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_INPUTS, static_cast<uint8_t>(delegate_should_manage_cache_for_inputs), 1);
  }
  void add_delegate_should_manage_cache_for_outputs(bool delegate_should_manage_cache_for_outputs) {
    // Default is true (1), so `true` is not serialized.
    fbb_.AddElement<uint8_t>(GoogleEdgeTpuSettings::VT_DELEGATE_SHOULD_MANAGE_CACHE_FOR_OUTPUTS, static_cast<uint8_t>(delegate_should_manage_cache_for_outputs), 1);
  }
  void add_prefer_cache_coherency_for_inputs(tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_inputs) {
    fbb_.AddElement<int32_t>(GoogleEdgeTpuSettings::VT_PREFER_CACHE_COHERENCY_FOR_INPUTS, static_cast<int32_t>(prefer_cache_coherency_for_inputs), 0);
  }
  void add_prefer_cache_coherency_for_outputs(tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_outputs) {
    fbb_.AddElement<int32_t>(GoogleEdgeTpuSettings::VT_PREFER_CACHE_COHERENCY_FOR_OUTPUTS, static_cast<int32_t>(prefer_cache_coherency_for_outputs), 0);
  }
  void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) {
    fbb_.AddElement<uint8_t>(GoogleEdgeTpuSettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast<uint8_t>(allow_fp16_precision_for_fp32), 0);
  }
  explicit GoogleEdgeTpuSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<GoogleEdgeTpuSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<GoogleEdgeTpuSettings>(end);
    return o;
  }
};

// One-shot constructor for a GoogleEdgeTpuSettings table; defaults mirror the
// schema. The add_* order is the generator's size-sorted packing order — do
// not reorder by hand.
inline ::flatbuffers::Offset<GoogleEdgeTpuSettings> CreateGoogleEdgeTpuSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t log_verbosity = -1,
    bool enable_tracing = false,
    tflite::GoogleEdgeTpuSettings_::Priority priority = tflite::GoogleEdgeTpuSettings_::Priority_PRIORITY_UNDEFINED,
    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> extension_data = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_identifier = 0,
    bool use_async_api = false,
    bool delegate_should_manage_cache_for_inputs = true,
    bool delegate_should_manage_cache_for_outputs = true,
    tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_inputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED,
    tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_outputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED,
    bool allow_fp16_precision_for_fp32 = false) {
  GoogleEdgeTpuSettingsBuilder builder_(_fbb);
  builder_.add_prefer_cache_coherency_for_outputs(prefer_cache_coherency_for_outputs);
  builder_.add_prefer_cache_coherency_for_inputs(prefer_cache_coherency_for_inputs);
  builder_.add_model_identifier(model_identifier);
  builder_.add_extension_data(extension_data);
  builder_.add_priority(priority);
  builder_.add_log_verbosity(log_verbosity);
  builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32);
  builder_.add_delegate_should_manage_cache_for_outputs(delegate_should_manage_cache_for_outputs);
  builder_.add_delegate_should_manage_cache_for_inputs(delegate_should_manage_cache_for_inputs);
  builder_.add_use_async_api(use_async_api);
  builder_.add_enable_tracing(enable_tracing);
  return builder_.Finish();
}

// Variant of CreateGoogleEdgeTpuSettings taking C++-native arguments. Nested
// vector/string payloads are serialized before the table itself; null
// pointers become offset 0 ("field absent").
inline ::flatbuffers::Offset<GoogleEdgeTpuSettings> CreateGoogleEdgeTpuSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t log_verbosity = -1,
    bool enable_tracing = false,
    tflite::GoogleEdgeTpuSettings_::Priority priority = tflite::GoogleEdgeTpuSettings_::Priority_PRIORITY_UNDEFINED,
    const std::vector<uint8_t> *extension_data = nullptr,
    const char *model_identifier = nullptr,
    bool use_async_api = false,
    bool delegate_should_manage_cache_for_inputs = true,
    bool delegate_should_manage_cache_for_outputs = true,
    tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_inputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED,
    tflite::GoogleEdgeTpuSettings_::TriState prefer_cache_coherency_for_outputs = tflite::GoogleEdgeTpuSettings_::TriState_TRISTATE_UNDEFINED,
    bool allow_fp16_precision_for_fp32 = false) {
  auto extension_data__ = extension_data ? _fbb.CreateVector<uint8_t>(*extension_data) : 0;
  auto model_identifier__ = model_identifier ? _fbb.CreateString(model_identifier) : 0;
  return tflite::CreateGoogleEdgeTpuSettings(
      _fbb,
      log_verbosity,
      enable_tracing,
      priority,
      extension_data__,
      model_identifier__,
      use_async_api,
      delegate_should_manage_cache_for_inputs,
      delegate_should_manage_cache_for_outputs,
      prefer_cache_coherency_for_inputs,
      prefer_cache_coherency_for_outputs,
      allow_fp16_precision_for_fp32);
}

::flatbuffers::Offset<GoogleEdgeTpuSettings> CreateGoogleEdgeTpuSettings(::flatbuffers::FlatBufferBuilder &_fbb, const GoogleEdgeTpuSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the CoralSettings table; initializers are the
// schema defaults.
struct CoralSettingsT : public ::flatbuffers::NativeTable {
  typedef CoralSettings TableType;
  std::string device{};
  tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED;
  bool usb_always_dfu = false;
  int32_t usb_max_bulk_in_queue_length = 0;
};

// Flat (in-place) accessor for a serialized CoralSettings table. Scalar
// getters return the schema default when the field is absent; device()
// returns nullptr when unset.
struct CoralSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef CoralSettingsT NativeTableType;
  typedef CoralSettingsBuilder Builder;
  // Byte offsets of each field's slot in the table vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEVICE = 4,
    VT_PERFORMANCE = 6,
    VT_USB_ALWAYS_DFU = 8,
    VT_USB_MAX_BULK_IN_QUEUE_LENGTH = 10
  };
  const ::flatbuffers::String *device() const {
    return GetPointer<const ::flatbuffers::String *>(VT_DEVICE);
  }
  tflite::CoralSettings_::Performance performance() const {
    return static_cast<tflite::CoralSettings_::Performance>(GetField<int32_t>(VT_PERFORMANCE, 0));
  }
  bool usb_always_dfu() const {
    return GetField<uint8_t>(VT_USB_ALWAYS_DFU, 0) != 0;
  }
  int32_t usb_max_bulk_in_queue_length() const {
    return GetField<int32_t>(VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 0);
  }
  // Bounds-checks each field; the device string target is verified as well.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyString(device()) &&
           VerifyField<int32_t>(verifier, VT_PERFORMANCE, 4) &&
           VerifyField<uint8_t>(verifier, VT_USB_ALWAYS_DFU, 1) &&
           VerifyField<int32_t>(verifier, VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 4) &&
           verifier.EndTable();
  }
  CoralSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CoralSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<CoralSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder for a CoralSettings table; default-valued scalars are
// omitted from the serialized output.
struct CoralSettingsBuilder {
  typedef CoralSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_device(::flatbuffers::Offset<::flatbuffers::String> device) {
    fbb_.AddOffset(CoralSettings::VT_DEVICE, device);
  }
  void add_performance(tflite::CoralSettings_::Performance performance) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_PERFORMANCE, static_cast<int32_t>(performance), 0);
  }
  void add_usb_always_dfu(bool usb_always_dfu) {
    fbb_.AddElement<uint8_t>(CoralSettings::VT_USB_ALWAYS_DFU, static_cast<uint8_t>(usb_always_dfu), 0);
  }
  void add_usb_max_bulk_in_queue_length(int32_t usb_max_bulk_in_queue_length) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_USB_MAX_BULK_IN_QUEUE_LENGTH, usb_max_bulk_in_queue_length, 0);
  }
  explicit CoralSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<CoralSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<CoralSettings>(end);
    return o;
  }
};

// One-shot constructor for a CoralSettings table; defaults mirror the schema.
// The add_* order is the generator's packing order — do not reorder by hand.
inline ::flatbuffers::Offset<CoralSettings> CreateCoralSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> device = 0,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  CoralSettingsBuilder builder_(_fbb);
  builder_.add_usb_max_bulk_in_queue_length(usb_max_bulk_in_queue_length);
  builder_.add_performance(performance);
  builder_.add_device(device);
  builder_.add_usb_always_dfu(usb_always_dfu);
  return builder_.Finish();
}

// Variant of CreateCoralSettings taking a C string for `device`; the string
// is serialized first, and nullptr maps to offset 0 ("field absent").
inline ::flatbuffers::Offset<CoralSettings> CreateCoralSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *device = nullptr,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  auto device__ = device ? _fbb.CreateString(device) : 0;
  return tflite::CreateCoralSettings(
      _fbb,
      device__,
      performance,
      usb_always_dfu,
      usb_max_bulk_in_queue_length);
}

::flatbuffers::Offset<CoralSettings> CreateCoralSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the CPUSettings table; num_threads defaults
// to -1 per the schema.
struct CPUSettingsT : public ::flatbuffers::NativeTable {
  typedef CPUSettings TableType;
  int32_t num_threads = -1;
};

// Flat (in-place) accessor for a serialized CPUSettings table (single
// num_threads field, default -1).
struct CPUSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef CPUSettingsT NativeTableType;
  typedef CPUSettingsBuilder Builder;
  // Byte offset of the field's slot in the table vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, -1);  // default -1
  }
  // Bounds-checks the single scalar field.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS, 4) &&
           verifier.EndTable();
  }
  CPUSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CPUSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<CPUSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder for a CPUSettings table; num_threads == -1 (the default)
// is omitted from the serialized output.
struct CPUSettingsBuilder {
  typedef CPUSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(CPUSettings::VT_NUM_THREADS, num_threads, -1);
  }
  explicit CPUSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<CPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<CPUSettings>(end);
    return o;
  }
};

// One-shot constructor for a CPUSettings table.
inline ::flatbuffers::Offset<CPUSettings> CreateCPUSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t num_threads = -1) {
  CPUSettingsBuilder builder_(_fbb);
  builder_.add_num_threads(num_threads);
  return builder_.Finish();
}

::flatbuffers::Offset<CPUSettings> CreateCPUSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the ArmNNSettings table; initializers are the
// schema defaults.
struct ArmNNSettingsT : public ::flatbuffers::NativeTable {
  typedef ArmNNSettings TableType;
  std::string backends{};
  bool fastmath = false;
  std::string additional_parameters{};
};

// Flat (in-place) accessor for a serialized ArmNNSettings table. String
// getters return nullptr when the field is absent.
struct ArmNNSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ArmNNSettingsT NativeTableType;
  typedef ArmNNSettingsBuilder Builder;
  // Byte offsets of each field's slot in the table vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_BACKENDS = 4,
    VT_FASTMATH = 6,
    VT_ADDITIONAL_PARAMETERS = 8
  };
  const ::flatbuffers::String *backends() const {
    return GetPointer<const ::flatbuffers::String *>(VT_BACKENDS);
  }
  bool fastmath() const {
    return GetField<uint8_t>(VT_FASTMATH, 0) != 0;
  }
  const ::flatbuffers::String *additional_parameters() const {
    return GetPointer<const ::flatbuffers::String *>(VT_ADDITIONAL_PARAMETERS);
  }
  // Bounds-checks each field and verifies both string targets.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_BACKENDS) &&
           verifier.VerifyString(backends()) &&
           VerifyField<uint8_t>(verifier, VT_FASTMATH, 1) &&
           VerifyOffset(verifier, VT_ADDITIONAL_PARAMETERS) &&
           verifier.VerifyString(additional_parameters()) &&
           verifier.EndTable();
  }
  ArmNNSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ArmNNSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ArmNNSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArmNNSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder for an ArmNNSettings table; fastmath == false (the
// default) is omitted from the serialized output.
struct ArmNNSettingsBuilder {
  typedef ArmNNSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_backends(::flatbuffers::Offset<::flatbuffers::String> backends) {
    fbb_.AddOffset(ArmNNSettings::VT_BACKENDS, backends);
  }
  void add_fastmath(bool fastmath) {
    fbb_.AddElement<uint8_t>(ArmNNSettings::VT_FASTMATH, static_cast<uint8_t>(fastmath), 0);
  }
  void add_additional_parameters(::flatbuffers::Offset<::flatbuffers::String> additional_parameters) {
    fbb_.AddOffset(ArmNNSettings::VT_ADDITIONAL_PARAMETERS, additional_parameters);
  }
  explicit ArmNNSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ArmNNSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ArmNNSettings>(end);
    return o;
  }
};

// One-shot constructor for an ArmNNSettings table. The add_* order is the
// generator's packing order — do not reorder by hand.
inline ::flatbuffers::Offset<ArmNNSettings> CreateArmNNSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> backends = 0,
    bool fastmath = false,
    ::flatbuffers::Offset<::flatbuffers::String> additional_parameters = 0) {
  ArmNNSettingsBuilder builder_(_fbb);
  builder_.add_additional_parameters(additional_parameters);
  builder_.add_backends(backends);
  builder_.add_fastmath(fastmath);
  return builder_.Finish();
}

// Variant of CreateArmNNSettings taking C strings; strings are serialized
// before the table, and nullptr maps to offset 0 ("field absent").
inline ::flatbuffers::Offset<ArmNNSettings> CreateArmNNSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *backends = nullptr,
    bool fastmath = false,
    const char *additional_parameters = nullptr) {
  auto backends__ = backends ? _fbb.CreateString(backends) : 0;
  auto additional_parameters__ = additional_parameters ? _fbb.CreateString(additional_parameters) : 0;
  return tflite::CreateArmNNSettings(
      _fbb,
      backends__,
      fastmath,
      additional_parameters__);
}

::flatbuffers::Offset<ArmNNSettings> CreateArmNNSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ArmNNSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of the MtkNeuronSettings table; initializers are
// the schema defaults (note use_cacheable_buffer defaults to true).
struct MtkNeuronSettingsT : public ::flatbuffers::NativeTable {
  typedef MtkNeuronSettings TableType;
  tflite::MtkNeuronSettings_::ExecutionPreference execution_preference = tflite::MtkNeuronSettings_::ExecutionPreference_PREFERENCE_UNDEFINED;
  tflite::MtkNeuronSettings_::ExecutionPriority execution_priority = tflite::MtkNeuronSettings_::ExecutionPriority_PRIORITY_UNDEFINED;
  std::vector<tflite::MtkNeuronSettings_::OptimizationHint> optimization_hints{};
  tflite::MtkNeuronSettings_::OperationCheckMode operation_check_mode = tflite::MtkNeuronSettings_::OperationCheckMode_NO_OPERATION_CHECK;
  bool allow_fp16_precision_for_fp32 = false;
  bool use_ahwb = false;
  bool use_cacheable_buffer = true;
  std::vector<std::string> compile_options{};
  std::vector<std::string> accelerator_names{};
  std::string neuron_config_path{};
  int32_t inference_deadline_ms = 0;
  int32_t inference_abort_time_ms = 0;
};

// Flat (in-place) accessor for a serialized MtkNeuronSettings table. Scalar
// getters return the schema default when absent; pointer getters return
// nullptr for absent fields.
struct MtkNeuronSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MtkNeuronSettingsT NativeTableType;
  typedef MtkNeuronSettingsBuilder Builder;
  // Byte offsets of each field's slot in the table vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_EXECUTION_PREFERENCE = 4,
    VT_EXECUTION_PRIORITY = 6,
    VT_OPTIMIZATION_HINTS = 8,
    VT_OPERATION_CHECK_MODE = 10,
    VT_ALLOW_FP16_PRECISION_FOR_FP32 = 12,
    VT_USE_AHWB = 14,
    VT_USE_CACHEABLE_BUFFER = 16,
    VT_COMPILE_OPTIONS = 18,
    VT_ACCELERATOR_NAMES = 20,
    VT_NEURON_CONFIG_PATH = 22,
    VT_INFERENCE_DEADLINE_MS = 24,
    VT_INFERENCE_ABORT_TIME_MS = 26
  };
  tflite::MtkNeuronSettings_::ExecutionPreference execution_preference() const {
    return static_cast<tflite::MtkNeuronSettings_::ExecutionPreference>(GetField<int32_t>(VT_EXECUTION_PREFERENCE, 0));
  }
  tflite::MtkNeuronSettings_::ExecutionPriority execution_priority() const {
    return static_cast<tflite::MtkNeuronSettings_::ExecutionPriority>(GetField<int32_t>(VT_EXECUTION_PRIORITY, 0));
  }
  // Enum vector is exposed via its int32 storage type; the native object API
  // (MtkNeuronSettingsT) carries the typed OptimizationHint vector instead.
  const ::flatbuffers::Vector<int32_t> *optimization_hints() const {
    return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OPTIMIZATION_HINTS);
  }
  tflite::MtkNeuronSettings_::OperationCheckMode operation_check_mode() const {
    return static_cast<tflite::MtkNeuronSettings_::OperationCheckMode>(GetField<int32_t>(VT_OPERATION_CHECK_MODE, 0));
  }
  bool allow_fp16_precision_for_fp32() const {
    return GetField<uint8_t>(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0;
  }
  bool use_ahwb() const {
    return GetField<uint8_t>(VT_USE_AHWB, 0) != 0;
  }
  bool use_cacheable_buffer() const {
    return GetField<uint8_t>(VT_USE_CACHEABLE_BUFFER, 1) != 0;  // default true
  }
  const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *compile_options() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_COMPILE_OPTIONS);
  }
  const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *accelerator_names() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_ACCELERATOR_NAMES);
  }
  const ::flatbuffers::String *neuron_config_path() const {
    return GetPointer<const ::flatbuffers::String *>(VT_NEURON_CONFIG_PATH);
  }
  int32_t inference_deadline_ms() const {
    return GetField<int32_t>(VT_INFERENCE_DEADLINE_MS, 0);
  }
  int32_t inference_abort_time_ms() const {
    return GetField<int32_t>(VT_INFERENCE_ABORT_TIME_MS, 0);
  }
  // Bounds-checks every field; string vectors are verified element-by-element
  // via VerifyVectorOfStrings after the vector structure itself is verified.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PREFERENCE, 4) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PRIORITY, 4) &&
           VerifyOffset(verifier, VT_OPTIMIZATION_HINTS) &&
           verifier.VerifyVector(optimization_hints()) &&
           VerifyField<int32_t>(verifier, VT_OPERATION_CHECK_MODE, 4) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32, 1) &&
           VerifyField<uint8_t>(verifier, VT_USE_AHWB, 1) &&
           VerifyField<uint8_t>(verifier, VT_USE_CACHEABLE_BUFFER, 1) &&
           VerifyOffset(verifier, VT_COMPILE_OPTIONS) &&
           verifier.VerifyVector(compile_options()) &&
           verifier.VerifyVectorOfStrings(compile_options()) &&
           VerifyOffset(verifier, VT_ACCELERATOR_NAMES) &&
           verifier.VerifyVector(accelerator_names()) &&
           verifier.VerifyVectorOfStrings(accelerator_names()) &&
           VerifyOffset(verifier, VT_NEURON_CONFIG_PATH) &&
           verifier.VerifyString(neuron_config_path()) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_DEADLINE_MS, 4) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_ABORT_TIME_MS, 4) &&
           verifier.EndTable();
  }
  MtkNeuronSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(MtkNeuronSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<MtkNeuronSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MtkNeuronSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for the MtkNeuronSettings table (MediaTek Neuron
// delegate configuration). Each add_* method stages one optional field;
// scalar fields equal to their schema default (the last AddElement argument)
// are omitted from the buffer. Finish() ends the table and returns its offset.
// NOTE: generated by flatc — do not hand-edit; regenerate from the schema.
struct MtkNeuronSettingsBuilder {
  typedef MtkNeuronSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;  // target buffer (not owned)
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_execution_preference(tflite::MtkNeuronSettings_::ExecutionPreference execution_preference) {
    fbb_.AddElement<int32_t>(MtkNeuronSettings::VT_EXECUTION_PREFERENCE, static_cast<int32_t>(execution_preference), 0);
  }
  void add_execution_priority(tflite::MtkNeuronSettings_::ExecutionPriority execution_priority) {
    fbb_.AddElement<int32_t>(MtkNeuronSettings::VT_EXECUTION_PRIORITY, static_cast<int32_t>(execution_priority), 0);
  }
  void add_optimization_hints(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> optimization_hints) {
    fbb_.AddOffset(MtkNeuronSettings::VT_OPTIMIZATION_HINTS, optimization_hints);
  }
  void add_operation_check_mode(tflite::MtkNeuronSettings_::OperationCheckMode operation_check_mode) {
    fbb_.AddElement<int32_t>(MtkNeuronSettings::VT_OPERATION_CHECK_MODE, static_cast<int32_t>(operation_check_mode), 0);
  }
  void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) {
    fbb_.AddElement<uint8_t>(MtkNeuronSettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast<uint8_t>(allow_fp16_precision_for_fp32), 0);
  }
  void add_use_ahwb(bool use_ahwb) {
    fbb_.AddElement<uint8_t>(MtkNeuronSettings::VT_USE_AHWB, static_cast<uint8_t>(use_ahwb), 0);
  }
  void add_use_cacheable_buffer(bool use_cacheable_buffer) {
    // Schema default is true (1), unlike the other bool fields above.
    fbb_.AddElement<uint8_t>(MtkNeuronSettings::VT_USE_CACHEABLE_BUFFER, static_cast<uint8_t>(use_cacheable_buffer), 1);
  }
  void add_compile_options(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> compile_options) {
    fbb_.AddOffset(MtkNeuronSettings::VT_COMPILE_OPTIONS, compile_options);
  }
  void add_accelerator_names(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> accelerator_names) {
    fbb_.AddOffset(MtkNeuronSettings::VT_ACCELERATOR_NAMES, accelerator_names);
  }
  void add_neuron_config_path(::flatbuffers::Offset<::flatbuffers::String> neuron_config_path) {
    fbb_.AddOffset(MtkNeuronSettings::VT_NEURON_CONFIG_PATH, neuron_config_path);
  }
  void add_inference_deadline_ms(int32_t inference_deadline_ms) {
    fbb_.AddElement<int32_t>(MtkNeuronSettings::VT_INFERENCE_DEADLINE_MS, inference_deadline_ms, 0);
  }
  void add_inference_abort_time_ms(int32_t inference_abort_time_ms) {
    fbb_.AddElement<int32_t>(MtkNeuronSettings::VT_INFERENCE_ABORT_TIME_MS, inference_abort_time_ms, 0);
  }
  explicit MtkNeuronSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MtkNeuronSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MtkNeuronSettings>(end);
    return o;
  }
};

// Builds a complete MtkNeuronSettings table in one call. Vector/string
// arguments must already be serialized into _fbb (pass their offsets).
// Fields are added 4-byte scalars/offsets first, 1-byte bools last — the
// ordering emitted by flatc; keep it unchanged so generated buffers stay
// byte-for-byte reproducible.
inline ::flatbuffers::Offset<MtkNeuronSettings> CreateMtkNeuronSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::MtkNeuronSettings_::ExecutionPreference execution_preference = tflite::MtkNeuronSettings_::ExecutionPreference_PREFERENCE_UNDEFINED,
    tflite::MtkNeuronSettings_::ExecutionPriority execution_priority = tflite::MtkNeuronSettings_::ExecutionPriority_PRIORITY_UNDEFINED,
    ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> optimization_hints = 0,
    tflite::MtkNeuronSettings_::OperationCheckMode operation_check_mode = tflite::MtkNeuronSettings_::OperationCheckMode_NO_OPERATION_CHECK,
    bool allow_fp16_precision_for_fp32 = false,
    bool use_ahwb = false,
    bool use_cacheable_buffer = true,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> compile_options = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> accelerator_names = 0,
    ::flatbuffers::Offset<::flatbuffers::String> neuron_config_path = 0,
    int32_t inference_deadline_ms = 0,
    int32_t inference_abort_time_ms = 0) {
  MtkNeuronSettingsBuilder builder_(_fbb);
  builder_.add_inference_abort_time_ms(inference_abort_time_ms);
  builder_.add_inference_deadline_ms(inference_deadline_ms);
  builder_.add_neuron_config_path(neuron_config_path);
  builder_.add_accelerator_names(accelerator_names);
  builder_.add_compile_options(compile_options);
  builder_.add_operation_check_mode(operation_check_mode);
  builder_.add_optimization_hints(optimization_hints);
  builder_.add_execution_priority(execution_priority);
  builder_.add_execution_preference(execution_preference);
  builder_.add_use_cacheable_buffer(use_cacheable_buffer);
  builder_.add_use_ahwb(use_ahwb);
  builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32);
  return builder_.Finish();
}

// Convenience overload taking plain C++ containers/strings. Serializes each
// non-null vector/string into _fbb first (null pointers become absent
// fields, offset 0), then delegates to CreateMtkNeuronSettings.
inline ::flatbuffers::Offset<MtkNeuronSettings> CreateMtkNeuronSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::MtkNeuronSettings_::ExecutionPreference execution_preference = tflite::MtkNeuronSettings_::ExecutionPreference_PREFERENCE_UNDEFINED,
    tflite::MtkNeuronSettings_::ExecutionPriority execution_priority = tflite::MtkNeuronSettings_::ExecutionPriority_PRIORITY_UNDEFINED,
    const std::vector<int32_t> *optimization_hints = nullptr,
    tflite::MtkNeuronSettings_::OperationCheckMode operation_check_mode = tflite::MtkNeuronSettings_::OperationCheckMode_NO_OPERATION_CHECK,
    bool allow_fp16_precision_for_fp32 = false,
    bool use_ahwb = false,
    bool use_cacheable_buffer = true,
    const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *compile_options = nullptr,
    const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *accelerator_names = nullptr,
    const char *neuron_config_path = nullptr,
    int32_t inference_deadline_ms = 0,
    int32_t inference_abort_time_ms = 0) {
  auto optimization_hints__ = optimization_hints ? _fbb.CreateVector<int32_t>(*optimization_hints) : 0;
  auto compile_options__ = compile_options ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*compile_options) : 0;
  auto accelerator_names__ = accelerator_names ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*accelerator_names) : 0;
  auto neuron_config_path__ = neuron_config_path ? _fbb.CreateString(neuron_config_path) : 0;
  return tflite::CreateMtkNeuronSettings(
      _fbb,
      execution_preference,
      execution_priority,
      optimization_hints__,
      operation_check_mode,
      allow_fp16_precision_for_fp32,
      use_ahwb,
      use_cacheable_buffer,
      compile_options__,
      accelerator_names__,
      neuron_config_path__,
      inference_deadline_ms,
      inference_abort_time_ms);
}

::flatbuffers::Offset<MtkNeuronSettings> CreateMtkNeuronSettings(::flatbuffers::FlatBufferBuilder &_fbb, const MtkNeuronSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object API) mirror of the TFLiteSettings table: which delegate to
// use plus one optional sub-settings object per supported delegate. Owning
// unique_ptr members suppress the implicit copy operations, so a deep-copy
// constructor and copy-and-swap assignment are declared here (defined out of
// line in the generated source).
struct TFLiteSettingsT : public ::flatbuffers::NativeTable {
  typedef TFLiteSettings TableType;
  tflite::Delegate delegate = tflite::Delegate_NONE;
  std::unique_ptr<tflite::NNAPISettingsT> nnapi_settings{};
  std::unique_ptr<tflite::GPUSettingsT> gpu_settings{};
  std::unique_ptr<tflite::HexagonSettingsT> hexagon_settings{};
  std::unique_ptr<tflite::XNNPackSettingsT> xnnpack_settings{};
  std::unique_ptr<tflite::CoreMLSettingsT> coreml_settings{};
  std::unique_ptr<tflite::CPUSettingsT> cpu_settings{};
  int32_t max_delegated_partitions = 0;
  std::unique_ptr<tflite::EdgeTpuSettingsT> edgetpu_settings{};
  std::unique_ptr<tflite::CoralSettingsT> coral_settings{};
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings{};
  bool disable_default_delegates = false;
  std::unique_ptr<tflite::StableDelegateLoaderSettingsT> stable_delegate_loader_settings{};
  std::unique_ptr<tflite::GoogleEdgeTpuSettingsT> google_edgetpu_settings{};
  std::unique_ptr<tflite::CompilationCachingSettingsT> compilation_caching_settings{};
  std::unique_ptr<tflite::ArmNNSettingsT> armnn_settings{};
  std::unique_ptr<tflite::MtkNeuronSettingsT> mtk_neuron_settings{};
  TFLiteSettingsT() = default;
  TFLiteSettingsT(const TFLiteSettingsT &o);  // deep copy, defined out of line
  TFLiteSettingsT(TFLiteSettingsT&&) FLATBUFFERS_NOEXCEPT = default;
  TFLiteSettingsT &operator=(TFLiteSettingsT o) FLATBUFFERS_NOEXCEPT;  // copy-and-swap
};

// Zero-copy accessor view over a serialized TFLiteSettings table. Accessors
// read directly from the buffer; sub-table accessors return nullptr when the
// optional field is absent.
struct TFLiteSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef TFLiteSettingsT NativeTableType;
  typedef TFLiteSettingsBuilder Builder;
  // Vtable byte offsets for each field, in schema declaration order.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DELEGATE = 4,
    VT_NNAPI_SETTINGS = 6,
    VT_GPU_SETTINGS = 8,
    VT_HEXAGON_SETTINGS = 10,
    VT_XNNPACK_SETTINGS = 12,
    VT_COREML_SETTINGS = 14,
    VT_CPU_SETTINGS = 16,
    VT_MAX_DELEGATED_PARTITIONS = 18,
    VT_EDGETPU_SETTINGS = 20,
    VT_CORAL_SETTINGS = 22,
    VT_FALLBACK_SETTINGS = 24,
    VT_DISABLE_DEFAULT_DELEGATES = 26,
    VT_STABLE_DELEGATE_LOADER_SETTINGS = 28,
    VT_GOOGLE_EDGETPU_SETTINGS = 30,
    VT_COMPILATION_CACHING_SETTINGS = 32,
    VT_ARMNN_SETTINGS = 34,
    VT_MTK_NEURON_SETTINGS = 36
  };
  tflite::Delegate delegate() const {
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_DELEGATE, 0));
  }
  const tflite::NNAPISettings *nnapi_settings() const {
    return GetPointer<const tflite::NNAPISettings *>(VT_NNAPI_SETTINGS);
  }
  const tflite::GPUSettings *gpu_settings() const {
    return GetPointer<const tflite::GPUSettings *>(VT_GPU_SETTINGS);
  }
  const tflite::HexagonSettings *hexagon_settings() const {
    return GetPointer<const tflite::HexagonSettings *>(VT_HEXAGON_SETTINGS);
  }
  const tflite::XNNPackSettings *xnnpack_settings() const {
    return GetPointer<const tflite::XNNPackSettings *>(VT_XNNPACK_SETTINGS);
  }
  const tflite::CoreMLSettings *coreml_settings() const {
    return GetPointer<const tflite::CoreMLSettings *>(VT_COREML_SETTINGS);
  }
  const tflite::CPUSettings *cpu_settings() const {
    return GetPointer<const tflite::CPUSettings *>(VT_CPU_SETTINGS);
  }
  int32_t max_delegated_partitions() const {
    return GetField<int32_t>(VT_MAX_DELEGATED_PARTITIONS, 0);
  }
  const tflite::EdgeTpuSettings *edgetpu_settings() const {
    return GetPointer<const tflite::EdgeTpuSettings *>(VT_EDGETPU_SETTINGS);
  }
  const tflite::CoralSettings *coral_settings() const {
    return GetPointer<const tflite::CoralSettings *>(VT_CORAL_SETTINGS);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  bool disable_default_delegates() const {
    return GetField<uint8_t>(VT_DISABLE_DEFAULT_DELEGATES, 0) != 0;
  }
  const tflite::StableDelegateLoaderSettings *stable_delegate_loader_settings() const {
    return GetPointer<const tflite::StableDelegateLoaderSettings *>(VT_STABLE_DELEGATE_LOADER_SETTINGS);
  }
  const tflite::GoogleEdgeTpuSettings *google_edgetpu_settings() const {
    return GetPointer<const tflite::GoogleEdgeTpuSettings *>(VT_GOOGLE_EDGETPU_SETTINGS);
  }
  const tflite::CompilationCachingSettings *compilation_caching_settings() const {
    return GetPointer<const tflite::CompilationCachingSettings *>(VT_COMPILATION_CACHING_SETTINGS);
  }
  const tflite::ArmNNSettings *armnn_settings() const {
    return GetPointer<const tflite::ArmNNSettings *>(VT_ARMNN_SETTINGS);
  }
  const tflite::MtkNeuronSettings *mtk_neuron_settings() const {
    return GetPointer<const tflite::MtkNeuronSettings *>(VT_MTK_NEURON_SETTINGS);
  }
  // Bounds-checks every field and recurses into each present sub-table;
  // absent (null) optional sub-tables are accepted.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DELEGATE, 4) &&
           VerifyOffset(verifier, VT_NNAPI_SETTINGS) &&
           verifier.VerifyTable(nnapi_settings()) &&
           VerifyOffset(verifier, VT_GPU_SETTINGS) &&
           verifier.VerifyTable(gpu_settings()) &&
           VerifyOffset(verifier, VT_HEXAGON_SETTINGS) &&
           verifier.VerifyTable(hexagon_settings()) &&
           VerifyOffset(verifier, VT_XNNPACK_SETTINGS) &&
           verifier.VerifyTable(xnnpack_settings()) &&
           VerifyOffset(verifier, VT_COREML_SETTINGS) &&
           verifier.VerifyTable(coreml_settings()) &&
           VerifyOffset(verifier, VT_CPU_SETTINGS) &&
           verifier.VerifyTable(cpu_settings()) &&
           VerifyField<int32_t>(verifier, VT_MAX_DELEGATED_PARTITIONS, 4) &&
           VerifyOffset(verifier, VT_EDGETPU_SETTINGS) &&
           verifier.VerifyTable(edgetpu_settings()) &&
           VerifyOffset(verifier, VT_CORAL_SETTINGS) &&
           verifier.VerifyTable(coral_settings()) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           VerifyField<uint8_t>(verifier, VT_DISABLE_DEFAULT_DELEGATES, 1) &&
           VerifyOffset(verifier, VT_STABLE_DELEGATE_LOADER_SETTINGS) &&
           verifier.VerifyTable(stable_delegate_loader_settings()) &&
           VerifyOffset(verifier, VT_GOOGLE_EDGETPU_SETTINGS) &&
           verifier.VerifyTable(google_edgetpu_settings()) &&
           VerifyOffset(verifier, VT_COMPILATION_CACHING_SETTINGS) &&
           verifier.VerifyTable(compilation_caching_settings()) &&
           VerifyOffset(verifier, VT_ARMNN_SETTINGS) &&
           verifier.VerifyTable(armnn_settings()) &&
           VerifyOffset(verifier, VT_MTK_NEURON_SETTINGS) &&
           verifier.VerifyTable(mtk_neuron_settings()) &&
           verifier.EndTable();
  }
  TFLiteSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TFLiteSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<TFLiteSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for the TFLiteSettings table. Each add_* method stages
// one optional field; scalars equal to their schema default are omitted.
// Finish() ends the table and returns its offset.
struct TFLiteSettingsBuilder {
  typedef TFLiteSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;  // target buffer (not owned)
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_delegate(tflite::Delegate delegate) {
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_DELEGATE, static_cast<int32_t>(delegate), 0);
  }
  void add_nnapi_settings(::flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_NNAPI_SETTINGS, nnapi_settings);
  }
  void add_gpu_settings(::flatbuffers::Offset<tflite::GPUSettings> gpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_GPU_SETTINGS, gpu_settings);
  }
  void add_hexagon_settings(::flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_HEXAGON_SETTINGS, hexagon_settings);
  }
  void add_xnnpack_settings(::flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_XNNPACK_SETTINGS, xnnpack_settings);
  }
  void add_coreml_settings(::flatbuffers::Offset<tflite::CoreMLSettings> coreml_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_COREML_SETTINGS, coreml_settings);
  }
  void add_cpu_settings(::flatbuffers::Offset<tflite::CPUSettings> cpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CPU_SETTINGS, cpu_settings);
  }
  void add_max_delegated_partitions(int32_t max_delegated_partitions) {
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0);
  }
  void add_edgetpu_settings(::flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_EDGETPU_SETTINGS, edgetpu_settings);
  }
  void add_coral_settings(::flatbuffers::Offset<tflite::CoralSettings> coral_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CORAL_SETTINGS, coral_settings);
  }
  void add_fallback_settings(::flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  void add_disable_default_delegates(bool disable_default_delegates) {
    fbb_.AddElement<uint8_t>(TFLiteSettings::VT_DISABLE_DEFAULT_DELEGATES, static_cast<uint8_t>(disable_default_delegates), 0);
  }
  void add_stable_delegate_loader_settings(::flatbuffers::Offset<tflite::StableDelegateLoaderSettings> stable_delegate_loader_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_STABLE_DELEGATE_LOADER_SETTINGS, stable_delegate_loader_settings);
  }
  void add_google_edgetpu_settings(::flatbuffers::Offset<tflite::GoogleEdgeTpuSettings> google_edgetpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_GOOGLE_EDGETPU_SETTINGS, google_edgetpu_settings);
  }
  void add_compilation_caching_settings(::flatbuffers::Offset<tflite::CompilationCachingSettings> compilation_caching_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_COMPILATION_CACHING_SETTINGS, compilation_caching_settings);
  }
  void add_armnn_settings(::flatbuffers::Offset<tflite::ArmNNSettings> armnn_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_ARMNN_SETTINGS, armnn_settings);
  }
  void add_mtk_neuron_settings(::flatbuffers::Offset<tflite::MtkNeuronSettings> mtk_neuron_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_MTK_NEURON_SETTINGS, mtk_neuron_settings);
  }
  explicit TFLiteSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<TFLiteSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<TFLiteSettings>(end);
    return o;
  }
};

// Builds a complete TFLiteSettings table in one call. Sub-table arguments
// must already be serialized into _fbb. Fields are added 4-byte values first
// and the 1-byte bool last — the ordering emitted by flatc; keep it
// unchanged so generated buffers stay byte-for-byte reproducible.
inline ::flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate delegate = tflite::Delegate_NONE,
    ::flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings = 0,
    ::flatbuffers::Offset<tflite::GPUSettings> gpu_settings = 0,
    ::flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings = 0,
    ::flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings = 0,
    ::flatbuffers::Offset<tflite::CoreMLSettings> coreml_settings = 0,
    ::flatbuffers::Offset<tflite::CPUSettings> cpu_settings = 0,
    int32_t max_delegated_partitions = 0,
    ::flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings = 0,
    ::flatbuffers::Offset<tflite::CoralSettings> coral_settings = 0,
    ::flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool disable_default_delegates = false,
    ::flatbuffers::Offset<tflite::StableDelegateLoaderSettings> stable_delegate_loader_settings = 0,
    ::flatbuffers::Offset<tflite::GoogleEdgeTpuSettings> google_edgetpu_settings = 0,
    ::flatbuffers::Offset<tflite::CompilationCachingSettings> compilation_caching_settings = 0,
    ::flatbuffers::Offset<tflite::ArmNNSettings> armnn_settings = 0,
    ::flatbuffers::Offset<tflite::MtkNeuronSettings> mtk_neuron_settings = 0) {
  TFLiteSettingsBuilder builder_(_fbb);
  builder_.add_mtk_neuron_settings(mtk_neuron_settings);
  builder_.add_armnn_settings(armnn_settings);
  builder_.add_compilation_caching_settings(compilation_caching_settings);
  builder_.add_google_edgetpu_settings(google_edgetpu_settings);
  builder_.add_stable_delegate_loader_settings(stable_delegate_loader_settings);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_coral_settings(coral_settings);
  builder_.add_edgetpu_settings(edgetpu_settings);
  builder_.add_max_delegated_partitions(max_delegated_partitions);
  builder_.add_cpu_settings(cpu_settings);
  builder_.add_coreml_settings(coreml_settings);
  builder_.add_xnnpack_settings(xnnpack_settings);
  builder_.add_hexagon_settings(hexagon_settings);
  builder_.add_gpu_settings(gpu_settings);
  builder_.add_nnapi_settings(nnapi_settings);
  builder_.add_delegate(delegate);
  builder_.add_disable_default_delegates(disable_default_delegates);
  return builder_.Finish();
}

::flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(::flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object API) mirror of FallbackSettings: whether TFLite may fall
// back to CPU when delegate compilation or execution fails.
struct FallbackSettingsT : public ::flatbuffers::NativeTable {
  typedef FallbackSettings TableType;
  bool allow_automatic_fallback_on_compilation_error = false;
  bool allow_automatic_fallback_on_execution_error = false;
};

// Zero-copy accessor view over a serialized FallbackSettings table; both
// bool fields default to false when absent.
struct FallbackSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef FallbackSettingsT NativeTableType;
  typedef FallbackSettingsBuilder Builder;
  // Vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR = 4,
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR = 6
  };
  bool allow_automatic_fallback_on_compilation_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 0) != 0;
  }
  bool allow_automatic_fallback_on_execution_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 0) != 0;
  }
  // Bounds-checks the table and both 1-byte fields.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 1) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 1) &&
           verifier.EndTable();
  }
  FallbackSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(FallbackSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<FallbackSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for the FallbackSettings table; bools equal to the
// default (false) are omitted from the buffer.
struct FallbackSettingsBuilder {
  typedef FallbackSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;  // target buffer (not owned)
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_allow_automatic_fallback_on_compilation_error(bool allow_automatic_fallback_on_compilation_error) {
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_compilation_error), 0);
  }
  void add_allow_automatic_fallback_on_execution_error(bool allow_automatic_fallback_on_execution_error) {
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_execution_error), 0);
  }
  explicit FallbackSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<FallbackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<FallbackSettings>(end);
    return o;
  }
};

// Builds a complete FallbackSettings table in one call. Field-add order is
// the one emitted by flatc; keep it unchanged for reproducible output.
inline ::flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    bool allow_automatic_fallback_on_compilation_error = false,
    bool allow_automatic_fallback_on_execution_error = false) {
  FallbackSettingsBuilder builder_(_fbb);
  builder_.add_allow_automatic_fallback_on_execution_error(allow_automatic_fallback_on_execution_error);
  builder_.add_allow_automatic_fallback_on_compilation_error(allow_automatic_fallback_on_compilation_error);
  return builder_.Finish();
}

::flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(::flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object API) mirror of BenchmarkMetric: a named series of float
// measurements.
struct BenchmarkMetricT : public ::flatbuffers::NativeTable {
  typedef BenchmarkMetric TableType;
  std::string name{};
  std::vector<float> values{};
};

// Zero-copy accessor view over a serialized BenchmarkMetric table; both
// accessors return nullptr when the optional field is absent.
struct BenchmarkMetric FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkMetricT NativeTableType;
  typedef BenchmarkMetricBuilder Builder;
  // Vtable byte offsets for each field.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NAME = 4,
    VT_VALUES = 6
  };
  const ::flatbuffers::String *name() const {
    return GetPointer<const ::flatbuffers::String *>(VT_NAME);
  }
  const ::flatbuffers::Vector<float> *values() const {
    return GetPointer<const ::flatbuffers::Vector<float> *>(VT_VALUES);
  }
  // Bounds-checks the table, the name string, and the values vector.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyOffset(verifier, VT_VALUES) &&
           verifier.VerifyVector(values()) &&
           verifier.EndTable();
  }
  BenchmarkMetricT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkMetricT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkMetric> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for the BenchmarkMetric table. The name string and
// values vector must be serialized into the buffer before StartTable (i.e.
// before this builder is constructed); only their offsets are added here.
struct BenchmarkMetricBuilder {
  typedef BenchmarkMetric Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;  // target buffer (not owned)
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
    fbb_.AddOffset(BenchmarkMetric::VT_NAME, name);
  }
  void add_values(::flatbuffers::Offset<::flatbuffers::Vector<float>> values) {
    fbb_.AddOffset(BenchmarkMetric::VT_VALUES, values);
  }
  explicit BenchmarkMetricBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkMetric> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkMetric>(end);
    return o;
  }
};

// Builds a complete BenchmarkMetric table from already-serialized offsets.
// Field-add order matches flatc output; keep it unchanged.
inline ::flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> name = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<float>> values = 0) {
  BenchmarkMetricBuilder builder_(_fbb);
  builder_.add_values(values);
  builder_.add_name(name);
  return builder_.Finish();
}

// Convenience overload taking a C string and std::vector; serializes each
// non-null argument into the buffer (null becomes an absent field), then
// delegates to CreateBenchmarkMetric.
inline ::flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetricDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *name = nullptr,
    const std::vector<float> *values = nullptr) {
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto values__ = values ? _fbb.CreateVector<float>(*values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      name__,
      values__);
}

::flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object API) mirror of BenchmarkResult: per-run timing vectors,
// peak memory, success flag, metrics, and captured outputs. The vectors of
// unique_ptr suppress the implicit copy operations, so a deep-copy
// constructor and copy-and-swap assignment are declared here (defined out of
// line in the generated source).
struct BenchmarkResultT : public ::flatbuffers::NativeTable {
  typedef BenchmarkResult TableType;
  std::vector<int64_t> initialization_time_us{};
  std::vector<int64_t> inference_time_us{};
  int32_t max_memory_kb = 0;
  bool ok = false;
  std::vector<std::unique_ptr<tflite::BenchmarkMetricT>> metrics{};
  std::vector<std::unique_ptr<tflite::BenchmarkResult_::InferenceOutputT>> actual_output{};
  BenchmarkResultT() = default;
  BenchmarkResultT(const BenchmarkResultT &o);  // deep copy, defined out of line
  BenchmarkResultT(BenchmarkResultT&&) FLATBUFFERS_NOEXCEPT = default;
  BenchmarkResultT &operator=(BenchmarkResultT o) FLATBUFFERS_NOEXCEPT;  // copy-and-swap
};

// Zero-copy accessor view over a serialized BenchmarkResult table; pointer
// accessors return nullptr when the optional field is absent.
struct BenchmarkResult FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkResultT NativeTableType;
  typedef BenchmarkResultBuilder Builder;
  // Vtable byte offsets for each field, in schema declaration order.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INITIALIZATION_TIME_US = 4,
    VT_INFERENCE_TIME_US = 6,
    VT_MAX_MEMORY_KB = 8,
    VT_OK = 10,
    VT_METRICS = 12,
    VT_ACTUAL_OUTPUT = 14
  };
  const ::flatbuffers::Vector<int64_t> *initialization_time_us() const {
    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_INITIALIZATION_TIME_US);
  }
  const ::flatbuffers::Vector<int64_t> *inference_time_us() const {
    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_INFERENCE_TIME_US);
  }
  int32_t max_memory_kb() const {
    return GetField<int32_t>(VT_MAX_MEMORY_KB, 0);
  }
  bool ok() const {
    return GetField<uint8_t>(VT_OK, 0) != 0;
  }
  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkMetric>> *>(VT_METRICS);
  }
  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>> *actual_output() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>> *>(VT_ACTUAL_OUTPUT);
  }
  // Bounds-checks every field; VerifyVectorOfTables recurses into each
  // element of the metrics and actual_output vectors.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INITIALIZATION_TIME_US) &&
           verifier.VerifyVector(initialization_time_us()) &&
           VerifyOffset(verifier, VT_INFERENCE_TIME_US) &&
           verifier.VerifyVector(inference_time_us()) &&
           VerifyField<int32_t>(verifier, VT_MAX_MEMORY_KB, 4) &&
           VerifyField<uint8_t>(verifier, VT_OK, 1) &&
           VerifyOffset(verifier, VT_METRICS) &&
           verifier.VerifyVector(metrics()) &&
           verifier.VerifyVectorOfTables(metrics()) &&
           VerifyOffset(verifier, VT_ACTUAL_OUTPUT) &&
           verifier.VerifyVector(actual_output()) &&
           verifier.VerifyVectorOfTables(actual_output()) &&
           verifier.EndTable();
  }
  BenchmarkResultT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkResultT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkResult> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for the BenchmarkResult table; vectors must already be
// serialized into the buffer, and scalars equal to their schema default are
// omitted. Finish() ends the table and returns its offset.
struct BenchmarkResultBuilder {
  typedef BenchmarkResult Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;  // target buffer (not owned)
  ::flatbuffers::uoffset_t start_;  // table start, recorded by StartTable()
  void add_initialization_time_us(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> initialization_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INITIALIZATION_TIME_US, initialization_time_us);
  }
  void add_inference_time_us(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> inference_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INFERENCE_TIME_US, inference_time_us);
  }
  void add_max_memory_kb(int32_t max_memory_kb) {
    fbb_.AddElement<int32_t>(BenchmarkResult::VT_MAX_MEMORY_KB, max_memory_kb, 0);
  }
  void add_ok(bool ok) {
    fbb_.AddElement<uint8_t>(BenchmarkResult::VT_OK, static_cast<uint8_t>(ok), 0);
  }
  void add_metrics(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics) {
    fbb_.AddOffset(BenchmarkResult::VT_METRICS, metrics);
  }
  void add_actual_output(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>>> actual_output) {
    fbb_.AddOffset(BenchmarkResult::VT_ACTUAL_OUTPUT, actual_output);
  }
  explicit BenchmarkResultBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkResult> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkResult>(end);
    return o;
  }
};

// Builds a complete BenchmarkResult table from already-serialized offsets.
// Fields are added 4-byte values first, the 1-byte bool last — the ordering
// emitted by flatc; keep it unchanged for reproducible output.
inline ::flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> initialization_time_us = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> inference_time_us = 0,
    int32_t max_memory_kb = 0,
    bool ok = false,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>>> actual_output = 0) {
  BenchmarkResultBuilder builder_(_fbb);
  builder_.add_actual_output(actual_output);
  builder_.add_metrics(metrics);
  builder_.add_max_memory_kb(max_memory_kb);
  builder_.add_inference_time_us(inference_time_us);
  builder_.add_initialization_time_us(initialization_time_us);
  builder_.add_ok(ok);
  return builder_.Finish();
}

// Convenience overload taking std::vector arguments; serializes each
// non-null vector into the buffer (null becomes an absent field), then
// delegates to CreateBenchmarkResult.
inline ::flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResultDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int64_t> *initialization_time_us = nullptr,
    const std::vector<int64_t> *inference_time_us = nullptr,
    int32_t max_memory_kb = 0,
    bool ok = false,
    const std::vector<::flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics = nullptr,
    const std::vector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>> *actual_output = nullptr) {
  auto initialization_time_us__ = initialization_time_us ? _fbb.CreateVector<int64_t>(*initialization_time_us) : 0;
  auto inference_time_us__ = inference_time_us ? _fbb.CreateVector<int64_t>(*inference_time_us) : 0;
  auto metrics__ = metrics ? _fbb.CreateVector<::flatbuffers::Offset<tflite::BenchmarkMetric>>(*metrics) : 0;
  auto actual_output__ = actual_output ? _fbb.CreateVector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>>(*actual_output) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      initialization_time_us__,
      inference_time_us__,
      max_memory_kb,
      ok,
      metrics__,
      actual_output__);
}

::flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

namespace BenchmarkResult_ {

// Native (object-API) mirror of BenchmarkResult_::InferenceOutput;
// owns its data as STL containers instead of pointing into a buffer.
struct InferenceOutputT : public ::flatbuffers::NativeTable {
  typedef InferenceOutput TableType;
  // Raw bytes of one recorded inference output -- presumably a serialized
  // tensor; exact encoding is defined by the producer (confirm in schema).
  std::vector<uint8_t> value{};
};

// Packed FlatBuffers table view over an InferenceOutput record; accessors
// read directly from the underlying buffer (zero-copy).
struct InferenceOutput FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef InferenceOutputT NativeTableType;
  typedef InferenceOutputBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_VALUE = 4
  };
  // Byte payload; returns nullptr when the field is absent from the buffer.
  const ::flatbuffers::Vector<uint8_t> *value() const {
    return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_VALUE);
  }
  // Structural validation of this table against buffer bounds.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_VALUE) &&
           verifier.VerifyVector(value()) &&
           verifier.EndTable();
  }
  // Object-API conversions (implemented in the generated .cc / inline section).
  InferenceOutputT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(InferenceOutputT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<InferenceOutput> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const InferenceOutputT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for InferenceOutput tables: StartTable on construction,
// add_* per field, Finish to close the table and obtain its offset.
struct InferenceOutputBuilder {
  typedef InferenceOutput Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_value(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> value) {
    fbb_.AddOffset(InferenceOutput::VT_VALUE, value);
  }
  explicit InferenceOutputBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<InferenceOutput> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<InferenceOutput>(end);
    return o;
  }
};

// One-shot factory: builds an InferenceOutput table from an already-serialized
// byte-vector offset (0 omits the field).
inline ::flatbuffers::Offset<InferenceOutput> CreateInferenceOutput(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> value = 0) {
  InferenceOutputBuilder builder_(_fbb);
  builder_.add_value(value);
  return builder_.Finish();
}

// Convenience overload: serializes the std::vector payload first, then
// delegates to CreateInferenceOutput. nullptr omits the field.
inline ::flatbuffers::Offset<InferenceOutput> CreateInferenceOutputDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<uint8_t> *value = nullptr) {
  auto value__ = value ? _fbb.CreateVector<uint8_t>(*value) : 0;
  return tflite::BenchmarkResult_::CreateInferenceOutput(
      _fbb,
      value__);
}

::flatbuffers::Offset<InferenceOutput> CreateInferenceOutput(::flatbuffers::FlatBufferBuilder &_fbb, const InferenceOutputT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

}  // namespace BenchmarkResult_

// Native (object-API) mirror of ErrorCode: a delegate-scoped error triple.
struct ErrorCodeT : public ::flatbuffers::NativeTable {
  typedef ErrorCode TableType;
  // Which delegate reported the error (NONE = TFLite runtime itself).
  tflite::Delegate source = tflite::Delegate_NONE;
  // TFLite-level status code.
  int32_t tflite_error = 0;
  // Error code from the underlying vendor/OS API, if any.
  int64_t underlying_api_error = 0;
};

// Packed FlatBuffers table view over an ErrorCode record (zero-copy accessors).
struct ErrorCode FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ErrorCodeT NativeTableType;
  typedef ErrorCodeBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_SOURCE = 4,
    VT_TFLITE_ERROR = 6,
    VT_UNDERLYING_API_ERROR = 8
  };
  // Enums are stored as their int32 underlying value and cast on read.
  tflite::Delegate source() const {
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_SOURCE, 0));
  }
  int32_t tflite_error() const {
    return GetField<int32_t>(VT_TFLITE_ERROR, 0);
  }
  int64_t underlying_api_error() const {
    return GetField<int64_t>(VT_UNDERLYING_API_ERROR, 0);
  }
  // Structural validation; trailing literals are the fields' alignments.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_SOURCE, 4) &&
           VerifyField<int32_t>(verifier, VT_TFLITE_ERROR, 4) &&
           VerifyField<int64_t>(verifier, VT_UNDERLYING_API_ERROR, 8) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  ErrorCodeT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ErrorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ErrorCode> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for ErrorCode tables. Scalar fields equal to the
// schema default (last AddElement argument) are not written to the buffer.
struct ErrorCodeBuilder {
  typedef ErrorCode Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_source(tflite::Delegate source) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_SOURCE, static_cast<int32_t>(source), 0);
  }
  void add_tflite_error(int32_t tflite_error) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_TFLITE_ERROR, tflite_error, 0);
  }
  void add_underlying_api_error(int64_t underlying_api_error) {
    fbb_.AddElement<int64_t>(ErrorCode::VT_UNDERLYING_API_ERROR, underlying_api_error, 0);
  }
  explicit ErrorCodeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ErrorCode> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ErrorCode>(end);
    return o;
  }
};

// One-shot factory for ErrorCode. Fields are added largest-first
// (generator-chosen order) to minimize alignment padding in the buffer.
inline ::flatbuffers::Offset<ErrorCode> CreateErrorCode(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate source = tflite::Delegate_NONE,
    int32_t tflite_error = 0,
    int64_t underlying_api_error = 0) {
  ErrorCodeBuilder builder_(_fbb);
  builder_.add_underlying_api_error(underlying_api_error);
  builder_.add_tflite_error(tflite_error);
  builder_.add_source(source);
  return builder_.Finish();
}

::flatbuffers::Offset<ErrorCode> CreateErrorCode(::flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of BenchmarkError. Holds owning pointers, so
// copy operations are user-declared (deep copy) while moves stay defaulted.
struct BenchmarkErrorT : public ::flatbuffers::NativeTable {
  typedef BenchmarkError TableType;
  // Which benchmark stage failed.
  tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN;
  // Process exit code and terminating signal, if the benchmark ran out-of-process.
  int32_t exit_code = 0;
  int32_t signal = 0;
  // Per-delegate error details (owned).
  std::vector<std::unique_ptr<tflite::ErrorCodeT>> error_code{};
  int32_t mini_benchmark_error_code = 0;
  BenchmarkErrorT() = default;
  BenchmarkErrorT(const BenchmarkErrorT &o);  // deep-copies error_code
  BenchmarkErrorT(BenchmarkErrorT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style unified assignment (takes by value).
  BenchmarkErrorT &operator=(BenchmarkErrorT o) FLATBUFFERS_NOEXCEPT;
};

// Packed FlatBuffers table view over a BenchmarkError record (zero-copy).
struct BenchmarkError FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkErrorT NativeTableType;
  typedef BenchmarkErrorBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_STAGE = 4,
    VT_EXIT_CODE = 6,
    VT_SIGNAL = 8,
    VT_ERROR_CODE = 10,
    VT_MINI_BENCHMARK_ERROR_CODE = 12
  };
  // Enum stored as int32 in the buffer, cast back on read.
  tflite::BenchmarkStage stage() const {
    return static_cast<tflite::BenchmarkStage>(GetField<int32_t>(VT_STAGE, 0));
  }
  int32_t exit_code() const {
    return GetField<int32_t>(VT_EXIT_CODE, 0);
  }
  int32_t signal() const {
    return GetField<int32_t>(VT_SIGNAL, 0);
  }
  // Vector of sub-table errors; nullptr when the field is absent.
  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::ErrorCode>> *error_code() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::ErrorCode>> *>(VT_ERROR_CODE);
  }
  int32_t mini_benchmark_error_code() const {
    return GetField<int32_t>(VT_MINI_BENCHMARK_ERROR_CODE, 0);
  }
  // Structural validation, including recursive verification of each
  // ErrorCode sub-table in the vector.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_STAGE, 4) &&
           VerifyField<int32_t>(verifier, VT_EXIT_CODE, 4) &&
           VerifyField<int32_t>(verifier, VT_SIGNAL, 4) &&
           VerifyOffset(verifier, VT_ERROR_CODE) &&
           verifier.VerifyVector(error_code()) &&
           verifier.VerifyVectorOfTables(error_code()) &&
           VerifyField<int32_t>(verifier, VT_MINI_BENCHMARK_ERROR_CODE, 4) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  BenchmarkErrorT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkErrorT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkError> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for BenchmarkError tables. Default-valued scalars
// (last AddElement argument) are elided from the buffer.
struct BenchmarkErrorBuilder {
  typedef BenchmarkError Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_stage(tflite::BenchmarkStage stage) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_STAGE, static_cast<int32_t>(stage), 0);
  }
  void add_exit_code(int32_t exit_code) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_EXIT_CODE, exit_code, 0);
  }
  void add_signal(int32_t signal) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_SIGNAL, signal, 0);
  }
  void add_error_code(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::ErrorCode>>> error_code) {
    fbb_.AddOffset(BenchmarkError::VT_ERROR_CODE, error_code);
  }
  void add_mini_benchmark_error_code(int32_t mini_benchmark_error_code) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_MINI_BENCHMARK_ERROR_CODE, mini_benchmark_error_code, 0);
  }
  explicit BenchmarkErrorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkError> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkError>(end);
    return o;
  }
};

// One-shot factory for BenchmarkError; add order is generator-chosen
// (reverse field id here, all fields 4-byte) and must not be reordered.
inline ::flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::ErrorCode>>> error_code = 0,
    int32_t mini_benchmark_error_code = 0) {
  BenchmarkErrorBuilder builder_(_fbb);
  builder_.add_mini_benchmark_error_code(mini_benchmark_error_code);
  builder_.add_error_code(error_code);
  builder_.add_signal(signal);
  builder_.add_exit_code(exit_code);
  builder_.add_stage(stage);
  return builder_.Finish();
}

// Convenience overload: serializes the error_code std::vector first, then
// delegates to CreateBenchmarkError. nullptr omits the vector field.
inline ::flatbuffers::Offset<BenchmarkError> CreateBenchmarkErrorDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    const std::vector<::flatbuffers::Offset<tflite::ErrorCode>> *error_code = nullptr,
    int32_t mini_benchmark_error_code = 0) {
  auto error_code__ = error_code ? _fbb.CreateVector<::flatbuffers::Offset<tflite::ErrorCode>>(*error_code) : 0;
  return tflite::CreateBenchmarkError(
      _fbb,
      stage,
      exit_code,
      signal,
      error_code__,
      mini_benchmark_error_code);
}

::flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of BenchmarkEvent: one logged benchmark run.
// Owns nested tables via unique_ptr, hence user-declared deep copy.
struct BenchmarkEventT : public ::flatbuffers::NativeTable {
  typedef BenchmarkEvent TableType;
  // Acceleration configuration this event was recorded for (owned).
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings{};
  tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE;
  // At most one of result/error is expected to be populated -- TODO confirm
  // against the schema's documentation.
  std::unique_ptr<tflite::BenchmarkResultT> result{};
  std::unique_ptr<tflite::BenchmarkErrorT> error{};
  // Event timestamps in microseconds on two clocks.
  int64_t boottime_us = 0;
  int64_t wallclock_us = 0;
  BenchmarkEventT() = default;
  BenchmarkEventT(const BenchmarkEventT &o);  // deep-copies owned sub-tables
  BenchmarkEventT(BenchmarkEventT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style unified assignment (takes by value).
  BenchmarkEventT &operator=(BenchmarkEventT o) FLATBUFFERS_NOEXCEPT;
};

// Packed FlatBuffers table view over a BenchmarkEvent record (zero-copy).
struct BenchmarkEvent FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkEventT NativeTableType;
  typedef BenchmarkEventBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TFLITE_SETTINGS = 4,
    VT_EVENT_TYPE = 6,
    VT_RESULT = 8,
    VT_ERROR = 10,
    VT_BOOTTIME_US = 12,
    VT_WALLCLOCK_US = 14
  };
  // Sub-table accessors return nullptr when the field is absent.
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  tflite::BenchmarkEventType event_type() const {
    return static_cast<tflite::BenchmarkEventType>(GetField<int32_t>(VT_EVENT_TYPE, 0));
  }
  const tflite::BenchmarkResult *result() const {
    return GetPointer<const tflite::BenchmarkResult *>(VT_RESULT);
  }
  const tflite::BenchmarkError *error() const {
    return GetPointer<const tflite::BenchmarkError *>(VT_ERROR);
  }
  int64_t boottime_us() const {
    return GetField<int64_t>(VT_BOOTTIME_US, 0);
  }
  int64_t wallclock_us() const {
    return GetField<int64_t>(VT_WALLCLOCK_US, 0);
  }
  // Structural validation, recursing into each nested table.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyField<int32_t>(verifier, VT_EVENT_TYPE, 4) &&
           VerifyOffset(verifier, VT_RESULT) &&
           verifier.VerifyTable(result()) &&
           VerifyOffset(verifier, VT_ERROR) &&
           verifier.VerifyTable(error()) &&
           VerifyField<int64_t>(verifier, VT_BOOTTIME_US, 8) &&
           VerifyField<int64_t>(verifier, VT_WALLCLOCK_US, 8) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  BenchmarkEventT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkEventT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkEvent> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for BenchmarkEvent tables.
struct BenchmarkEventBuilder {
  typedef BenchmarkEvent Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_tflite_settings(::flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(BenchmarkEvent::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_event_type(tflite::BenchmarkEventType event_type) {
    fbb_.AddElement<int32_t>(BenchmarkEvent::VT_EVENT_TYPE, static_cast<int32_t>(event_type), 0);
  }
  void add_result(::flatbuffers::Offset<tflite::BenchmarkResult> result) {
    fbb_.AddOffset(BenchmarkEvent::VT_RESULT, result);
  }
  void add_error(::flatbuffers::Offset<tflite::BenchmarkError> error) {
    fbb_.AddOffset(BenchmarkEvent::VT_ERROR, error);
  }
  void add_boottime_us(int64_t boottime_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_BOOTTIME_US, boottime_us, 0);
  }
  void add_wallclock_us(int64_t wallclock_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_WALLCLOCK_US, wallclock_us, 0);
  }
  explicit BenchmarkEventBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkEvent> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkEvent>(end);
    return o;
  }
};

// One-shot factory for BenchmarkEvent; fields are added largest-first
// (generator-chosen order) to minimize alignment padding in the buffer.
inline ::flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
    ::flatbuffers::Offset<tflite::BenchmarkResult> result = 0,
    ::flatbuffers::Offset<tflite::BenchmarkError> error = 0,
    int64_t boottime_us = 0,
    int64_t wallclock_us = 0) {
  BenchmarkEventBuilder builder_(_fbb);
  builder_.add_wallclock_us(wallclock_us);
  builder_.add_boottime_us(boottime_us);
  builder_.add_error(error);
  builder_.add_result(result);
  builder_.add_event_type(event_type);
  builder_.add_tflite_settings(tflite_settings);
  return builder_.Finish();
}

::flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of BestAccelerationDecision: the winning
// configuration chosen from a set of benchmark events.
struct BestAccelerationDecisionT : public ::flatbuffers::NativeTable {
  typedef BestAccelerationDecision TableType;
  // How many benchmark events the decision was derived from.
  int32_t number_of_source_events = 0;
  // The event with the lowest latency (owned).
  std::unique_ptr<tflite::BenchmarkEventT> min_latency_event{};
  int64_t min_inference_time_us = 0;
  BestAccelerationDecisionT() = default;
  BestAccelerationDecisionT(const BestAccelerationDecisionT &o);  // deep copy
  BestAccelerationDecisionT(BestAccelerationDecisionT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style unified assignment (takes by value).
  BestAccelerationDecisionT &operator=(BestAccelerationDecisionT o) FLATBUFFERS_NOEXCEPT;
};

// Packed FlatBuffers table view over a BestAccelerationDecision record.
struct BestAccelerationDecision FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BestAccelerationDecisionT NativeTableType;
  typedef BestAccelerationDecisionBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUMBER_OF_SOURCE_EVENTS = 4,
    VT_MIN_LATENCY_EVENT = 6,
    VT_MIN_INFERENCE_TIME_US = 8
  };
  int32_t number_of_source_events() const {
    return GetField<int32_t>(VT_NUMBER_OF_SOURCE_EVENTS, 0);
  }
  // nullptr when the field is absent from the buffer.
  const tflite::BenchmarkEvent *min_latency_event() const {
    return GetPointer<const tflite::BenchmarkEvent *>(VT_MIN_LATENCY_EVENT);
  }
  int64_t min_inference_time_us() const {
    return GetField<int64_t>(VT_MIN_INFERENCE_TIME_US, 0);
  }
  // Structural validation, recursing into the nested event table.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUMBER_OF_SOURCE_EVENTS, 4) &&
           VerifyOffset(verifier, VT_MIN_LATENCY_EVENT) &&
           verifier.VerifyTable(min_latency_event()) &&
           VerifyField<int64_t>(verifier, VT_MIN_INFERENCE_TIME_US, 8) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  BestAccelerationDecisionT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BestAccelerationDecisionT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BestAccelerationDecision> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for BestAccelerationDecision tables.
struct BestAccelerationDecisionBuilder {
  typedef BestAccelerationDecision Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_number_of_source_events(int32_t number_of_source_events) {
    fbb_.AddElement<int32_t>(BestAccelerationDecision::VT_NUMBER_OF_SOURCE_EVENTS, number_of_source_events, 0);
  }
  void add_min_latency_event(::flatbuffers::Offset<tflite::BenchmarkEvent> min_latency_event) {
    fbb_.AddOffset(BestAccelerationDecision::VT_MIN_LATENCY_EVENT, min_latency_event);
  }
  void add_min_inference_time_us(int64_t min_inference_time_us) {
    fbb_.AddElement<int64_t>(BestAccelerationDecision::VT_MIN_INFERENCE_TIME_US, min_inference_time_us, 0);
  }
  explicit BestAccelerationDecisionBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BestAccelerationDecision> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BestAccelerationDecision>(end);
    return o;
  }
};

// One-shot factory for BestAccelerationDecision; fields added largest-first
// (generator-chosen order) to minimize alignment padding.
inline ::flatbuffers::Offset<BestAccelerationDecision> CreateBestAccelerationDecision(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t number_of_source_events = 0,
    ::flatbuffers::Offset<tflite::BenchmarkEvent> min_latency_event = 0,
    int64_t min_inference_time_us = 0) {
  BestAccelerationDecisionBuilder builder_(_fbb);
  builder_.add_min_inference_time_us(min_inference_time_us);
  builder_.add_min_latency_event(min_latency_event);
  builder_.add_number_of_source_events(number_of_source_events);
  return builder_.Finish();
}

::flatbuffers::Offset<BestAccelerationDecision> CreateBestAccelerationDecision(::flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of BenchmarkInitializationFailure.
struct BenchmarkInitializationFailureT : public ::flatbuffers::NativeTable {
  typedef BenchmarkInitializationFailure TableType;
  // Status code describing why benchmark initialization failed.
  int32_t initialization_status = 0;
};

// Packed FlatBuffers table view over a BenchmarkInitializationFailure record.
struct BenchmarkInitializationFailure FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkInitializationFailureT NativeTableType;
  typedef BenchmarkInitializationFailureBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INITIALIZATION_STATUS = 4
  };
  int32_t initialization_status() const {
    return GetField<int32_t>(VT_INITIALIZATION_STATUS, 0);
  }
  // Structural validation of this table against buffer bounds.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INITIALIZATION_STATUS, 4) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  BenchmarkInitializationFailureT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkInitializationFailureT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkInitializationFailure> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for BenchmarkInitializationFailure tables.
struct BenchmarkInitializationFailureBuilder {
  typedef BenchmarkInitializationFailure Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_initialization_status(int32_t initialization_status) {
    fbb_.AddElement<int32_t>(BenchmarkInitializationFailure::VT_INITIALIZATION_STATUS, initialization_status, 0);
  }
  explicit BenchmarkInitializationFailureBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkInitializationFailure> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkInitializationFailure>(end);
    return o;
  }
};

// One-shot factory for BenchmarkInitializationFailure.
inline ::flatbuffers::Offset<BenchmarkInitializationFailure> CreateBenchmarkInitializationFailure(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int32_t initialization_status = 0) {
  BenchmarkInitializationFailureBuilder builder_(_fbb);
  builder_.add_initialization_status(initialization_status);
  return builder_.Finish();
}

::flatbuffers::Offset<BenchmarkInitializationFailure> CreateBenchmarkInitializationFailure(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of MiniBenchmarkEvent: a union-like record
// where, per the field layout, one of the three owned sub-events is set.
struct MiniBenchmarkEventT : public ::flatbuffers::NativeTable {
  typedef MiniBenchmarkEvent TableType;
  // True for a pure log-flush marker event -- presumably carries no payload;
  // confirm against the schema.
  bool is_log_flushing_event = false;
  std::unique_ptr<tflite::BestAccelerationDecisionT> best_acceleration_decision{};
  std::unique_ptr<tflite::BenchmarkInitializationFailureT> initialization_failure{};
  std::unique_ptr<tflite::BenchmarkEventT> benchmark_event{};
  MiniBenchmarkEventT() = default;
  MiniBenchmarkEventT(const MiniBenchmarkEventT &o);  // deep-copies sub-tables
  MiniBenchmarkEventT(MiniBenchmarkEventT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style unified assignment (takes by value).
  MiniBenchmarkEventT &operator=(MiniBenchmarkEventT o) FLATBUFFERS_NOEXCEPT;
};

// Packed FlatBuffers table view over a MiniBenchmarkEvent record (zero-copy).
struct MiniBenchmarkEvent FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MiniBenchmarkEventT NativeTableType;
  typedef MiniBenchmarkEventBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IS_LOG_FLUSHING_EVENT = 4,
    VT_BEST_ACCELERATION_DECISION = 6,
    VT_INITIALIZATION_FAILURE = 8,
    VT_BENCHMARK_EVENT = 10
  };
  // Bools are stored as uint8 in the buffer.
  bool is_log_flushing_event() const {
    return GetField<uint8_t>(VT_IS_LOG_FLUSHING_EVENT, 0) != 0;
  }
  // Sub-table accessors return nullptr when the field is absent.
  const tflite::BestAccelerationDecision *best_acceleration_decision() const {
    return GetPointer<const tflite::BestAccelerationDecision *>(VT_BEST_ACCELERATION_DECISION);
  }
  const tflite::BenchmarkInitializationFailure *initialization_failure() const {
    return GetPointer<const tflite::BenchmarkInitializationFailure *>(VT_INITIALIZATION_FAILURE);
  }
  const tflite::BenchmarkEvent *benchmark_event() const {
    return GetPointer<const tflite::BenchmarkEvent *>(VT_BENCHMARK_EVENT);
  }
  // Structural validation, recursing into each nested table.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_IS_LOG_FLUSHING_EVENT, 1) &&
           VerifyOffset(verifier, VT_BEST_ACCELERATION_DECISION) &&
           verifier.VerifyTable(best_acceleration_decision()) &&
           VerifyOffset(verifier, VT_INITIALIZATION_FAILURE) &&
           verifier.VerifyTable(initialization_failure()) &&
           VerifyOffset(verifier, VT_BENCHMARK_EVENT) &&
           verifier.VerifyTable(benchmark_event()) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  MiniBenchmarkEventT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(MiniBenchmarkEventT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<MiniBenchmarkEvent> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for MiniBenchmarkEvent tables.
struct MiniBenchmarkEventBuilder {
  typedef MiniBenchmarkEvent Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_is_log_flushing_event(bool is_log_flushing_event) {
    fbb_.AddElement<uint8_t>(MiniBenchmarkEvent::VT_IS_LOG_FLUSHING_EVENT, static_cast<uint8_t>(is_log_flushing_event), 0);
  }
  void add_best_acceleration_decision(::flatbuffers::Offset<tflite::BestAccelerationDecision> best_acceleration_decision) {
    fbb_.AddOffset(MiniBenchmarkEvent::VT_BEST_ACCELERATION_DECISION, best_acceleration_decision);
  }
  void add_initialization_failure(::flatbuffers::Offset<tflite::BenchmarkInitializationFailure> initialization_failure) {
    fbb_.AddOffset(MiniBenchmarkEvent::VT_INITIALIZATION_FAILURE, initialization_failure);
  }
  void add_benchmark_event(::flatbuffers::Offset<tflite::BenchmarkEvent> benchmark_event) {
    fbb_.AddOffset(MiniBenchmarkEvent::VT_BENCHMARK_EVENT, benchmark_event);
  }
  explicit MiniBenchmarkEventBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MiniBenchmarkEvent> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MiniBenchmarkEvent>(end);
    return o;
  }
};

// One-shot factory for MiniBenchmarkEvent; add order is generator-chosen
// and must not be reordered (buffer layout depends on it).
inline ::flatbuffers::Offset<MiniBenchmarkEvent> CreateMiniBenchmarkEvent(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    bool is_log_flushing_event = false,
    ::flatbuffers::Offset<tflite::BestAccelerationDecision> best_acceleration_decision = 0,
    ::flatbuffers::Offset<tflite::BenchmarkInitializationFailure> initialization_failure = 0,
    ::flatbuffers::Offset<tflite::BenchmarkEvent> benchmark_event = 0) {
  MiniBenchmarkEventBuilder builder_(_fbb);
  builder_.add_benchmark_event(benchmark_event);
  builder_.add_initialization_failure(initialization_failure);
  builder_.add_best_acceleration_decision(best_acceleration_decision);
  builder_.add_is_log_flushing_event(is_log_flushing_event);
  return builder_.Finish();
}

::flatbuffers::Offset<MiniBenchmarkEvent> CreateMiniBenchmarkEvent(::flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of ModelFile: where to load a model from,
// by path, file descriptor (with offset/length), or buffer handle.
struct ModelFileT : public ::flatbuffers::NativeTable {
  typedef ModelFile TableType;
  std::string filename{};
  // Open file descriptor alternative to filename; offset/length select the
  // model's byte range within that file.
  int64_t fd = 0;
  int64_t offset = 0;
  int64_t length = 0;
  // Namespace/id pair identifying the model (owned).
  std::unique_ptr<tflite::ModelIdGroupT> model_id_group{};
  int64_t buffer_handle = 0;
  ModelFileT() = default;
  ModelFileT(const ModelFileT &o);  // deep-copies model_id_group
  ModelFileT(ModelFileT&&) FLATBUFFERS_NOEXCEPT = default;
  // Copy-and-swap style unified assignment (takes by value).
  ModelFileT &operator=(ModelFileT o) FLATBUFFERS_NOEXCEPT;
};

// Packed FlatBuffers table view over a ModelFile record (zero-copy).
struct ModelFile FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ModelFileT NativeTableType;
  typedef ModelFileBuilder Builder;
  // vtable byte offsets for each field (generated; do not renumber).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_FILENAME = 4,
    VT_FD = 6,
    VT_OFFSET = 8,
    VT_LENGTH = 10,
    VT_MODEL_ID_GROUP = 12,
    VT_BUFFER_HANDLE = 14
  };
  // nullptr when the field is absent from the buffer.
  const ::flatbuffers::String *filename() const {
    return GetPointer<const ::flatbuffers::String *>(VT_FILENAME);
  }
  int64_t fd() const {
    return GetField<int64_t>(VT_FD, 0);
  }
  int64_t offset() const {
    return GetField<int64_t>(VT_OFFSET, 0);
  }
  int64_t length() const {
    return GetField<int64_t>(VT_LENGTH, 0);
  }
  const tflite::ModelIdGroup *model_id_group() const {
    return GetPointer<const tflite::ModelIdGroup *>(VT_MODEL_ID_GROUP);
  }
  int64_t buffer_handle() const {
    return GetField<int64_t>(VT_BUFFER_HANDLE, 0);
  }
  // Structural validation, including the string and nested table.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_FILENAME) &&
           verifier.VerifyString(filename()) &&
           VerifyField<int64_t>(verifier, VT_FD, 8) &&
           VerifyField<int64_t>(verifier, VT_OFFSET, 8) &&
           VerifyField<int64_t>(verifier, VT_LENGTH, 8) &&
           VerifyOffset(verifier, VT_MODEL_ID_GROUP) &&
           verifier.VerifyTable(model_id_group()) &&
           VerifyField<int64_t>(verifier, VT_BUFFER_HANDLE, 8) &&
           verifier.EndTable();
  }
  // Object-API conversions.
  ModelFileT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ModelFileT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ModelFile> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Incremental builder for ModelFile tables.
struct ModelFileBuilder {
  typedef ModelFile Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;  // table start mark returned by StartTable()
  void add_filename(::flatbuffers::Offset<::flatbuffers::String> filename) {
    fbb_.AddOffset(ModelFile::VT_FILENAME, filename);
  }
  void add_fd(int64_t fd) {
    fbb_.AddElement<int64_t>(ModelFile::VT_FD, fd, 0);
  }
  void add_offset(int64_t offset) {
    fbb_.AddElement<int64_t>(ModelFile::VT_OFFSET, offset, 0);
  }
  void add_length(int64_t length) {
    fbb_.AddElement<int64_t>(ModelFile::VT_LENGTH, length, 0);
  }
  void add_model_id_group(::flatbuffers::Offset<tflite::ModelIdGroup> model_id_group) {
    fbb_.AddOffset(ModelFile::VT_MODEL_ID_GROUP, model_id_group);
  }
  void add_buffer_handle(int64_t buffer_handle) {
    fbb_.AddElement<int64_t>(ModelFile::VT_BUFFER_HANDLE, buffer_handle, 0);
  }
  explicit ModelFileBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ModelFile> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ModelFile>(end);
    return o;
  }
};

// One-shot factory for ModelFile; fields are added largest-first
// (generator-chosen order) to minimize alignment padding in the buffer.
inline ::flatbuffers::Offset<ModelFile> CreateModelFile(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> filename = 0,
    int64_t fd = 0,
    int64_t offset = 0,
    int64_t length = 0,
    ::flatbuffers::Offset<tflite::ModelIdGroup> model_id_group = 0,
    int64_t buffer_handle = 0) {
  ModelFileBuilder builder_(_fbb);
  builder_.add_buffer_handle(buffer_handle);
  builder_.add_length(length);
  builder_.add_offset(offset);
  builder_.add_fd(fd);
  builder_.add_model_id_group(model_id_group);
  builder_.add_filename(filename);
  return builder_.Finish();
}

// Convenience overload: serializes the C-string filename first, then
// delegates to CreateModelFile. nullptr omits the filename field.
inline ::flatbuffers::Offset<ModelFile> CreateModelFileDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *filename = nullptr,
    int64_t fd = 0,
    int64_t offset = 0,
    int64_t length = 0,
    ::flatbuffers::Offset<tflite::ModelIdGroup> model_id_group = 0,
    int64_t buffer_handle = 0) {
  auto filename__ = filename ? _fbb.CreateString(filename) : 0;
  return tflite::CreateModelFile(
      _fbb,
      filename__,
      fd,
      offset,
      length,
      model_id_group,
      buffer_handle);
}

::flatbuffers::Offset<ModelFile> CreateModelFile(::flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Native (object-API) mirror of ModelIdGroup: a namespace/id string pair
// identifying a model.
struct ModelIdGroupT : public ::flatbuffers::NativeTable {
  typedef ModelIdGroup TableType;
  std::string model_namespace{};
  std::string model_id{};
};

struct ModelIdGroup FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ModelIdGroupT NativeTableType;
  typedef ModelIdGroupBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_MODEL_NAMESPACE = 4,
    VT_MODEL_ID = 6
  };
  const ::flatbuffers::String *model_namespace() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_NAMESPACE);
  }
  const ::flatbuffers::String *model_id() const {
    return GetPointer<const ::flatbuffers::String *>(VT_MODEL_ID);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_MODEL_NAMESPACE) &&
           verifier.VerifyString(model_namespace()) &&
           VerifyOffset(verifier, VT_MODEL_ID) &&
           verifier.VerifyString(model_id()) &&
           verifier.EndTable();
  }
  ModelIdGroupT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ModelIdGroupT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ModelIdGroup> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelIdGroupT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

struct ModelIdGroupBuilder {
  typedef ModelIdGroup Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_model_namespace(::flatbuffers::Offset<::flatbuffers::String> model_namespace) {
    fbb_.AddOffset(ModelIdGroup::VT_MODEL_NAMESPACE, model_namespace);
  }
  void add_model_id(::flatbuffers::Offset<::flatbuffers::String> model_id) {
    fbb_.AddOffset(ModelIdGroup::VT_MODEL_ID, model_id);
  }
  explicit ModelIdGroupBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ModelIdGroup> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ModelIdGroup>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<ModelIdGroup> CreateModelIdGroup(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> model_namespace = 0,
    ::flatbuffers::Offset<::flatbuffers::String> model_id = 0) {
  ModelIdGroupBuilder builder_(_fbb);
  builder_.add_model_id(model_id);
  builder_.add_model_namespace(model_namespace);
  return builder_.Finish();
}

inline ::flatbuffers::Offset<ModelIdGroup> CreateModelIdGroupDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *model_namespace = nullptr,
    const char *model_id = nullptr) {
  auto model_namespace__ = model_namespace ? _fbb.CreateString(model_namespace) : 0;
  auto model_id__ = model_id ? _fbb.CreateString(model_id) : 0;
  return tflite::CreateModelIdGroup(
      _fbb,
      model_namespace__,
      model_id__);
}

::flatbuffers::Offset<ModelIdGroup> CreateModelIdGroup(::flatbuffers::FlatBufferBuilder &_fbb, const ModelIdGroupT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object-API ("native") mirror of BenchmarkStoragePaths: owns both path
// strings as std::string.
struct BenchmarkStoragePathsT : public ::flatbuffers::NativeTable {
  typedef BenchmarkStoragePaths TableType;
  std::string storage_file_path{};
  std::string data_directory_path{};
};

// Read-only accessor for a BenchmarkStoragePaths table inside a flatbuffer.
struct BenchmarkStoragePaths FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkStoragePathsT NativeTableType;
  typedef BenchmarkStoragePathsBuilder Builder;
  // Byte offsets of each field's entry in the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_STORAGE_FILE_PATH = 4,
    VT_DATA_DIRECTORY_PATH = 6
  };
  const ::flatbuffers::String *storage_file_path() const {
    return GetPointer<const ::flatbuffers::String *>(VT_STORAGE_FILE_PATH);
  }
  const ::flatbuffers::String *data_directory_path() const {
    return GetPointer<const ::flatbuffers::String *>(VT_DATA_DIRECTORY_PATH);
  }
  // Bounds-checks the table and its optional string fields.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_STORAGE_FILE_PATH) &&
           verifier.VerifyString(storage_file_path()) &&
           VerifyOffset(verifier, VT_DATA_DIRECTORY_PATH) &&
           verifier.VerifyString(data_directory_path()) &&
           verifier.EndTable();
  }
  BenchmarkStoragePathsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkStoragePathsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkStoragePaths> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder: add fields between StartTable (ctor) and Finish().
struct BenchmarkStoragePathsBuilder {
  typedef BenchmarkStoragePaths Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_storage_file_path(::flatbuffers::Offset<::flatbuffers::String> storage_file_path) {
    fbb_.AddOffset(BenchmarkStoragePaths::VT_STORAGE_FILE_PATH, storage_file_path);
  }
  void add_data_directory_path(::flatbuffers::Offset<::flatbuffers::String> data_directory_path) {
    fbb_.AddOffset(BenchmarkStoragePaths::VT_DATA_DIRECTORY_PATH, data_directory_path);
  }
  explicit BenchmarkStoragePathsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkStoragePaths> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkStoragePaths>(end);
    return o;
  }
};

// One-call convenience wrapper around BenchmarkStoragePathsBuilder.
inline ::flatbuffers::Offset<BenchmarkStoragePaths> CreateBenchmarkStoragePaths(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::String> storage_file_path = 0,
    ::flatbuffers::Offset<::flatbuffers::String> data_directory_path = 0) {
  BenchmarkStoragePathsBuilder builder_(_fbb);
  builder_.add_data_directory_path(data_directory_path);
  builder_.add_storage_file_path(storage_file_path);
  return builder_.Finish();
}

// As above, but copies C strings into the buffer; null pointers produce
// absent fields rather than empty strings.
inline ::flatbuffers::Offset<BenchmarkStoragePaths> CreateBenchmarkStoragePathsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const char *storage_file_path = nullptr,
    const char *data_directory_path = nullptr) {
  auto storage_file_path__ = storage_file_path ? _fbb.CreateString(storage_file_path) : 0;
  auto data_directory_path__ = data_directory_path ? _fbb.CreateString(data_directory_path) : 0;
  return tflite::CreateBenchmarkStoragePaths(
      _fbb,
      storage_file_path__,
      data_directory_path__);
}

// Forward declaration of the object-API packer.
::flatbuffers::Offset<BenchmarkStoragePaths> CreateBenchmarkStoragePaths(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object-API ("native") mirror of ValidationSettings: a single scalar field.
struct ValidationSettingsT : public ::flatbuffers::NativeTable {
  typedef ValidationSettings TableType;
  int64_t per_test_timeout_ms = 0;
};

// Read-only accessor for a ValidationSettings table inside a flatbuffer.
struct ValidationSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ValidationSettingsT NativeTableType;
  typedef ValidationSettingsBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PER_TEST_TIMEOUT_MS = 4
  };
  // Returns 0 (the schema default) when the field is absent.
  int64_t per_test_timeout_ms() const {
    return GetField<int64_t>(VT_PER_TEST_TIMEOUT_MS, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           // Trailing 8 is the required alignment of the int64 field.
           VerifyField<int64_t>(verifier, VT_PER_TEST_TIMEOUT_MS, 8) &&
           verifier.EndTable();
  }
  ValidationSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ValidationSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<ValidationSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ValidationSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder: add fields between StartTable (ctor) and Finish().
struct ValidationSettingsBuilder {
  typedef ValidationSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_per_test_timeout_ms(int64_t per_test_timeout_ms) {
    fbb_.AddElement<int64_t>(ValidationSettings::VT_PER_TEST_TIMEOUT_MS, per_test_timeout_ms, 0);
  }
  explicit ValidationSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ValidationSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ValidationSettings>(end);
    return o;
  }
};

// One-call convenience wrapper around ValidationSettingsBuilder.
inline ::flatbuffers::Offset<ValidationSettings> CreateValidationSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    int64_t per_test_timeout_ms = 0) {
  ValidationSettingsBuilder builder_(_fbb);
  builder_.add_per_test_timeout_ms(per_test_timeout_ms);
  return builder_.Finish();
}

// Forward declaration of the object-API packer.
::flatbuffers::Offset<ValidationSettings> CreateValidationSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ValidationSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object-API ("native") mirror of MinibenchmarkSettings.  Because the
// sub-table members are held by unique_ptr (not copyable), the generator
// declares a deep-copying copy constructor and a by-value (copy-and-swap)
// assignment operator; moves remain defaulted.
struct MinibenchmarkSettingsT : public ::flatbuffers::NativeTable {
  typedef MinibenchmarkSettings TableType;
  std::vector<std::unique_ptr<tflite::TFLiteSettingsT>> settings_to_test{};
  std::unique_ptr<tflite::ModelFileT> model_file{};
  std::unique_ptr<tflite::BenchmarkStoragePathsT> storage_paths{};
  std::unique_ptr<tflite::ValidationSettingsT> validation_settings{};
  MinibenchmarkSettingsT() = default;
  MinibenchmarkSettingsT(const MinibenchmarkSettingsT &o);
  MinibenchmarkSettingsT(MinibenchmarkSettingsT&&) FLATBUFFERS_NOEXCEPT = default;
  MinibenchmarkSettingsT &operator=(MinibenchmarkSettingsT o) FLATBUFFERS_NOEXCEPT;
};

// Read-only accessor for a MinibenchmarkSettings table inside a flatbuffer.
// Accessors return nullptr when the corresponding field is absent.
struct MinibenchmarkSettings FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MinibenchmarkSettingsT NativeTableType;
  typedef MinibenchmarkSettingsBuilder Builder;
  // Byte offsets of each field's entry in the table's vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_SETTINGS_TO_TEST = 4,
    VT_MODEL_FILE = 6,
    VT_STORAGE_PATHS = 8,
    VT_VALIDATION_SETTINGS = 10
  };
  const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TFLiteSettings>> *settings_to_test() const {
    return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tflite::TFLiteSettings>> *>(VT_SETTINGS_TO_TEST);
  }
  const tflite::ModelFile *model_file() const {
    return GetPointer<const tflite::ModelFile *>(VT_MODEL_FILE);
  }
  const tflite::BenchmarkStoragePaths *storage_paths() const {
    return GetPointer<const tflite::BenchmarkStoragePaths *>(VT_STORAGE_PATHS);
  }
  const tflite::ValidationSettings *validation_settings() const {
    return GetPointer<const tflite::ValidationSettings *>(VT_VALIDATION_SETTINGS);
  }
  // Recursively bounds-checks the vector of sub-tables and each nested table.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_SETTINGS_TO_TEST) &&
           verifier.VerifyVector(settings_to_test()) &&
           verifier.VerifyVectorOfTables(settings_to_test()) &&
           VerifyOffset(verifier, VT_MODEL_FILE) &&
           verifier.VerifyTable(model_file()) &&
           VerifyOffset(verifier, VT_STORAGE_PATHS) &&
           verifier.VerifyTable(storage_paths()) &&
           VerifyOffset(verifier, VT_VALIDATION_SETTINGS) &&
           verifier.VerifyTable(validation_settings()) &&
           verifier.EndTable();
  }
  MinibenchmarkSettingsT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(MinibenchmarkSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<MinibenchmarkSettings> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder: add fields between StartTable (ctor) and Finish().
struct MinibenchmarkSettingsBuilder {
  typedef MinibenchmarkSettings Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_settings_to_test(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TFLiteSettings>>> settings_to_test) {
    fbb_.AddOffset(MinibenchmarkSettings::VT_SETTINGS_TO_TEST, settings_to_test);
  }
  void add_model_file(::flatbuffers::Offset<tflite::ModelFile> model_file) {
    fbb_.AddOffset(MinibenchmarkSettings::VT_MODEL_FILE, model_file);
  }
  void add_storage_paths(::flatbuffers::Offset<tflite::BenchmarkStoragePaths> storage_paths) {
    fbb_.AddOffset(MinibenchmarkSettings::VT_STORAGE_PATHS, storage_paths);
  }
  void add_validation_settings(::flatbuffers::Offset<tflite::ValidationSettings> validation_settings) {
    fbb_.AddOffset(MinibenchmarkSettings::VT_VALIDATION_SETTINGS, validation_settings);
  }
  explicit MinibenchmarkSettingsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MinibenchmarkSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MinibenchmarkSettings>(end);
    return o;
  }
};

// One-call convenience wrapper around MinibenchmarkSettingsBuilder.
inline ::flatbuffers::Offset<MinibenchmarkSettings> CreateMinibenchmarkSettings(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tflite::TFLiteSettings>>> settings_to_test = 0,
    ::flatbuffers::Offset<tflite::ModelFile> model_file = 0,
    ::flatbuffers::Offset<tflite::BenchmarkStoragePaths> storage_paths = 0,
    ::flatbuffers::Offset<tflite::ValidationSettings> validation_settings = 0) {
  MinibenchmarkSettingsBuilder builder_(_fbb);
  builder_.add_validation_settings(validation_settings);
  builder_.add_storage_paths(storage_paths);
  builder_.add_model_file(model_file);
  builder_.add_settings_to_test(settings_to_test);
  return builder_.Finish();
}

// As above, but first serializes a std::vector of offsets into the buffer;
// a null vector pointer produces an absent field.
inline ::flatbuffers::Offset<MinibenchmarkSettings> CreateMinibenchmarkSettingsDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<::flatbuffers::Offset<tflite::TFLiteSettings>> *settings_to_test = nullptr,
    ::flatbuffers::Offset<tflite::ModelFile> model_file = 0,
    ::flatbuffers::Offset<tflite::BenchmarkStoragePaths> storage_paths = 0,
    ::flatbuffers::Offset<tflite::ValidationSettings> validation_settings = 0) {
  auto settings_to_test__ = settings_to_test ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TFLiteSettings>>(*settings_to_test) : 0;
  return tflite::CreateMinibenchmarkSettings(
      _fbb,
      settings_to_test__,
      model_file,
      storage_paths,
      validation_settings);
}

// Forward declaration of the object-API packer.
::flatbuffers::Offset<MinibenchmarkSettings> CreateMinibenchmarkSettings(::flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);

// Object-API ("native") mirror of BenchmarkEventStorage.  The unique_ptr
// members make the type non-copyable by default, so the generator declares
// a deep-copying copy constructor and a by-value (copy-and-swap) assignment
// operator; moves remain defaulted.
struct BenchmarkEventStorageT : public ::flatbuffers::NativeTable {
  typedef BenchmarkEventStorage TableType;
  std::unique_ptr<tflite::ModelIdGroupT> model_id_group{};
  std::unique_ptr<tflite::BenchmarkEventT> benchmark_event{};
  BenchmarkEventStorageT() = default;
  BenchmarkEventStorageT(const BenchmarkEventStorageT &o);
  BenchmarkEventStorageT(BenchmarkEventStorageT&&) FLATBUFFERS_NOEXCEPT = default;
  BenchmarkEventStorageT &operator=(BenchmarkEventStorageT o) FLATBUFFERS_NOEXCEPT;
};

// Read-only accessor for a BenchmarkEventStorage table inside a flatbuffer.
struct BenchmarkEventStorage FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef BenchmarkEventStorageT NativeTableType;
  typedef BenchmarkEventStorageBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_MODEL_ID_GROUP = 4,
    VT_BENCHMARK_EVENT = 6
  };
  const tflite::ModelIdGroup *model_id_group() const {
    return GetPointer<const tflite::ModelIdGroup *>(VT_MODEL_ID_GROUP);
  }
  const tflite::BenchmarkEvent *benchmark_event() const {
    return GetPointer<const tflite::BenchmarkEvent *>(VT_BENCHMARK_EVENT);
  }
  // Bounds-checks the table and recursively verifies both nested tables.
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_MODEL_ID_GROUP) &&
           verifier.VerifyTable(model_id_group()) &&
           VerifyOffset(verifier, VT_BENCHMARK_EVENT) &&
           verifier.VerifyTable(benchmark_event()) &&
           verifier.EndTable();
  }
  BenchmarkEventStorageT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkEventStorageT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static ::flatbuffers::Offset<BenchmarkEventStorage> Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventStorageT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

// Low-level builder: add fields between StartTable (ctor) and Finish().
struct BenchmarkEventStorageBuilder {
  typedef BenchmarkEventStorage Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_model_id_group(::flatbuffers::Offset<tflite::ModelIdGroup> model_id_group) {
    fbb_.AddOffset(BenchmarkEventStorage::VT_MODEL_ID_GROUP, model_id_group);
  }
  void add_benchmark_event(::flatbuffers::Offset<tflite::BenchmarkEvent> benchmark_event) {
    fbb_.AddOffset(BenchmarkEventStorage::VT_BENCHMARK_EVENT, benchmark_event);
  }
  explicit BenchmarkEventStorageBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<BenchmarkEventStorage> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<BenchmarkEventStorage>(end);
    return o;
  }
};

// One-call convenience wrapper around BenchmarkEventStorageBuilder.
inline ::flatbuffers::Offset<BenchmarkEventStorage> CreateBenchmarkEventStorage(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tflite::ModelIdGroup> model_id_group = 0,
    ::flatbuffers::Offset<tflite::BenchmarkEvent> benchmark_event = 0) {
  BenchmarkEventStorageBuilder builder_(_fbb);
  builder_.add_benchmark_event(benchmark_event);
  builder_.add_model_id_group(model_id_group);
  return builder_.Finish();
}

// Forward declaration of the object-API packer.
::flatbuffers::Offset<BenchmarkEventStorage> CreateBenchmarkEventStorage(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventStorageT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr);


// Deep equality: unique_ptr-held sub-objects compare equal when both are
// null, when they are the same pointer, or when the pointees compare equal.
inline bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
  return
      (lhs.preference == rhs.preference) &&
      ((lhs.tflite_settings == rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings)) &&
      (lhs.model_namespace_for_statistics == rhs.model_namespace_for_statistics) &&
      (lhs.model_identifier_for_statistics == rhs.model_identifier_for_statistics) &&
      ((lhs.settings_to_test_locally == rhs.settings_to_test_locally) || (lhs.settings_to_test_locally && rhs.settings_to_test_locally && *lhs.settings_to_test_locally == *rhs.settings_to_test_locally));
}

inline bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the unique_ptr-held sub-tables so that each
// ComputeSettingsT owns its own tree.
inline ComputeSettingsT::ComputeSettingsT(const ComputeSettingsT &o)
      : preference(o.preference),
        tflite_settings((o.tflite_settings) ? new tflite::TFLiteSettingsT(*o.tflite_settings) : nullptr),
        model_namespace_for_statistics(o.model_namespace_for_statistics),
        model_identifier_for_statistics(o.model_identifier_for_statistics),
        settings_to_test_locally((o.settings_to_test_locally) ? new tflite::MinibenchmarkSettingsT(*o.settings_to_test_locally) : nullptr) {
}

// Copy-and-swap assignment: the parameter is taken by value (copying or
// moving at the call site), then members are swapped in; the temporary's
// destructor releases the old state.
inline ComputeSettingsT &ComputeSettingsT::operator=(ComputeSettingsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(preference, o.preference);
  std::swap(tflite_settings, o.tflite_settings);
  std::swap(model_namespace_for_statistics, o.model_namespace_for_statistics);
  std::swap(model_identifier_for_statistics, o.model_identifier_for_statistics);
  std::swap(settings_to_test_locally, o.settings_to_test_locally);
  return *this;
}

// Allocates a fresh native object and unpacks into it; caller owns the
// returned pointer.
inline ComputeSettingsT *ComputeSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ComputeSettingsT>(new ComputeSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this table into *_o.  Existing sub-objects in _o are
// reused (unpacked in place) when present; absent table fields reset the
// corresponding member.
inline void ComputeSettings::UnPackTo(ComputeSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = preference(); _o->preference = _e; }
  { auto _e = tflite_settings(); if (_e) { if(_o->tflite_settings) { _e->UnPackTo(_o->tflite_settings.get(), _resolver); } else { _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); } } else if (_o->tflite_settings) { _o->tflite_settings.reset(); } }
  { auto _e = model_namespace_for_statistics(); if (_e) _o->model_namespace_for_statistics = _e->str(); }
  { auto _e = model_identifier_for_statistics(); if (_e) _o->model_identifier_for_statistics = _e->str(); }
  { auto _e = settings_to_test_locally(); if (_e) { if(_o->settings_to_test_locally) { _e->UnPackTo(_o->settings_to_test_locally.get(), _resolver); } else { _o->settings_to_test_locally = std::unique_ptr<tflite::MinibenchmarkSettingsT>(_e->UnPack(_resolver)); } } else if (_o->settings_to_test_locally) { _o->settings_to_test_locally.reset(); } }
}

inline ::flatbuffers::Offset<ComputeSettings> ComputeSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateComputeSettings(_fbb, _o, _rehasher);
}

// Serializes a native ComputeSettingsT back into a flatbuffer table.  Empty
// strings and null sub-objects serialize as absent fields (offset 0).
inline ::flatbuffers::Offset<ComputeSettings> CreateComputeSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va is only referenced by vector-field lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ComputeSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _preference = _o->preference;
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _model_namespace_for_statistics = _o->model_namespace_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_namespace_for_statistics);
  auto _model_identifier_for_statistics = _o->model_identifier_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_identifier_for_statistics);
  auto _settings_to_test_locally = _o->settings_to_test_locally ? CreateMinibenchmarkSettings(_fbb, _o->settings_to_test_locally.get(), _rehasher) : 0;
  return tflite::CreateComputeSettings(
      _fbb,
      _preference,
      _tflite_settings,
      _model_namespace_for_statistics,
      _model_identifier_for_statistics,
      _settings_to_test_locally);
}


// Deep equality: the unique_ptr-held fallback_settings compares equal when
// both sides are null, the same pointer, or the pointees compare equal.
inline bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
  return
      (lhs.accelerator_name == rhs.accelerator_name) &&
      (lhs.cache_directory == rhs.cache_directory) &&
      (lhs.model_token == rhs.model_token) &&
      (lhs.execution_preference == rhs.execution_preference) &&
      (lhs.no_of_nnapi_instances_to_cache == rhs.no_of_nnapi_instances_to_cache) &&
      ((lhs.fallback_settings == rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings)) &&
      (lhs.allow_nnapi_cpu_on_android_10_plus == rhs.allow_nnapi_cpu_on_android_10_plus) &&
      (lhs.execution_priority == rhs.execution_priority) &&
      (lhs.allow_dynamic_dimensions == rhs.allow_dynamic_dimensions) &&
      (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32) &&
      (lhs.use_burst_computation == rhs.use_burst_computation) &&
      (lhs.support_library_handle == rhs.support_library_handle);
}

inline bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the owned FallbackSettingsT sub-table.
inline NNAPISettingsT::NNAPISettingsT(const NNAPISettingsT &o)
      : accelerator_name(o.accelerator_name),
        cache_directory(o.cache_directory),
        model_token(o.model_token),
        execution_preference(o.execution_preference),
        no_of_nnapi_instances_to_cache(o.no_of_nnapi_instances_to_cache),
        fallback_settings((o.fallback_settings) ? new tflite::FallbackSettingsT(*o.fallback_settings) : nullptr),
        allow_nnapi_cpu_on_android_10_plus(o.allow_nnapi_cpu_on_android_10_plus),
        execution_priority(o.execution_priority),
        allow_dynamic_dimensions(o.allow_dynamic_dimensions),
        allow_fp16_precision_for_fp32(o.allow_fp16_precision_for_fp32),
        use_burst_computation(o.use_burst_computation),
        support_library_handle(o.support_library_handle) {
}

// Copy-and-swap assignment (parameter taken by value, members swapped in).
inline NNAPISettingsT &NNAPISettingsT::operator=(NNAPISettingsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(accelerator_name, o.accelerator_name);
  std::swap(cache_directory, o.cache_directory);
  std::swap(model_token, o.model_token);
  std::swap(execution_preference, o.execution_preference);
  std::swap(no_of_nnapi_instances_to_cache, o.no_of_nnapi_instances_to_cache);
  std::swap(fallback_settings, o.fallback_settings);
  std::swap(allow_nnapi_cpu_on_android_10_plus, o.allow_nnapi_cpu_on_android_10_plus);
  std::swap(execution_priority, o.execution_priority);
  std::swap(allow_dynamic_dimensions, o.allow_dynamic_dimensions);
  std::swap(allow_fp16_precision_for_fp32, o.allow_fp16_precision_for_fp32);
  std::swap(use_burst_computation, o.use_burst_computation);
  std::swap(support_library_handle, o.support_library_handle);
  return *this;
}

// Allocates a fresh native object and unpacks into it; caller owns it.
inline NNAPISettingsT *NNAPISettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<NNAPISettingsT>(new NNAPISettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this table into *_o; string fields are copied only
// when present, and the fallback_settings sub-table is reused in place when
// _o already holds one.
inline void NNAPISettings::UnPackTo(NNAPISettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = accelerator_name(); if (_e) _o->accelerator_name = _e->str(); }
  { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
  { auto _e = execution_preference(); _o->execution_preference = _e; }
  { auto _e = no_of_nnapi_instances_to_cache(); _o->no_of_nnapi_instances_to_cache = _e; }
  { auto _e = fallback_settings(); if (_e) { if(_o->fallback_settings) { _e->UnPackTo(_o->fallback_settings.get(), _resolver); } else { _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); } } else if (_o->fallback_settings) { _o->fallback_settings.reset(); } }
  { auto _e = allow_nnapi_cpu_on_android_10_plus(); _o->allow_nnapi_cpu_on_android_10_plus = _e; }
  { auto _e = execution_priority(); _o->execution_priority = _e; }
  { auto _e = allow_dynamic_dimensions(); _o->allow_dynamic_dimensions = _e; }
  { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; }
  { auto _e = use_burst_computation(); _o->use_burst_computation = _e; }
  { auto _e = support_library_handle(); _o->support_library_handle = _e; }
}

inline ::flatbuffers::Offset<NNAPISettings> NNAPISettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateNNAPISettings(_fbb, _o, _rehasher);
}

// Serializes a native NNAPISettingsT back into a flatbuffer table.  Empty
// strings and a null fallback_settings serialize as absent fields.
inline ::flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(::flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va is only referenced by vector-field lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const NNAPISettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _accelerator_name = _o->accelerator_name.empty() ? 0 : _fbb.CreateString(_o->accelerator_name);
  auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory);
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  auto _execution_preference = _o->execution_preference;
  auto _no_of_nnapi_instances_to_cache = _o->no_of_nnapi_instances_to_cache;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  auto _allow_nnapi_cpu_on_android_10_plus = _o->allow_nnapi_cpu_on_android_10_plus;
  auto _execution_priority = _o->execution_priority;
  auto _allow_dynamic_dimensions = _o->allow_dynamic_dimensions;
  auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32;
  auto _use_burst_computation = _o->use_burst_computation;
  auto _support_library_handle = _o->support_library_handle;
  return tflite::CreateNNAPISettings(
      _fbb,
      _accelerator_name,
      _cache_directory,
      _model_token,
      _execution_preference,
      _no_of_nnapi_instances_to_cache,
      _fallback_settings,
      _allow_nnapi_cpu_on_android_10_plus,
      _execution_priority,
      _allow_dynamic_dimensions,
      _allow_fp16_precision_for_fp32,
      _use_burst_computation,
      _support_library_handle);
}


// Memberwise equality; GPUSettingsT holds only value-type fields, so no
// pointee comparison is needed here.
inline bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
  return
      (lhs.is_precision_loss_allowed == rhs.is_precision_loss_allowed) &&
      (lhs.enable_quantized_inference == rhs.enable_quantized_inference) &&
      (lhs.force_backend == rhs.force_backend) &&
      (lhs.inference_priority1 == rhs.inference_priority1) &&
      (lhs.inference_priority2 == rhs.inference_priority2) &&
      (lhs.inference_priority3 == rhs.inference_priority3) &&
      (lhs.inference_preference == rhs.inference_preference) &&
      (lhs.cache_directory == rhs.cache_directory) &&
      (lhs.model_token == rhs.model_token);
}

inline bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a fresh native object and unpacks into it; caller owns it.
inline GPUSettingsT *GPUSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<GPUSettingsT>(new GPUSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this table into *_o; string fields are copied only
// when present in the buffer.
inline void GPUSettings::UnPackTo(GPUSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = is_precision_loss_allowed(); _o->is_precision_loss_allowed = _e; }
  { auto _e = enable_quantized_inference(); _o->enable_quantized_inference = _e; }
  { auto _e = force_backend(); _o->force_backend = _e; }
  { auto _e = inference_priority1(); _o->inference_priority1 = _e; }
  { auto _e = inference_priority2(); _o->inference_priority2 = _e; }
  { auto _e = inference_priority3(); _o->inference_priority3 = _e; }
  { auto _e = inference_preference(); _o->inference_preference = _e; }
  { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
}

inline ::flatbuffers::Offset<GPUSettings> GPUSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGPUSettings(_fbb, _o, _rehasher);
}

// Serializes a native GPUSettingsT back into a flatbuffer table.  Empty
// strings serialize as absent fields (offset 0).
inline ::flatbuffers::Offset<GPUSettings> CreateGPUSettings(::flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va is only referenced by vector-field lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GPUSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _is_precision_loss_allowed = _o->is_precision_loss_allowed;
  auto _enable_quantized_inference = _o->enable_quantized_inference;
  auto _force_backend = _o->force_backend;
  auto _inference_priority1 = _o->inference_priority1;
  auto _inference_priority2 = _o->inference_priority2;
  auto _inference_priority3 = _o->inference_priority3;
  auto _inference_preference = _o->inference_preference;
  auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory);
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  return tflite::CreateGPUSettings(
      _fbb,
      _is_precision_loss_allowed,
      _enable_quantized_inference,
      _force_backend,
      _inference_priority1,
      _inference_priority2,
      _inference_priority3,
      _inference_preference,
      _cache_directory,
      _model_token);
}


// Memberwise equality over the four scalar fields of HexagonSettingsT.
inline bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
  return
      (lhs.debug_level == rhs.debug_level) &&
      (lhs.powersave_level == rhs.powersave_level) &&
      (lhs.print_graph_profile == rhs.print_graph_profile) &&
      (lhs.print_graph_debug == rhs.print_graph_debug);
}

inline bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a fresh native object and unpacks into it; caller owns it.
inline HexagonSettingsT *HexagonSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<HexagonSettingsT>(new HexagonSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every scalar field of this table into *_o.
inline void HexagonSettings::UnPackTo(HexagonSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = debug_level(); _o->debug_level = _e; }
  { auto _e = powersave_level(); _o->powersave_level = _e; }
  { auto _e = print_graph_profile(); _o->print_graph_profile = _e; }
  { auto _e = print_graph_debug(); _o->print_graph_debug = _e; }
}

inline ::flatbuffers::Offset<HexagonSettings> HexagonSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateHexagonSettings(_fbb, _o, _rehasher);
}

// Serializes a native HexagonSettingsT back into a flatbuffer table.
inline ::flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(::flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va is only referenced by vector-field lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const HexagonSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _debug_level = _o->debug_level;
  auto _powersave_level = _o->powersave_level;
  auto _print_graph_profile = _o->print_graph_profile;
  auto _print_graph_debug = _o->print_graph_debug;
  return tflite::CreateHexagonSettings(
      _fbb,
      _debug_level,
      _powersave_level,
      _print_graph_profile,
      _print_graph_debug);
}


// Field-by-field equality for the native XNNPackSettingsT object type.
inline bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads) &&
      (lhs.flags == rhs.flags) &&
      (lhs.weight_cache_file_path == rhs.weight_cache_file_path);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline XNNPackSettingsT *XNNPackSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<XNNPackSettingsT>(new XNNPackSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each present field of this table into the native object *_o.
// The string field is copied only when present in the buffer.
inline void XNNPackSettings::UnPackTo(XNNPackSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
  { auto _e = flags(); _o->flags = _e; }
  { auto _e = weight_cache_file_path(); if (_e) _o->weight_cache_file_path = _e->str(); }
}

// Serializes a native XNNPackSettingsT object; delegates to CreateXNNPackSettings.
inline ::flatbuffers::Offset<XNNPackSettings> XNNPackSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateXNNPackSettings(_fbb, _o, _rehasher);
}

// Builds an XNNPackSettings table from the native object; an empty
// weight_cache_file_path yields a 0 offset (field absent).
inline ::flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(::flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const XNNPackSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  auto _flags = _o->flags;
  auto _weight_cache_file_path = _o->weight_cache_file_path.empty() ? 0 : _fbb.CreateString(_o->weight_cache_file_path);
  return tflite::CreateXNNPackSettings(
      _fbb,
      _num_threads,
      _flags,
      _weight_cache_file_path);
}


// Field-by-field equality for the native CoreMLSettingsT object type.
inline bool operator==(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs) {
  return
      (lhs.enabled_devices == rhs.enabled_devices) &&
      (lhs.coreml_version == rhs.coreml_version) &&
      (lhs.max_delegated_partitions == rhs.max_delegated_partitions) &&
      (lhs.min_nodes_per_partition == rhs.min_nodes_per_partition);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline CoreMLSettingsT *CoreMLSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<CoreMLSettingsT>(new CoreMLSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each field of this table into the pre-existing native object *_o.
inline void CoreMLSettings::UnPackTo(CoreMLSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = enabled_devices(); _o->enabled_devices = _e; }
  { auto _e = coreml_version(); _o->coreml_version = _e; }
  { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; }
  { auto _e = min_nodes_per_partition(); _o->min_nodes_per_partition = _e; }
}

// Serializes a native CoreMLSettingsT object; delegates to CreateCoreMLSettings.
inline ::flatbuffers::Offset<CoreMLSettings> CoreMLSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCoreMLSettings(_fbb, _o, _rehasher);
}

// Builds a CoreMLSettings table from the native object (scalar fields only).
inline ::flatbuffers::Offset<CoreMLSettings> CreateCoreMLSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CoreMLSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _enabled_devices = _o->enabled_devices;
  auto _coreml_version = _o->coreml_version;
  auto _max_delegated_partitions = _o->max_delegated_partitions;
  auto _min_nodes_per_partition = _o->min_nodes_per_partition;
  return tflite::CreateCoreMLSettings(
      _fbb,
      _enabled_devices,
      _coreml_version,
      _max_delegated_partitions,
      _min_nodes_per_partition);
}


// Field-by-field equality for the native StableDelegateLoaderSettingsT type.
inline bool operator==(const StableDelegateLoaderSettingsT &lhs, const StableDelegateLoaderSettingsT &rhs) {
  return
      (lhs.delegate_path == rhs.delegate_path) &&
      (lhs.delegate_name == rhs.delegate_name);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const StableDelegateLoaderSettingsT &lhs, const StableDelegateLoaderSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline StableDelegateLoaderSettingsT *StableDelegateLoaderSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<StableDelegateLoaderSettingsT>(new StableDelegateLoaderSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies the string fields into *_o when present in the buffer.
inline void StableDelegateLoaderSettings::UnPackTo(StableDelegateLoaderSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = delegate_path(); if (_e) _o->delegate_path = _e->str(); }
  { auto _e = delegate_name(); if (_e) _o->delegate_name = _e->str(); }
}

// Serializes a native object; delegates to CreateStableDelegateLoaderSettings.
inline ::flatbuffers::Offset<StableDelegateLoaderSettings> StableDelegateLoaderSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const StableDelegateLoaderSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateStableDelegateLoaderSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object; empty strings yield a 0 offset
// (field absent in the buffer).
inline ::flatbuffers::Offset<StableDelegateLoaderSettings> CreateStableDelegateLoaderSettings(::flatbuffers::FlatBufferBuilder &_fbb, const StableDelegateLoaderSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const StableDelegateLoaderSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _delegate_path = _o->delegate_path.empty() ? 0 : _fbb.CreateString(_o->delegate_path);
  auto _delegate_name = _o->delegate_name.empty() ? 0 : _fbb.CreateString(_o->delegate_name);
  return tflite::CreateStableDelegateLoaderSettings(
      _fbb,
      _delegate_path,
      _delegate_name);
}


// Field-by-field equality for the native CompilationCachingSettingsT type.
inline bool operator==(const CompilationCachingSettingsT &lhs, const CompilationCachingSettingsT &rhs) {
  return
      (lhs.cache_dir == rhs.cache_dir) &&
      (lhs.model_token == rhs.model_token);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const CompilationCachingSettingsT &lhs, const CompilationCachingSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline CompilationCachingSettingsT *CompilationCachingSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<CompilationCachingSettingsT>(new CompilationCachingSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies the string fields into *_o when present in the buffer.
inline void CompilationCachingSettings::UnPackTo(CompilationCachingSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = cache_dir(); if (_e) _o->cache_dir = _e->str(); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
}

// Serializes a native object; delegates to CreateCompilationCachingSettings.
inline ::flatbuffers::Offset<CompilationCachingSettings> CompilationCachingSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CompilationCachingSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCompilationCachingSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object; empty strings yield a 0 offset
// (field absent in the buffer).
inline ::flatbuffers::Offset<CompilationCachingSettings> CreateCompilationCachingSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CompilationCachingSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CompilationCachingSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _cache_dir = _o->cache_dir.empty() ? 0 : _fbb.CreateString(_o->cache_dir);
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  return tflite::CreateCompilationCachingSettings(
      _fbb,
      _cache_dir,
      _model_token);
}


// Field-by-field equality for the native EdgeTpuDeviceSpecT object type;
// device_paths (a vector of strings) compares element-wise via std::vector.
inline bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
  return
      (lhs.platform_type == rhs.platform_type) &&
      (lhs.num_chips == rhs.num_chips) &&
      (lhs.device_paths == rhs.device_paths) &&
      (lhs.chip_family == rhs.chip_family);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline EdgeTpuDeviceSpecT *EdgeTpuDeviceSpec::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<EdgeTpuDeviceSpecT>(new EdgeTpuDeviceSpecT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each field into *_o; device_paths is resized to match the buffer
// (cleared when the field is absent) and each element copied as std::string.
inline void EdgeTpuDeviceSpec::UnPackTo(EdgeTpuDeviceSpecT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = platform_type(); _o->platform_type = _e; }
  { auto _e = num_chips(); _o->num_chips = _e; }
  { auto _e = device_paths(); if (_e) { _o->device_paths.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->device_paths[_i] = _e->Get(_i)->str(); } } else { _o->device_paths.resize(0); } }
  { auto _e = chip_family(); _o->chip_family = _e; }
}

// Serializes a native object; delegates to CreateEdgeTpuDeviceSpec.
inline ::flatbuffers::Offset<EdgeTpuDeviceSpec> EdgeTpuDeviceSpec::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuDeviceSpec(_fbb, _o, _rehasher);
}

// Builds the table from the native object; an empty device_paths vector
// yields a 0 offset (field absent in the buffer).
inline ::flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuDeviceSpecT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _platform_type = _o->platform_type;
  auto _num_chips = _o->num_chips;
  auto _device_paths = _o->device_paths.size() ? _fbb.CreateVectorOfStrings(_o->device_paths) : 0;
  auto _chip_family = _o->chip_family;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      _platform_type,
      _num_chips,
      _device_paths,
      _chip_family);
}


// Field-by-field equality for the native EdgeTpuInactivePowerConfigT type.
inline bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
  return
      (lhs.inactive_power_state == rhs.inactive_power_state) &&
      (lhs.inactive_timeout_us == rhs.inactive_timeout_us);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline EdgeTpuInactivePowerConfigT *EdgeTpuInactivePowerConfig::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<EdgeTpuInactivePowerConfigT>(new EdgeTpuInactivePowerConfigT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each field of this table into the pre-existing native object *_o.
inline void EdgeTpuInactivePowerConfig::UnPackTo(EdgeTpuInactivePowerConfigT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inactive_power_state(); _o->inactive_power_state = _e; }
  { auto _e = inactive_timeout_us(); _o->inactive_timeout_us = _e; }
}

// Serializes a native object; delegates to CreateEdgeTpuInactivePowerConfig.
inline ::flatbuffers::Offset<EdgeTpuInactivePowerConfig> EdgeTpuInactivePowerConfig::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuInactivePowerConfig(_fbb, _o, _rehasher);
}

// Builds the table from the native object (scalar fields only).
inline ::flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuInactivePowerConfigT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inactive_power_state = _o->inactive_power_state;
  auto _inactive_timeout_us = _o->inactive_timeout_us;
  return tflite::CreateEdgeTpuInactivePowerConfig(
      _fbb,
      _inactive_power_state,
      _inactive_timeout_us);
}


// Deep equality for the native EdgeTpuSettingsT object type. unique_ptr
// members compare as "same pointer, or both non-null and equal pointees";
// the inactive_power_configs vector applies that rule element-wise.
inline bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
  return
      (lhs.inference_power_state == rhs.inference_power_state) &&
      (lhs.inactive_power_configs.size() == rhs.inactive_power_configs.size() && std::equal(lhs.inactive_power_configs.cbegin(), lhs.inactive_power_configs.cend(), rhs.inactive_power_configs.cbegin(), [](std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT> const &a, std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT> const &b) { return (a == b) || (a && b && *a == *b); })) &&
      (lhs.inference_priority == rhs.inference_priority) &&
      ((lhs.edgetpu_device_spec == rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == *rhs.edgetpu_device_spec)) &&
      (lhs.model_token == rhs.model_token) &&
      (lhs.float_truncation_type == rhs.float_truncation_type) &&
      (lhs.qos_class == rhs.qos_class) &&
      (lhs.hardware_cluster_ids == rhs.hardware_cluster_ids) &&
      (lhs.public_model_id == rhs.public_model_id) &&
      (lhs.use_layer_ir_tgc_backend == rhs.use_layer_ir_tgc_backend);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: needed because unique_ptr members are not copyable.
// Deep-copies edgetpu_device_spec and every element of
// inactive_power_configs (null entries stay null).
inline EdgeTpuSettingsT::EdgeTpuSettingsT(const EdgeTpuSettingsT &o)
      : inference_power_state(o.inference_power_state),
        inference_priority(o.inference_priority),
        edgetpu_device_spec((o.edgetpu_device_spec) ? new tflite::EdgeTpuDeviceSpecT(*o.edgetpu_device_spec) : nullptr),
        model_token(o.model_token),
        float_truncation_type(o.float_truncation_type),
        qos_class(o.qos_class),
        hardware_cluster_ids(o.hardware_cluster_ids),
        public_model_id(o.public_model_id),
        use_layer_ir_tgc_backend(o.use_layer_ir_tgc_backend) {
  inactive_power_configs.reserve(o.inactive_power_configs.size());
  for (const auto &inactive_power_configs_ : o.inactive_power_configs) { inactive_power_configs.emplace_back((inactive_power_configs_) ? new tflite::EdgeTpuInactivePowerConfigT(*inactive_power_configs_) : nullptr); }
}

// Copy-and-swap assignment: the parameter is taken by value (copied via the
// constructor above), then every member is swapped into *this.
inline EdgeTpuSettingsT &EdgeTpuSettingsT::operator=(EdgeTpuSettingsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(inference_power_state, o.inference_power_state);
  std::swap(inactive_power_configs, o.inactive_power_configs);
  std::swap(inference_priority, o.inference_priority);
  std::swap(edgetpu_device_spec, o.edgetpu_device_spec);
  std::swap(model_token, o.model_token);
  std::swap(float_truncation_type, o.float_truncation_type);
  std::swap(qos_class, o.qos_class);
  std::swap(hardware_cluster_ids, o.hardware_cluster_ids);
  std::swap(public_model_id, o.public_model_id);
  std::swap(use_layer_ir_tgc_backend, o.use_layer_ir_tgc_backend);
  return *this;
}

// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline EdgeTpuSettingsT *EdgeTpuSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<EdgeTpuSettingsT>(new EdgeTpuSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each present field into *_o. Sub-table fields reuse an existing
// destination object when one is already allocated (UnPackTo into it),
// otherwise allocate via UnPack; absent sub-tables reset the destination.
inline void EdgeTpuSettings::UnPackTo(EdgeTpuSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inference_power_state(); _o->inference_power_state = _e; }
  { auto _e = inactive_power_configs(); if (_e) { _o->inactive_power_configs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inactive_power_configs[_i]) { _e->Get(_i)->UnPackTo(_o->inactive_power_configs[_i].get(), _resolver); } else { _o->inactive_power_configs[_i] = std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->inactive_power_configs.resize(0); } }
  { auto _e = inference_priority(); _o->inference_priority = _e; }
  { auto _e = edgetpu_device_spec(); if (_e) { if(_o->edgetpu_device_spec) { _e->UnPackTo(_o->edgetpu_device_spec.get(), _resolver); } else { _o->edgetpu_device_spec = std::unique_ptr<tflite::EdgeTpuDeviceSpecT>(_e->UnPack(_resolver)); } } else if (_o->edgetpu_device_spec) { _o->edgetpu_device_spec.reset(); } }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
  { auto _e = float_truncation_type(); _o->float_truncation_type = _e; }
  { auto _e = qos_class(); _o->qos_class = _e; }
  { auto _e = hardware_cluster_ids(); if (_e) { _o->hardware_cluster_ids.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->hardware_cluster_ids[_i] = _e->Get(_i); } } else { _o->hardware_cluster_ids.resize(0); } }
  { auto _e = public_model_id(); if (_e) _o->public_model_id = _e->str(); }
  { auto _e = use_layer_ir_tgc_backend(); _o->use_layer_ir_tgc_backend = _e; }
}

// Serializes a native object; delegates to CreateEdgeTpuSettings.
inline ::flatbuffers::Offset<EdgeTpuSettings> EdgeTpuSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object. Sub-tables and vectors are
// serialized first (the inactive_power_configs lambda packs each element
// through _va); empty strings/vectors and null sub-tables yield 0 offsets.
inline ::flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(::flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inference_power_state = _o->inference_power_state;
  auto _inactive_power_configs = _o->inactive_power_configs.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> (_o->inactive_power_configs.size(), [](size_t i, _VectorArgs *__va) { return CreateEdgeTpuInactivePowerConfig(*__va->__fbb, __va->__o->inactive_power_configs[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _inference_priority = _o->inference_priority;
  auto _edgetpu_device_spec = _o->edgetpu_device_spec ? CreateEdgeTpuDeviceSpec(_fbb, _o->edgetpu_device_spec.get(), _rehasher) : 0;
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  auto _float_truncation_type = _o->float_truncation_type;
  auto _qos_class = _o->qos_class;
  auto _hardware_cluster_ids = _o->hardware_cluster_ids.size() ? _fbb.CreateVector(_o->hardware_cluster_ids) : 0;
  auto _public_model_id = _o->public_model_id.empty() ? 0 : _fbb.CreateString(_o->public_model_id);
  auto _use_layer_ir_tgc_backend = _o->use_layer_ir_tgc_backend;
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      _inference_power_state,
      _inactive_power_configs,
      _inference_priority,
      _edgetpu_device_spec,
      _model_token,
      _float_truncation_type,
      _qos_class,
      _hardware_cluster_ids,
      _public_model_id,
      _use_layer_ir_tgc_backend);
}


// Field-by-field equality for the native GoogleEdgeTpuSettingsT type;
// extension_data (a byte vector) compares element-wise via std::vector.
inline bool operator==(const GoogleEdgeTpuSettingsT &lhs, const GoogleEdgeTpuSettingsT &rhs) {
  return
      (lhs.log_verbosity == rhs.log_verbosity) &&
      (lhs.enable_tracing == rhs.enable_tracing) &&
      (lhs.priority == rhs.priority) &&
      (lhs.extension_data == rhs.extension_data) &&
      (lhs.model_identifier == rhs.model_identifier) &&
      (lhs.use_async_api == rhs.use_async_api) &&
      (lhs.delegate_should_manage_cache_for_inputs == rhs.delegate_should_manage_cache_for_inputs) &&
      (lhs.delegate_should_manage_cache_for_outputs == rhs.delegate_should_manage_cache_for_outputs) &&
      (lhs.prefer_cache_coherency_for_inputs == rhs.prefer_cache_coherency_for_inputs) &&
      (lhs.prefer_cache_coherency_for_outputs == rhs.prefer_cache_coherency_for_outputs) &&
      (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const GoogleEdgeTpuSettingsT &lhs, const GoogleEdgeTpuSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline GoogleEdgeTpuSettingsT *GoogleEdgeTpuSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<GoogleEdgeTpuSettingsT>(new GoogleEdgeTpuSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each present field into *_o; extension_data is bulk-copied with
// std::copy. NOTE: unlike the vector fields in sibling UnPackTo methods,
// extension_data is left unchanged (not cleared) when absent in the buffer.
inline void GoogleEdgeTpuSettings::UnPackTo(GoogleEdgeTpuSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = log_verbosity(); _o->log_verbosity = _e; }
  { auto _e = enable_tracing(); _o->enable_tracing = _e; }
  { auto _e = priority(); _o->priority = _e; }
  { auto _e = extension_data(); if (_e) { _o->extension_data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->extension_data.begin()); } }
  { auto _e = model_identifier(); if (_e) _o->model_identifier = _e->str(); }
  { auto _e = use_async_api(); _o->use_async_api = _e; }
  { auto _e = delegate_should_manage_cache_for_inputs(); _o->delegate_should_manage_cache_for_inputs = _e; }
  { auto _e = delegate_should_manage_cache_for_outputs(); _o->delegate_should_manage_cache_for_outputs = _e; }
  { auto _e = prefer_cache_coherency_for_inputs(); _o->prefer_cache_coherency_for_inputs = _e; }
  { auto _e = prefer_cache_coherency_for_outputs(); _o->prefer_cache_coherency_for_outputs = _e; }
  { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; }
}

// Serializes a native object; delegates to CreateGoogleEdgeTpuSettings.
inline ::flatbuffers::Offset<GoogleEdgeTpuSettings> GoogleEdgeTpuSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const GoogleEdgeTpuSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGoogleEdgeTpuSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object; empty extension_data and
// model_identifier yield 0 offsets (fields absent in the buffer).
inline ::flatbuffers::Offset<GoogleEdgeTpuSettings> CreateGoogleEdgeTpuSettings(::flatbuffers::FlatBufferBuilder &_fbb, const GoogleEdgeTpuSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const GoogleEdgeTpuSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _log_verbosity = _o->log_verbosity;
  auto _enable_tracing = _o->enable_tracing;
  auto _priority = _o->priority;
  auto _extension_data = _o->extension_data.size() ? _fbb.CreateVector(_o->extension_data) : 0;
  auto _model_identifier = _o->model_identifier.empty() ? 0 : _fbb.CreateString(_o->model_identifier);
  auto _use_async_api = _o->use_async_api;
  auto _delegate_should_manage_cache_for_inputs = _o->delegate_should_manage_cache_for_inputs;
  auto _delegate_should_manage_cache_for_outputs = _o->delegate_should_manage_cache_for_outputs;
  auto _prefer_cache_coherency_for_inputs = _o->prefer_cache_coherency_for_inputs;
  auto _prefer_cache_coherency_for_outputs = _o->prefer_cache_coherency_for_outputs;
  auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32;
  return tflite::CreateGoogleEdgeTpuSettings(
      _fbb,
      _log_verbosity,
      _enable_tracing,
      _priority,
      _extension_data,
      _model_identifier,
      _use_async_api,
      _delegate_should_manage_cache_for_inputs,
      _delegate_should_manage_cache_for_outputs,
      _prefer_cache_coherency_for_inputs,
      _prefer_cache_coherency_for_outputs,
      _allow_fp16_precision_for_fp32);
}


// Field-by-field equality for the native CoralSettingsT object type.
inline bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
  return
      (lhs.device == rhs.device) &&
      (lhs.performance == rhs.performance) &&
      (lhs.usb_always_dfu == rhs.usb_always_dfu) &&
      (lhs.usb_max_bulk_in_queue_length == rhs.usb_max_bulk_in_queue_length);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline CoralSettingsT *CoralSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<CoralSettingsT>(new CoralSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each field into *_o; the device string only when present.
inline void CoralSettings::UnPackTo(CoralSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = device(); if (_e) _o->device = _e->str(); }
  { auto _e = performance(); _o->performance = _e; }
  { auto _e = usb_always_dfu(); _o->usb_always_dfu = _e; }
  { auto _e = usb_max_bulk_in_queue_length(); _o->usb_max_bulk_in_queue_length = _e; }
}

// Serializes a native object; delegates to CreateCoralSettings.
inline ::flatbuffers::Offset<CoralSettings> CoralSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCoralSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object; an empty device string yields
// a 0 offset (field absent in the buffer).
inline ::flatbuffers::Offset<CoralSettings> CreateCoralSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CoralSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _device = _o->device.empty() ? 0 : _fbb.CreateString(_o->device);
  auto _performance = _o->performance;
  auto _usb_always_dfu = _o->usb_always_dfu;
  auto _usb_max_bulk_in_queue_length = _o->usb_max_bulk_in_queue_length;
  return tflite::CreateCoralSettings(
      _fbb,
      _device,
      _performance,
      _usb_always_dfu,
      _usb_max_bulk_in_queue_length);
}


// Equality for the native CPUSettingsT object type (single field).
inline bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline CPUSettingsT *CPUSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<CPUSettingsT>(new CPUSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies the num_threads field of this table into the native object *_o.
inline void CPUSettings::UnPackTo(CPUSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
}

// Serializes a native object; delegates to CreateCPUSettings.
inline ::flatbuffers::Offset<CPUSettings> CPUSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCPUSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object (single scalar field).
inline ::flatbuffers::Offset<CPUSettings> CreateCPUSettings(::flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const CPUSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  return tflite::CreateCPUSettings(
      _fbb,
      _num_threads);
}


// Field-by-field equality for the native ArmNNSettingsT object type.
inline bool operator==(const ArmNNSettingsT &lhs, const ArmNNSettingsT &rhs) {
  return
      (lhs.backends == rhs.backends) &&
      (lhs.fastmath == rhs.fastmath) &&
      (lhs.additional_parameters == rhs.additional_parameters);
}

// Inequality defined in terms of operator== above.
inline bool operator!=(const ArmNNSettingsT &lhs, const ArmNNSettingsT &rhs) {
    return !(lhs == rhs);
}


// Allocates a new native object and fills it from this table.
// Caller takes ownership of the returned pointer.
inline ArmNNSettingsT *ArmNNSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ArmNNSettingsT>(new ArmNNSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies each field into *_o; string fields only when present.
inline void ArmNNSettings::UnPackTo(ArmNNSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = backends(); if (_e) _o->backends = _e->str(); }
  { auto _e = fastmath(); _o->fastmath = _e; }
  { auto _e = additional_parameters(); if (_e) _o->additional_parameters = _e->str(); }
}

// Serializes a native object; delegates to CreateArmNNSettings.
inline ::flatbuffers::Offset<ArmNNSettings> ArmNNSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ArmNNSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateArmNNSettings(_fbb, _o, _rehasher);
}

// Builds the table from the native object; empty strings yield 0 offsets
// (fields absent in the buffer).
inline ::flatbuffers::Offset<ArmNNSettings> CreateArmNNSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ArmNNSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ArmNNSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _backends = _o->backends.empty() ? 0 : _fbb.CreateString(_o->backends);
  auto _fastmath = _o->fastmath;
  auto _additional_parameters = _o->additional_parameters.empty() ? 0 : _fbb.CreateString(_o->additional_parameters);
  return tflite::CreateArmNNSettings(
      _fbb,
      _backends,
      _fastmath,
      _additional_parameters);
}


inline bool operator==(const MtkNeuronSettingsT &lhs, const MtkNeuronSettingsT &rhs) {
  return
      (lhs.execution_preference == rhs.execution_preference) &&
      (lhs.execution_priority == rhs.execution_priority) &&
      (lhs.optimization_hints == rhs.optimization_hints) &&
      (lhs.operation_check_mode == rhs.operation_check_mode) &&
      (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32) &&
      (lhs.use_ahwb == rhs.use_ahwb) &&
      (lhs.use_cacheable_buffer == rhs.use_cacheable_buffer) &&
      (lhs.compile_options == rhs.compile_options) &&
      (lhs.accelerator_names == rhs.accelerator_names) &&
      (lhs.neuron_config_path == rhs.neuron_config_path) &&
      (lhs.inference_deadline_ms == rhs.inference_deadline_ms) &&
      (lhs.inference_abort_time_ms == rhs.inference_abort_time_ms);
}

inline bool operator!=(const MtkNeuronSettingsT &lhs, const MtkNeuronSettingsT &rhs) {
    return !(lhs == rhs);
}


inline MtkNeuronSettingsT *MtkNeuronSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<MtkNeuronSettingsT>(new MtkNeuronSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this flatbuffer table into *_o. Scalars are assigned
// directly; vector fields are resized to the source size and copied element
// by element (with an enum cast for optimization_hints and ->str() for string
// vectors), and an absent vector clears the destination to size 0. The
// optional string neuron_config_path is only overwritten when present.
inline void MtkNeuronSettings::UnPackTo(MtkNeuronSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = execution_preference(); _o->execution_preference = _e; }
  { auto _e = execution_priority(); _o->execution_priority = _e; }
  { auto _e = optimization_hints(); if (_e) { _o->optimization_hints.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->optimization_hints[_i] = static_cast<tflite::MtkNeuronSettings_::OptimizationHint>(_e->Get(_i)); } } else { _o->optimization_hints.resize(0); } }
  { auto _e = operation_check_mode(); _o->operation_check_mode = _e; }
  { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; }
  { auto _e = use_ahwb(); _o->use_ahwb = _e; }
  { auto _e = use_cacheable_buffer(); _o->use_cacheable_buffer = _e; }
  { auto _e = compile_options(); if (_e) { _o->compile_options.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->compile_options[_i] = _e->Get(_i)->str(); } } else { _o->compile_options.resize(0); } }
  { auto _e = accelerator_names(); if (_e) { _o->accelerator_names.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->accelerator_names[_i] = _e->Get(_i)->str(); } } else { _o->accelerator_names.resize(0); } }
  { auto _e = neuron_config_path(); if (_e) _o->neuron_config_path = _e->str(); }
  { auto _e = inference_deadline_ms(); _o->inference_deadline_ms = _e; }
  { auto _e = inference_abort_time_ms(); _o->inference_abort_time_ms = _e; }
}

// Thin wrapper: packing an object-API instance delegates to the free
// CreateMtkNeuronSettings factory below.
inline ::flatbuffers::Offset<MtkNeuronSettings> MtkNeuronSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MtkNeuronSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateMtkNeuronSettings(_fbb, _o, _rehasher);
}

// Serializes an object-API MtkNeuronSettingsT into the flatbuffer being built
// in _fbb and returns the offset of the new table. Empty vectors and strings
// are encoded as absent fields (offset 0).
inline ::flatbuffers::Offset<MtkNeuronSettings> CreateMtkNeuronSettings(::flatbuffers::FlatBufferBuilder &_fbb, const MtkNeuronSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va exists for generated vector-building lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MtkNeuronSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _execution_preference = _o->execution_preference;
  auto _execution_priority = _o->execution_priority;
  // Enum vector is written as its underlying int32_t representation.
  auto _optimization_hints = _o->optimization_hints.size() ? _fbb.CreateVectorScalarCast<int32_t>(::flatbuffers::data(_o->optimization_hints), _o->optimization_hints.size()) : 0;
  auto _operation_check_mode = _o->operation_check_mode;
  auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32;
  auto _use_ahwb = _o->use_ahwb;
  auto _use_cacheable_buffer = _o->use_cacheable_buffer;
  auto _compile_options = _o->compile_options.size() ? _fbb.CreateVectorOfStrings(_o->compile_options) : 0;
  auto _accelerator_names = _o->accelerator_names.size() ? _fbb.CreateVectorOfStrings(_o->accelerator_names) : 0;
  auto _neuron_config_path = _o->neuron_config_path.empty() ? 0 : _fbb.CreateString(_o->neuron_config_path);
  auto _inference_deadline_ms = _o->inference_deadline_ms;
  auto _inference_abort_time_ms = _o->inference_abort_time_ms;
  return tflite::CreateMtkNeuronSettings(
      _fbb,
      _execution_preference,
      _execution_priority,
      _optimization_hints,
      _operation_check_mode,
      _allow_fp16_precision_for_fp32,
      _use_ahwb,
      _use_cacheable_buffer,
      _compile_options,
      _accelerator_names,
      _neuron_config_path,
      _inference_deadline_ms,
      _inference_abort_time_ms);
}


// Deep equality for the TFLiteSettings object-API type. Each owned sub-table
// (unique_ptr member) compares equal when both sides are null (the raw
// pointer comparison) or both non-null and deep-equal via the pointee's
// operator==.
inline bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
  return
      (lhs.delegate == rhs.delegate) &&
      ((lhs.nnapi_settings == rhs.nnapi_settings) || (lhs.nnapi_settings && rhs.nnapi_settings && *lhs.nnapi_settings == *rhs.nnapi_settings)) &&
      ((lhs.gpu_settings == rhs.gpu_settings) || (lhs.gpu_settings && rhs.gpu_settings && *lhs.gpu_settings == *rhs.gpu_settings)) &&
      ((lhs.hexagon_settings == rhs.hexagon_settings) || (lhs.hexagon_settings && rhs.hexagon_settings && *lhs.hexagon_settings == *rhs.hexagon_settings)) &&
      ((lhs.xnnpack_settings == rhs.xnnpack_settings) || (lhs.xnnpack_settings && rhs.xnnpack_settings && *lhs.xnnpack_settings == *rhs.xnnpack_settings)) &&
      ((lhs.coreml_settings == rhs.coreml_settings) || (lhs.coreml_settings && rhs.coreml_settings && *lhs.coreml_settings == *rhs.coreml_settings)) &&
      ((lhs.cpu_settings == rhs.cpu_settings) || (lhs.cpu_settings && rhs.cpu_settings && *lhs.cpu_settings == *rhs.cpu_settings)) &&
      (lhs.max_delegated_partitions == rhs.max_delegated_partitions) &&
      ((lhs.edgetpu_settings == rhs.edgetpu_settings) || (lhs.edgetpu_settings && rhs.edgetpu_settings && *lhs.edgetpu_settings == *rhs.edgetpu_settings)) &&
      ((lhs.coral_settings == rhs.coral_settings) || (lhs.coral_settings && rhs.coral_settings && *lhs.coral_settings == *rhs.coral_settings)) &&
      ((lhs.fallback_settings == rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings)) &&
      (lhs.disable_default_delegates == rhs.disable_default_delegates) &&
      ((lhs.stable_delegate_loader_settings == rhs.stable_delegate_loader_settings) || (lhs.stable_delegate_loader_settings && rhs.stable_delegate_loader_settings && *lhs.stable_delegate_loader_settings == *rhs.stable_delegate_loader_settings)) &&
      ((lhs.google_edgetpu_settings == rhs.google_edgetpu_settings) || (lhs.google_edgetpu_settings && rhs.google_edgetpu_settings && *lhs.google_edgetpu_settings == *rhs.google_edgetpu_settings)) &&
      ((lhs.compilation_caching_settings == rhs.compilation_caching_settings) || (lhs.compilation_caching_settings && rhs.compilation_caching_settings && *lhs.compilation_caching_settings == *rhs.compilation_caching_settings)) &&
      ((lhs.armnn_settings == rhs.armnn_settings) || (lhs.armnn_settings && rhs.armnn_settings && *lhs.armnn_settings == *rhs.armnn_settings)) &&
      ((lhs.mtk_neuron_settings == rhs.mtk_neuron_settings) || (lhs.mtk_neuron_settings && rhs.mtk_neuron_settings && *lhs.mtk_neuron_settings == *rhs.mtk_neuron_settings));
}

// Inequality defined as the negation of operator==.
inline bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
    return !(lhs == rhs);
}


// Deep-copy constructor: each owned sub-settings table is cloned when present
// in the source, otherwise the member stays null.
inline TFLiteSettingsT::TFLiteSettingsT(const TFLiteSettingsT &o)
      : delegate(o.delegate),
        nnapi_settings((o.nnapi_settings) ? new tflite::NNAPISettingsT(*o.nnapi_settings) : nullptr),
        gpu_settings((o.gpu_settings) ? new tflite::GPUSettingsT(*o.gpu_settings) : nullptr),
        hexagon_settings((o.hexagon_settings) ? new tflite::HexagonSettingsT(*o.hexagon_settings) : nullptr),
        xnnpack_settings((o.xnnpack_settings) ? new tflite::XNNPackSettingsT(*o.xnnpack_settings) : nullptr),
        coreml_settings((o.coreml_settings) ? new tflite::CoreMLSettingsT(*o.coreml_settings) : nullptr),
        cpu_settings((o.cpu_settings) ? new tflite::CPUSettingsT(*o.cpu_settings) : nullptr),
        max_delegated_partitions(o.max_delegated_partitions),
        edgetpu_settings((o.edgetpu_settings) ? new tflite::EdgeTpuSettingsT(*o.edgetpu_settings) : nullptr),
        coral_settings((o.coral_settings) ? new tflite::CoralSettingsT(*o.coral_settings) : nullptr),
        fallback_settings((o.fallback_settings) ? new tflite::FallbackSettingsT(*o.fallback_settings) : nullptr),
        disable_default_delegates(o.disable_default_delegates),
        stable_delegate_loader_settings((o.stable_delegate_loader_settings) ? new tflite::StableDelegateLoaderSettingsT(*o.stable_delegate_loader_settings) : nullptr),
        google_edgetpu_settings((o.google_edgetpu_settings) ? new tflite::GoogleEdgeTpuSettingsT(*o.google_edgetpu_settings) : nullptr),
        compilation_caching_settings((o.compilation_caching_settings) ? new tflite::CompilationCachingSettingsT(*o.compilation_caching_settings) : nullptr),
        armnn_settings((o.armnn_settings) ? new tflite::ArmNNSettingsT(*o.armnn_settings) : nullptr),
        mtk_neuron_settings((o.mtk_neuron_settings) ? new tflite::MtkNeuronSettingsT(*o.mtk_neuron_settings) : nullptr) {
}

// Copy-and-swap assignment: the by-value parameter is a copy (or move) of the
// source, so swapping members with it implements both copy- and
// move-assignment; the parameter's destructor releases the old state.
inline TFLiteSettingsT &TFLiteSettingsT::operator=(TFLiteSettingsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(delegate, o.delegate);
  std::swap(nnapi_settings, o.nnapi_settings);
  std::swap(gpu_settings, o.gpu_settings);
  std::swap(hexagon_settings, o.hexagon_settings);
  std::swap(xnnpack_settings, o.xnnpack_settings);
  std::swap(coreml_settings, o.coreml_settings);
  std::swap(cpu_settings, o.cpu_settings);
  std::swap(max_delegated_partitions, o.max_delegated_partitions);
  std::swap(edgetpu_settings, o.edgetpu_settings);
  std::swap(coral_settings, o.coral_settings);
  std::swap(fallback_settings, o.fallback_settings);
  std::swap(disable_default_delegates, o.disable_default_delegates);
  std::swap(stable_delegate_loader_settings, o.stable_delegate_loader_settings);
  std::swap(google_edgetpu_settings, o.google_edgetpu_settings);
  std::swap(compilation_caching_settings, o.compilation_caching_settings);
  std::swap(armnn_settings, o.armnn_settings);
  std::swap(mtk_neuron_settings, o.mtk_neuron_settings);
  return *this;
}

// Unpacks this flatbuffer table into a freshly heap-allocated object-API
// instance. The caller takes ownership of the returned raw pointer.
inline TFLiteSettingsT *TFLiteSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<TFLiteSettingsT>(new TFLiteSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this flatbuffer table into *_o. For each sub-table
// field: if present in the buffer, it is unpacked into the existing owned
// object when one exists (reusing the allocation) or into a new one
// otherwise; if absent, any previously owned object is released.
inline void TFLiteSettings::UnPackTo(TFLiteSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = delegate(); _o->delegate = _e; }
  { auto _e = nnapi_settings(); if (_e) { if(_o->nnapi_settings) { _e->UnPackTo(_o->nnapi_settings.get(), _resolver); } else { _o->nnapi_settings = std::unique_ptr<tflite::NNAPISettingsT>(_e->UnPack(_resolver)); } } else if (_o->nnapi_settings) { _o->nnapi_settings.reset(); } }
  { auto _e = gpu_settings(); if (_e) { if(_o->gpu_settings) { _e->UnPackTo(_o->gpu_settings.get(), _resolver); } else { _o->gpu_settings = std::unique_ptr<tflite::GPUSettingsT>(_e->UnPack(_resolver)); } } else if (_o->gpu_settings) { _o->gpu_settings.reset(); } }
  { auto _e = hexagon_settings(); if (_e) { if(_o->hexagon_settings) { _e->UnPackTo(_o->hexagon_settings.get(), _resolver); } else { _o->hexagon_settings = std::unique_ptr<tflite::HexagonSettingsT>(_e->UnPack(_resolver)); } } else if (_o->hexagon_settings) { _o->hexagon_settings.reset(); } }
  { auto _e = xnnpack_settings(); if (_e) { if(_o->xnnpack_settings) { _e->UnPackTo(_o->xnnpack_settings.get(), _resolver); } else { _o->xnnpack_settings = std::unique_ptr<tflite::XNNPackSettingsT>(_e->UnPack(_resolver)); } } else if (_o->xnnpack_settings) { _o->xnnpack_settings.reset(); } }
  { auto _e = coreml_settings(); if (_e) { if(_o->coreml_settings) { _e->UnPackTo(_o->coreml_settings.get(), _resolver); } else { _o->coreml_settings = std::unique_ptr<tflite::CoreMLSettingsT>(_e->UnPack(_resolver)); } } else if (_o->coreml_settings) { _o->coreml_settings.reset(); } }
  { auto _e = cpu_settings(); if (_e) { if(_o->cpu_settings) { _e->UnPackTo(_o->cpu_settings.get(), _resolver); } else { _o->cpu_settings = std::unique_ptr<tflite::CPUSettingsT>(_e->UnPack(_resolver)); } } else if (_o->cpu_settings) { _o->cpu_settings.reset(); } }
  { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; }
  { auto _e = edgetpu_settings(); if (_e) { if(_o->edgetpu_settings) { _e->UnPackTo(_o->edgetpu_settings.get(), _resolver); } else { _o->edgetpu_settings = std::unique_ptr<tflite::EdgeTpuSettingsT>(_e->UnPack(_resolver)); } } else if (_o->edgetpu_settings) { _o->edgetpu_settings.reset(); } }
  { auto _e = coral_settings(); if (_e) { if(_o->coral_settings) { _e->UnPackTo(_o->coral_settings.get(), _resolver); } else { _o->coral_settings = std::unique_ptr<tflite::CoralSettingsT>(_e->UnPack(_resolver)); } } else if (_o->coral_settings) { _o->coral_settings.reset(); } }
  { auto _e = fallback_settings(); if (_e) { if(_o->fallback_settings) { _e->UnPackTo(_o->fallback_settings.get(), _resolver); } else { _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); } } else if (_o->fallback_settings) { _o->fallback_settings.reset(); } }
  { auto _e = disable_default_delegates(); _o->disable_default_delegates = _e; }
  { auto _e = stable_delegate_loader_settings(); if (_e) { if(_o->stable_delegate_loader_settings) { _e->UnPackTo(_o->stable_delegate_loader_settings.get(), _resolver); } else { _o->stable_delegate_loader_settings = std::unique_ptr<tflite::StableDelegateLoaderSettingsT>(_e->UnPack(_resolver)); } } else if (_o->stable_delegate_loader_settings) { _o->stable_delegate_loader_settings.reset(); } }
  { auto _e = google_edgetpu_settings(); if (_e) { if(_o->google_edgetpu_settings) { _e->UnPackTo(_o->google_edgetpu_settings.get(), _resolver); } else { _o->google_edgetpu_settings = std::unique_ptr<tflite::GoogleEdgeTpuSettingsT>(_e->UnPack(_resolver)); } } else if (_o->google_edgetpu_settings) { _o->google_edgetpu_settings.reset(); } }
  { auto _e = compilation_caching_settings(); if (_e) { if(_o->compilation_caching_settings) { _e->UnPackTo(_o->compilation_caching_settings.get(), _resolver); } else { _o->compilation_caching_settings = std::unique_ptr<tflite::CompilationCachingSettingsT>(_e->UnPack(_resolver)); } } else if (_o->compilation_caching_settings) { _o->compilation_caching_settings.reset(); } }
  { auto _e = armnn_settings(); if (_e) { if(_o->armnn_settings) { _e->UnPackTo(_o->armnn_settings.get(), _resolver); } else { _o->armnn_settings = std::unique_ptr<tflite::ArmNNSettingsT>(_e->UnPack(_resolver)); } } else if (_o->armnn_settings) { _o->armnn_settings.reset(); } }
  { auto _e = mtk_neuron_settings(); if (_e) { if(_o->mtk_neuron_settings) { _e->UnPackTo(_o->mtk_neuron_settings.get(), _resolver); } else { _o->mtk_neuron_settings = std::unique_ptr<tflite::MtkNeuronSettingsT>(_e->UnPack(_resolver)); } } else if (_o->mtk_neuron_settings) { _o->mtk_neuron_settings.reset(); } }
}

// Thin wrapper: packing an object-API instance delegates to the free
// CreateTFLiteSettings factory below.
inline ::flatbuffers::Offset<TFLiteSettings> TFLiteSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTFLiteSettings(_fbb, _o, _rehasher);
}

// Serializes an object-API TFLiteSettingsT into the flatbuffer being built in
// _fbb and returns the offset of the new table. Each owned sub-table is
// packed recursively when present; a null sub-table is encoded as an absent
// field (offset 0).
inline ::flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(::flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va exists for generated vector-building lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TFLiteSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _delegate = _o->delegate;
  auto _nnapi_settings = _o->nnapi_settings ? CreateNNAPISettings(_fbb, _o->nnapi_settings.get(), _rehasher) : 0;
  auto _gpu_settings = _o->gpu_settings ? CreateGPUSettings(_fbb, _o->gpu_settings.get(), _rehasher) : 0;
  auto _hexagon_settings = _o->hexagon_settings ? CreateHexagonSettings(_fbb, _o->hexagon_settings.get(), _rehasher) : 0;
  auto _xnnpack_settings = _o->xnnpack_settings ? CreateXNNPackSettings(_fbb, _o->xnnpack_settings.get(), _rehasher) : 0;
  auto _coreml_settings = _o->coreml_settings ? CreateCoreMLSettings(_fbb, _o->coreml_settings.get(), _rehasher) : 0;
  auto _cpu_settings = _o->cpu_settings ? CreateCPUSettings(_fbb, _o->cpu_settings.get(), _rehasher) : 0;
  auto _max_delegated_partitions = _o->max_delegated_partitions;
  auto _edgetpu_settings = _o->edgetpu_settings ? CreateEdgeTpuSettings(_fbb, _o->edgetpu_settings.get(), _rehasher) : 0;
  auto _coral_settings = _o->coral_settings ? CreateCoralSettings(_fbb, _o->coral_settings.get(), _rehasher) : 0;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  auto _disable_default_delegates = _o->disable_default_delegates;
  auto _stable_delegate_loader_settings = _o->stable_delegate_loader_settings ? CreateStableDelegateLoaderSettings(_fbb, _o->stable_delegate_loader_settings.get(), _rehasher) : 0;
  auto _google_edgetpu_settings = _o->google_edgetpu_settings ? CreateGoogleEdgeTpuSettings(_fbb, _o->google_edgetpu_settings.get(), _rehasher) : 0;
  auto _compilation_caching_settings = _o->compilation_caching_settings ? CreateCompilationCachingSettings(_fbb, _o->compilation_caching_settings.get(), _rehasher) : 0;
  auto _armnn_settings = _o->armnn_settings ? CreateArmNNSettings(_fbb, _o->armnn_settings.get(), _rehasher) : 0;
  auto _mtk_neuron_settings = _o->mtk_neuron_settings ? CreateMtkNeuronSettings(_fbb, _o->mtk_neuron_settings.get(), _rehasher) : 0;
  return tflite::CreateTFLiteSettings(
      _fbb,
      _delegate,
      _nnapi_settings,
      _gpu_settings,
      _hexagon_settings,
      _xnnpack_settings,
      _coreml_settings,
      _cpu_settings,
      _max_delegated_partitions,
      _edgetpu_settings,
      _coral_settings,
      _fallback_settings,
      _disable_default_delegates,
      _stable_delegate_loader_settings,
      _google_edgetpu_settings,
      _compilation_caching_settings,
      _armnn_settings,
      _mtk_neuron_settings);
}


// Field-by-field equality for the FallbackSettings object-API (T) type.
inline bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
  return
      (lhs.allow_automatic_fallback_on_compilation_error == rhs.allow_automatic_fallback_on_compilation_error) &&
      (lhs.allow_automatic_fallback_on_execution_error == rhs.allow_automatic_fallback_on_execution_error);
}

// Inequality defined as the negation of operator==.
inline bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
    return !(lhs == rhs);
}


// Unpacks this flatbuffer table into a freshly heap-allocated object-API
// instance. The caller takes ownership of the returned raw pointer.
inline FallbackSettingsT *FallbackSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<FallbackSettingsT>(new FallbackSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies both scalar fields of this flatbuffer table into *_o.
inline void FallbackSettings::UnPackTo(FallbackSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = allow_automatic_fallback_on_compilation_error(); _o->allow_automatic_fallback_on_compilation_error = _e; }
  { auto _e = allow_automatic_fallback_on_execution_error(); _o->allow_automatic_fallback_on_execution_error = _e; }
}

// Thin wrapper: packing an object-API instance delegates to the free
// CreateFallbackSettings factory below.
inline ::flatbuffers::Offset<FallbackSettings> FallbackSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateFallbackSettings(_fbb, _o, _rehasher);
}

// Serializes an object-API FallbackSettingsT into the flatbuffer being built
// in _fbb and returns the offset of the new table.
inline ::flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(::flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va exists for generated vector-building lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const FallbackSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _allow_automatic_fallback_on_compilation_error = _o->allow_automatic_fallback_on_compilation_error;
  auto _allow_automatic_fallback_on_execution_error = _o->allow_automatic_fallback_on_execution_error;
  return tflite::CreateFallbackSettings(
      _fbb,
      _allow_automatic_fallback_on_compilation_error,
      _allow_automatic_fallback_on_execution_error);
}


// Field-by-field equality for the BenchmarkMetric object-API (T) type.
inline bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
  return
      (lhs.name == rhs.name) &&
      (lhs.values == rhs.values);
}

// Inequality defined as the negation of operator==.
inline bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
    return !(lhs == rhs);
}


// Unpacks this flatbuffer table into a freshly heap-allocated object-API
// instance. The caller takes ownership of the returned raw pointer.
inline BenchmarkMetricT *BenchmarkMetric::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkMetricT>(new BenchmarkMetricT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies the name (only when present) and the values vector into *_o; an
// absent values vector clears the destination to size 0.
inline void BenchmarkMetric::UnPackTo(BenchmarkMetricT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = name(); if (_e) _o->name = _e->str(); }
  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } else { _o->values.resize(0); } }
}

// Thin wrapper: packing an object-API instance delegates to the free
// CreateBenchmarkMetric factory below.
inline ::flatbuffers::Offset<BenchmarkMetric> BenchmarkMetric::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkMetric(_fbb, _o, _rehasher);
}

// Serializes an object-API BenchmarkMetricT into the flatbuffer being built
// in _fbb. An empty name/values is encoded as an absent field (offset 0).
inline ::flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va exists for generated vector-building lambdas; unused for this table.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkMetricT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      _name,
      _values);
}


// Deep equality for the BenchmarkResult object-API type. The metrics and
// actual_output vectors of owned sub-tables compare element-wise: two
// elements are equal when both are null or both non-null and deep-equal.
inline bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) {
  return
      (lhs.initialization_time_us == rhs.initialization_time_us) &&
      (lhs.inference_time_us == rhs.inference_time_us) &&
      (lhs.max_memory_kb == rhs.max_memory_kb) &&
      (lhs.ok == rhs.ok) &&
      (lhs.metrics.size() == rhs.metrics.size() && std::equal(lhs.metrics.cbegin(), lhs.metrics.cend(), rhs.metrics.cbegin(), [](std::unique_ptr<tflite::BenchmarkMetricT> const &a, std::unique_ptr<tflite::BenchmarkMetricT> const &b) { return (a == b) || (a && b && *a == *b); })) &&
      (lhs.actual_output.size() == rhs.actual_output.size() && std::equal(lhs.actual_output.cbegin(), lhs.actual_output.cend(), rhs.actual_output.cbegin(), [](std::unique_ptr<tflite::BenchmarkResult_::InferenceOutputT> const &a, std::unique_ptr<tflite::BenchmarkResult_::InferenceOutputT> const &b) { return (a == b) || (a && b && *a == *b); }));
}

// Inequality defined as the negation of operator==.
inline bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) {
    return !(lhs == rhs);
}


// Deep-copy constructor: scalar/vector members are copied in the initializer
// list; the owned-pointer vectors are cloned element by element, preserving
// null entries as null.
inline BenchmarkResultT::BenchmarkResultT(const BenchmarkResultT &o)
      : initialization_time_us(o.initialization_time_us),
        inference_time_us(o.inference_time_us),
        max_memory_kb(o.max_memory_kb),
        ok(o.ok) {
  metrics.reserve(o.metrics.size());
  for (const auto &metrics_ : o.metrics) { metrics.emplace_back((metrics_) ? new tflite::BenchmarkMetricT(*metrics_) : nullptr); }
  actual_output.reserve(o.actual_output.size());
  for (const auto &actual_output_ : o.actual_output) { actual_output.emplace_back((actual_output_) ? new tflite::BenchmarkResult_::InferenceOutputT(*actual_output_) : nullptr); }
}

// Copy-and-swap assignment: the by-value parameter is a copy (or move) of the
// source; swapping members with it implements both copy- and move-assignment.
inline BenchmarkResultT &BenchmarkResultT::operator=(BenchmarkResultT o) FLATBUFFERS_NOEXCEPT {
  std::swap(initialization_time_us, o.initialization_time_us);
  std::swap(inference_time_us, o.inference_time_us);
  std::swap(max_memory_kb, o.max_memory_kb);
  std::swap(ok, o.ok);
  std::swap(metrics, o.metrics);
  std::swap(actual_output, o.actual_output);
  return *this;
}

// Unpacks this flatbuffer table into a freshly heap-allocated object-API
// instance. The caller takes ownership of the returned raw pointer.
inline BenchmarkResultT *BenchmarkResult::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkResultT>(new BenchmarkResultT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies every field of this flatbuffer table into *_o. Scalar vectors are
// resized and copied element-wise; for the sub-table vectors (metrics,
// actual_output) each element is unpacked into an existing owned object when
// one is already present at that index, otherwise into a new allocation.
// Absent vectors clear the destination to size 0.
inline void BenchmarkResult::UnPackTo(BenchmarkResultT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = initialization_time_us(); if (_e) { _o->initialization_time_us.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->initialization_time_us[_i] = _e->Get(_i); } } else { _o->initialization_time_us.resize(0); } }
  { auto _e = inference_time_us(); if (_e) { _o->inference_time_us.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inference_time_us[_i] = _e->Get(_i); } } else { _o->inference_time_us.resize(0); } }
  { auto _e = max_memory_kb(); _o->max_memory_kb = _e; }
  { auto _e = ok(); _o->ok = _e; }
  { auto _e = metrics(); if (_e) { _o->metrics.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metrics[_i]) { _e->Get(_i)->UnPackTo(_o->metrics[_i].get(), _resolver); } else { _o->metrics[_i] = std::unique_ptr<tflite::BenchmarkMetricT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->metrics.resize(0); } }
  { auto _e = actual_output(); if (_e) { _o->actual_output.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->actual_output[_i]) { _e->Get(_i)->UnPackTo(_o->actual_output[_i].get(), _resolver); } else { _o->actual_output[_i] = std::unique_ptr<tflite::BenchmarkResult_::InferenceOutputT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->actual_output.resize(0); } }
}

// Thin wrapper: packing an object-API instance delegates to the free
// CreateBenchmarkResult factory below.
inline ::flatbuffers::Offset<BenchmarkResult> BenchmarkResult::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkResult(_fbb, _o, _rehasher);
}

// Serializes an object-API BenchmarkResultT into the flatbuffer being built
// in _fbb. The sub-table vectors are built via CreateVector callbacks that
// pack each element through _va; empty vectors are encoded as absent
// (offset 0).
inline ::flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _va carries builder/object/rehasher into the element-packing lambdas.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkResultT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _initialization_time_us = _o->initialization_time_us.size() ? _fbb.CreateVector(_o->initialization_time_us) : 0;
  auto _inference_time_us = _o->inference_time_us.size() ? _fbb.CreateVector(_o->inference_time_us) : 0;
  auto _max_memory_kb = _o->max_memory_kb;
  auto _ok = _o->ok;
  auto _metrics = _o->metrics.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::BenchmarkMetric>> (_o->metrics.size(), [](size_t i, _VectorArgs *__va) { return CreateBenchmarkMetric(*__va->__fbb, __va->__o->metrics[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _actual_output = _o->actual_output.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::BenchmarkResult_::InferenceOutput>> (_o->actual_output.size(), [](size_t i, _VectorArgs *__va) { return CreateInferenceOutput(*__va->__fbb, __va->__o->actual_output[i].get(), __va->__rehasher); }, &_va ) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      _initialization_time_us,
      _inference_time_us,
      _max_memory_kb,
      _ok,
      _metrics,
      _actual_output);
}

// Object-API helpers for the nested table BenchmarkResult.InferenceOutput.
namespace BenchmarkResult_ {


// Field-wise equality for the native InferenceOutputT (single `value` field).
inline bool operator==(const InferenceOutputT &lhs, const InferenceOutputT &rhs) {
  return
      (lhs.value == rhs.value);
}

inline bool operator!=(const InferenceOutputT &lhs, const InferenceOutputT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native InferenceOutputT, fills it from this table, and
// transfers ownership to the caller.
inline InferenceOutputT *InferenceOutput::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<InferenceOutputT>(new InferenceOutputT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's fields into an existing native object `_o`.
inline void InferenceOutput::UnPackTo(InferenceOutputT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = value(); if (_e) { _o->value.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->value.begin()); } }
}

// Serializes a native InferenceOutputT; forwards to CreateInferenceOutput.
inline ::flatbuffers::Offset<InferenceOutput> InferenceOutput::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const InferenceOutputT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateInferenceOutput(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; an empty `value` vector is omitted (offset 0).
inline ::flatbuffers::Offset<InferenceOutput> CreateInferenceOutput(::flatbuffers::FlatBufferBuilder &_fbb, const InferenceOutputT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const InferenceOutputT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _value = _o->value.size() ? _fbb.CreateVector(_o->value) : 0;
  return tflite::BenchmarkResult_::CreateInferenceOutput(
      _fbb,
      _value);
}

}  // namespace BenchmarkResult_


// Field-wise equality for the native ErrorCodeT.
inline bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
  return
      (lhs.source == rhs.source) &&
      (lhs.tflite_error == rhs.tflite_error) &&
      (lhs.underlying_api_error == rhs.underlying_api_error);
}

inline bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native ErrorCodeT from this table; caller takes ownership.
inline ErrorCodeT *ErrorCode::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ErrorCodeT>(new ErrorCodeT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's scalar fields into an existing native object `_o`.
inline void ErrorCode::UnPackTo(ErrorCodeT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = source(); _o->source = _e; }
  { auto _e = tflite_error(); _o->tflite_error = _e; }
  { auto _e = underlying_api_error(); _o->underlying_api_error = _e; }
}

// Serializes a native ErrorCodeT; forwards to CreateErrorCode.
inline ::flatbuffers::Offset<ErrorCode> ErrorCode::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateErrorCode(_fbb, _o, _rehasher);
}

// Writes the three scalar fields of `_o` into `_fbb`.
inline ::flatbuffers::Offset<ErrorCode> CreateErrorCode(::flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ErrorCodeT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _source = _o->source;
  auto _tflite_error = _o->tflite_error;
  auto _underlying_api_error = _o->underlying_api_error;
  return tflite::CreateErrorCode(
      _fbb,
      _source,
      _tflite_error,
      _underlying_api_error);
}


// Deep field-wise equality for the native BenchmarkErrorT; the `error_code`
// vector of unique_ptrs compares element-by-element, treating two null
// entries as equal and otherwise dereferencing both.
inline bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) {
  return
      (lhs.stage == rhs.stage) &&
      (lhs.exit_code == rhs.exit_code) &&
      (lhs.signal == rhs.signal) &&
      (lhs.error_code.size() == rhs.error_code.size() && std::equal(lhs.error_code.cbegin(), lhs.error_code.cend(), rhs.error_code.cbegin(), [](std::unique_ptr<tflite::ErrorCodeT> const &a, std::unique_ptr<tflite::ErrorCodeT> const &b) { return (a == b) || (a && b && *a == *b); })) &&
      (lhs.mini_benchmark_error_code == rhs.mini_benchmark_error_code);
}

inline bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies each owned ErrorCodeT so the copy owns its
// own elements (null entries stay null).
inline BenchmarkErrorT::BenchmarkErrorT(const BenchmarkErrorT &o)
      : stage(o.stage),
        exit_code(o.exit_code),
        signal(o.signal),
        mini_benchmark_error_code(o.mini_benchmark_error_code) {
  error_code.reserve(o.error_code.size());
  for (const auto &error_code_ : o.error_code) { error_code.emplace_back((error_code_) ? new tflite::ErrorCodeT(*error_code_) : nullptr); }
}

// Copy-and-swap assignment: `o` is taken by value, members are exchanged.
inline BenchmarkErrorT &BenchmarkErrorT::operator=(BenchmarkErrorT o) FLATBUFFERS_NOEXCEPT {
  std::swap(stage, o.stage);
  std::swap(exit_code, o.exit_code);
  std::swap(signal, o.signal);
  std::swap(error_code, o.error_code);
  std::swap(mini_benchmark_error_code, o.mini_benchmark_error_code);
  return *this;
}

// Heap-allocates a native BenchmarkErrorT from this table; caller owns it.
inline BenchmarkErrorT *BenchmarkError::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkErrorT>(new BenchmarkErrorT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table into `_o`; existing error_code elements are reused
// in place when present, otherwise fresh ones are unpacked.
inline void BenchmarkError::UnPackTo(BenchmarkErrorT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = stage(); _o->stage = _e; }
  { auto _e = exit_code(); _o->exit_code = _e; }
  { auto _e = signal(); _o->signal = _e; }
  { auto _e = error_code(); if (_e) { _o->error_code.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->error_code[_i]) { _e->Get(_i)->UnPackTo(_o->error_code[_i].get(), _resolver); } else { _o->error_code[_i] = std::unique_ptr<tflite::ErrorCodeT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->error_code.resize(0); } }
  { auto _e = mini_benchmark_error_code(); _o->mini_benchmark_error_code = _e; }
}

// Serializes a native BenchmarkErrorT; forwards to CreateBenchmarkError.
inline ::flatbuffers::Offset<BenchmarkError> BenchmarkError::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkError(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; an empty error_code vector is omitted (offset 0).
inline ::flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkErrorT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _stage = _o->stage;
  auto _exit_code = _o->exit_code;
  auto _signal = _o->signal;
  auto _error_code = _o->error_code.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::ErrorCode>> (_o->error_code.size(), [](size_t i, _VectorArgs *__va) { return CreateErrorCode(*__va->__fbb, __va->__o->error_code[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _mini_benchmark_error_code = _o->mini_benchmark_error_code;
  return tflite::CreateBenchmarkError(
      _fbb,
      _stage,
      _exit_code,
      _signal,
      _error_code,
      _mini_benchmark_error_code);
}


// Deep field-wise equality for the native BenchmarkEventT: owned sub-objects
// are equal when both pointers are null, or both non-null and deep-equal.
inline bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
  return
      ((lhs.tflite_settings == rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings)) &&
      (lhs.event_type == rhs.event_type) &&
      ((lhs.result == rhs.result) || (lhs.result && rhs.result && *lhs.result == *rhs.result)) &&
      ((lhs.error == rhs.error) || (lhs.error && rhs.error && *lhs.error == *rhs.error)) &&
      (lhs.boottime_us == rhs.boottime_us) &&
      (lhs.wallclock_us == rhs.wallclock_us);
}

inline bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the owned sub-objects (nulls stay null).
inline BenchmarkEventT::BenchmarkEventT(const BenchmarkEventT &o)
      : tflite_settings((o.tflite_settings) ? new tflite::TFLiteSettingsT(*o.tflite_settings) : nullptr),
        event_type(o.event_type),
        result((o.result) ? new tflite::BenchmarkResultT(*o.result) : nullptr),
        error((o.error) ? new tflite::BenchmarkErrorT(*o.error) : nullptr),
        boottime_us(o.boottime_us),
        wallclock_us(o.wallclock_us) {
}

// Copy-and-swap assignment: `o` is taken by value, members are exchanged.
inline BenchmarkEventT &BenchmarkEventT::operator=(BenchmarkEventT o) FLATBUFFERS_NOEXCEPT {
  std::swap(tflite_settings, o.tflite_settings);
  std::swap(event_type, o.event_type);
  std::swap(result, o.result);
  std::swap(error, o.error);
  std::swap(boottime_us, o.boottime_us);
  std::swap(wallclock_us, o.wallclock_us);
  return *this;
}

// Heap-allocates a native BenchmarkEventT from this table; caller owns it.
inline BenchmarkEventT *BenchmarkEvent::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkEventT>(new BenchmarkEventT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table into `_o`; existing sub-objects are reused in place,
// and sub-objects absent from the table reset the destination pointer.
inline void BenchmarkEvent::UnPackTo(BenchmarkEventT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = tflite_settings(); if (_e) { if(_o->tflite_settings) { _e->UnPackTo(_o->tflite_settings.get(), _resolver); } else { _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); } } else if (_o->tflite_settings) { _o->tflite_settings.reset(); } }
  { auto _e = event_type(); _o->event_type = _e; }
  { auto _e = result(); if (_e) { if(_o->result) { _e->UnPackTo(_o->result.get(), _resolver); } else { _o->result = std::unique_ptr<tflite::BenchmarkResultT>(_e->UnPack(_resolver)); } } else if (_o->result) { _o->result.reset(); } }
  { auto _e = error(); if (_e) { if(_o->error) { _e->UnPackTo(_o->error.get(), _resolver); } else { _o->error = std::unique_ptr<tflite::BenchmarkErrorT>(_e->UnPack(_resolver)); } } else if (_o->error) { _o->error.reset(); } }
  { auto _e = boottime_us(); _o->boottime_us = _e; }
  { auto _e = wallclock_us(); _o->wallclock_us = _e; }
}

// Serializes a native BenchmarkEventT; forwards to CreateBenchmarkEvent.
inline ::flatbuffers::Offset<BenchmarkEvent> BenchmarkEvent::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkEvent(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; null sub-objects are omitted (offset 0).
inline ::flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkEventT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _event_type = _o->event_type;
  auto _result = _o->result ? CreateBenchmarkResult(_fbb, _o->result.get(), _rehasher) : 0;
  auto _error = _o->error ? CreateBenchmarkError(_fbb, _o->error.get(), _rehasher) : 0;
  auto _boottime_us = _o->boottime_us;
  auto _wallclock_us = _o->wallclock_us;
  return tflite::CreateBenchmarkEvent(
      _fbb,
      _tflite_settings,
      _event_type,
      _result,
      _error,
      _boottime_us,
      _wallclock_us);
}


// Deep field-wise equality for the native BestAccelerationDecisionT.
inline bool operator==(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs) {
  return
      (lhs.number_of_source_events == rhs.number_of_source_events) &&
      ((lhs.min_latency_event == rhs.min_latency_event) || (lhs.min_latency_event && rhs.min_latency_event && *lhs.min_latency_event == *rhs.min_latency_event)) &&
      (lhs.min_inference_time_us == rhs.min_inference_time_us);
}

inline bool operator!=(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the owned min_latency_event (null stays null).
inline BestAccelerationDecisionT::BestAccelerationDecisionT(const BestAccelerationDecisionT &o)
      : number_of_source_events(o.number_of_source_events),
        min_latency_event((o.min_latency_event) ? new tflite::BenchmarkEventT(*o.min_latency_event) : nullptr),
        min_inference_time_us(o.min_inference_time_us) {
}

// Copy-and-swap assignment: `o` is taken by value, members are exchanged.
inline BestAccelerationDecisionT &BestAccelerationDecisionT::operator=(BestAccelerationDecisionT o) FLATBUFFERS_NOEXCEPT {
  std::swap(number_of_source_events, o.number_of_source_events);
  std::swap(min_latency_event, o.min_latency_event);
  std::swap(min_inference_time_us, o.min_inference_time_us);
  return *this;
}

// Heap-allocates a native BestAccelerationDecisionT; caller owns it.
inline BestAccelerationDecisionT *BestAccelerationDecision::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BestAccelerationDecisionT>(new BestAccelerationDecisionT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table into `_o`; an absent min_latency_event resets the
// destination pointer.
inline void BestAccelerationDecision::UnPackTo(BestAccelerationDecisionT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = number_of_source_events(); _o->number_of_source_events = _e; }
  { auto _e = min_latency_event(); if (_e) { if(_o->min_latency_event) { _e->UnPackTo(_o->min_latency_event.get(), _resolver); } else { _o->min_latency_event = std::unique_ptr<tflite::BenchmarkEventT>(_e->UnPack(_resolver)); } } else if (_o->min_latency_event) { _o->min_latency_event.reset(); } }
  { auto _e = min_inference_time_us(); _o->min_inference_time_us = _e; }
}

// Serializes a native object; forwards to CreateBestAccelerationDecision.
inline ::flatbuffers::Offset<BestAccelerationDecision> BestAccelerationDecision::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBestAccelerationDecision(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; a null min_latency_event is omitted (offset 0).
inline ::flatbuffers::Offset<BestAccelerationDecision> CreateBestAccelerationDecision(::flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BestAccelerationDecisionT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _number_of_source_events = _o->number_of_source_events;
  auto _min_latency_event = _o->min_latency_event ? CreateBenchmarkEvent(_fbb, _o->min_latency_event.get(), _rehasher) : 0;
  auto _min_inference_time_us = _o->min_inference_time_us;
  return tflite::CreateBestAccelerationDecision(
      _fbb,
      _number_of_source_events,
      _min_latency_event,
      _min_inference_time_us);
}


// Field-wise equality for the native BenchmarkInitializationFailureT
// (single scalar field).
inline bool operator==(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs) {
  return
      (lhs.initialization_status == rhs.initialization_status);
}

inline bool operator!=(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native BenchmarkInitializationFailureT; caller owns it.
inline BenchmarkInitializationFailureT *BenchmarkInitializationFailure::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkInitializationFailureT>(new BenchmarkInitializationFailureT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's scalar field into an existing native object `_o`.
inline void BenchmarkInitializationFailure::UnPackTo(BenchmarkInitializationFailureT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = initialization_status(); _o->initialization_status = _e; }
}

// Serializes a native object; forwards to the Create helper below.
inline ::flatbuffers::Offset<BenchmarkInitializationFailure> BenchmarkInitializationFailure::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkInitializationFailure(_fbb, _o, _rehasher);
}

// Writes the single scalar field of `_o` into `_fbb`.
inline ::flatbuffers::Offset<BenchmarkInitializationFailure> CreateBenchmarkInitializationFailure(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkInitializationFailureT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _initialization_status = _o->initialization_status;
  return tflite::CreateBenchmarkInitializationFailure(
      _fbb,
      _initialization_status);
}


// Deep field-wise equality for the native MiniBenchmarkEventT: owned
// sub-objects are equal when both null, or both non-null and deep-equal.
inline bool operator==(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs) {
  return
      (lhs.is_log_flushing_event == rhs.is_log_flushing_event) &&
      ((lhs.best_acceleration_decision == rhs.best_acceleration_decision) || (lhs.best_acceleration_decision && rhs.best_acceleration_decision && *lhs.best_acceleration_decision == *rhs.best_acceleration_decision)) &&
      ((lhs.initialization_failure == rhs.initialization_failure) || (lhs.initialization_failure && rhs.initialization_failure && *lhs.initialization_failure == *rhs.initialization_failure)) &&
      ((lhs.benchmark_event == rhs.benchmark_event) || (lhs.benchmark_event && rhs.benchmark_event && *lhs.benchmark_event == *rhs.benchmark_event));
}

inline bool operator!=(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the owned sub-objects (nulls stay null).
inline MiniBenchmarkEventT::MiniBenchmarkEventT(const MiniBenchmarkEventT &o)
      : is_log_flushing_event(o.is_log_flushing_event),
        best_acceleration_decision((o.best_acceleration_decision) ? new tflite::BestAccelerationDecisionT(*o.best_acceleration_decision) : nullptr),
        initialization_failure((o.initialization_failure) ? new tflite::BenchmarkInitializationFailureT(*o.initialization_failure) : nullptr),
        benchmark_event((o.benchmark_event) ? new tflite::BenchmarkEventT(*o.benchmark_event) : nullptr) {
}

// Copy-and-swap assignment: `o` is taken by value, members are exchanged.
inline MiniBenchmarkEventT &MiniBenchmarkEventT::operator=(MiniBenchmarkEventT o) FLATBUFFERS_NOEXCEPT {
  std::swap(is_log_flushing_event, o.is_log_flushing_event);
  std::swap(best_acceleration_decision, o.best_acceleration_decision);
  std::swap(initialization_failure, o.initialization_failure);
  std::swap(benchmark_event, o.benchmark_event);
  return *this;
}

// Heap-allocates a native MiniBenchmarkEventT; caller owns it.
inline MiniBenchmarkEventT *MiniBenchmarkEvent::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<MiniBenchmarkEventT>(new MiniBenchmarkEventT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table into `_o`; existing sub-objects are reused in place,
// and sub-objects absent from the table reset the destination pointer.
inline void MiniBenchmarkEvent::UnPackTo(MiniBenchmarkEventT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = is_log_flushing_event(); _o->is_log_flushing_event = _e; }
  { auto _e = best_acceleration_decision(); if (_e) { if(_o->best_acceleration_decision) { _e->UnPackTo(_o->best_acceleration_decision.get(), _resolver); } else { _o->best_acceleration_decision = std::unique_ptr<tflite::BestAccelerationDecisionT>(_e->UnPack(_resolver)); } } else if (_o->best_acceleration_decision) { _o->best_acceleration_decision.reset(); } }
  { auto _e = initialization_failure(); if (_e) { if(_o->initialization_failure) { _e->UnPackTo(_o->initialization_failure.get(), _resolver); } else { _o->initialization_failure = std::unique_ptr<tflite::BenchmarkInitializationFailureT>(_e->UnPack(_resolver)); } } else if (_o->initialization_failure) { _o->initialization_failure.reset(); } }
  { auto _e = benchmark_event(); if (_e) { if(_o->benchmark_event) { _e->UnPackTo(_o->benchmark_event.get(), _resolver); } else { _o->benchmark_event = std::unique_ptr<tflite::BenchmarkEventT>(_e->UnPack(_resolver)); } } else if (_o->benchmark_event) { _o->benchmark_event.reset(); } }
}

// Serializes a native object; forwards to CreateMiniBenchmarkEvent.
inline ::flatbuffers::Offset<MiniBenchmarkEvent> MiniBenchmarkEvent::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateMiniBenchmarkEvent(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; null sub-objects are omitted (offset 0).
inline ::flatbuffers::Offset<MiniBenchmarkEvent> CreateMiniBenchmarkEvent(::flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MiniBenchmarkEventT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _is_log_flushing_event = _o->is_log_flushing_event;
  auto _best_acceleration_decision = _o->best_acceleration_decision ? CreateBestAccelerationDecision(_fbb, _o->best_acceleration_decision.get(), _rehasher) : 0;
  auto _initialization_failure = _o->initialization_failure ? CreateBenchmarkInitializationFailure(_fbb, _o->initialization_failure.get(), _rehasher) : 0;
  auto _benchmark_event = _o->benchmark_event ? CreateBenchmarkEvent(_fbb, _o->benchmark_event.get(), _rehasher) : 0;
  return tflite::CreateMiniBenchmarkEvent(
      _fbb,
      _is_log_flushing_event,
      _best_acceleration_decision,
      _initialization_failure,
      _benchmark_event);
}


// Deep field-wise equality for the native ModelFileT; the owned
// model_id_group compares equal when both null or both deep-equal.
inline bool operator==(const ModelFileT &lhs, const ModelFileT &rhs) {
  return
      (lhs.filename == rhs.filename) &&
      (lhs.fd == rhs.fd) &&
      (lhs.offset == rhs.offset) &&
      (lhs.length == rhs.length) &&
      ((lhs.model_id_group == rhs.model_id_group) || (lhs.model_id_group && rhs.model_id_group && *lhs.model_id_group == *rhs.model_id_group)) &&
      (lhs.buffer_handle == rhs.buffer_handle);
}

inline bool operator!=(const ModelFileT &lhs, const ModelFileT &rhs) {
    return !(lhs == rhs);
}


// Copy constructor: deep-copies the owned model_id_group (null stays null).
inline ModelFileT::ModelFileT(const ModelFileT &o)
      : filename(o.filename),
        fd(o.fd),
        offset(o.offset),
        length(o.length),
        model_id_group((o.model_id_group) ? new tflite::ModelIdGroupT(*o.model_id_group) : nullptr),
        buffer_handle(o.buffer_handle) {
}

// Copy-and-swap assignment: `o` is taken by value, members are exchanged.
inline ModelFileT &ModelFileT::operator=(ModelFileT o) FLATBUFFERS_NOEXCEPT {
  std::swap(filename, o.filename);
  std::swap(fd, o.fd);
  std::swap(offset, o.offset);
  std::swap(length, o.length);
  std::swap(model_id_group, o.model_id_group);
  std::swap(buffer_handle, o.buffer_handle);
  return *this;
}

// Heap-allocates a native ModelFileT from this table; caller owns it.
inline ModelFileT *ModelFile::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ModelFileT>(new ModelFileT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table into `_o`; an absent model_id_group resets the
// destination pointer, and an absent filename leaves `_o->filename` as-is.
inline void ModelFile::UnPackTo(ModelFileT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = filename(); if (_e) _o->filename = _e->str(); }
  { auto _e = fd(); _o->fd = _e; }
  { auto _e = offset(); _o->offset = _e; }
  { auto _e = length(); _o->length = _e; }
  { auto _e = model_id_group(); if (_e) { if(_o->model_id_group) { _e->UnPackTo(_o->model_id_group.get(), _resolver); } else { _o->model_id_group = std::unique_ptr<tflite::ModelIdGroupT>(_e->UnPack(_resolver)); } } else if (_o->model_id_group) { _o->model_id_group.reset(); } }
  { auto _e = buffer_handle(); _o->buffer_handle = _e; }
}

// Serializes a native ModelFileT; forwards to CreateModelFile.
inline ::flatbuffers::Offset<ModelFile> ModelFile::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateModelFile(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; empty strings and null sub-objects are omitted.
inline ::flatbuffers::Offset<ModelFile> CreateModelFile(::flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelFileT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _filename = _o->filename.empty() ? 0 : _fbb.CreateString(_o->filename);
  auto _fd = _o->fd;
  auto _offset = _o->offset;
  auto _length = _o->length;
  auto _model_id_group = _o->model_id_group ? CreateModelIdGroup(_fbb, _o->model_id_group.get(), _rehasher) : 0;
  auto _buffer_handle = _o->buffer_handle;
  return tflite::CreateModelFile(
      _fbb,
      _filename,
      _fd,
      _offset,
      _length,
      _model_id_group,
      _buffer_handle);
}


// Field-wise equality for the native ModelIdGroupT (two string fields).
inline bool operator==(const ModelIdGroupT &lhs, const ModelIdGroupT &rhs) {
  return
      (lhs.model_namespace == rhs.model_namespace) &&
      (lhs.model_id == rhs.model_id);
}

inline bool operator!=(const ModelIdGroupT &lhs, const ModelIdGroupT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native ModelIdGroupT from this table; caller owns it.
inline ModelIdGroupT *ModelIdGroup::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ModelIdGroupT>(new ModelIdGroupT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's string fields into `_o`; absent fields leave the
// destination strings untouched.
inline void ModelIdGroup::UnPackTo(ModelIdGroupT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = model_namespace(); if (_e) _o->model_namespace = _e->str(); }
  { auto _e = model_id(); if (_e) _o->model_id = _e->str(); }
}

// Serializes a native ModelIdGroupT; forwards to CreateModelIdGroup.
inline ::flatbuffers::Offset<ModelIdGroup> ModelIdGroup::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelIdGroupT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateModelIdGroup(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; empty strings are omitted (offset 0).
inline ::flatbuffers::Offset<ModelIdGroup> CreateModelIdGroup(::flatbuffers::FlatBufferBuilder &_fbb, const ModelIdGroupT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelIdGroupT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _model_namespace = _o->model_namespace.empty() ? 0 : _fbb.CreateString(_o->model_namespace);
  auto _model_id = _o->model_id.empty() ? 0 : _fbb.CreateString(_o->model_id);
  return tflite::CreateModelIdGroup(
      _fbb,
      _model_namespace,
      _model_id);
}


// Field-wise equality for the native BenchmarkStoragePathsT (two strings).
inline bool operator==(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs) {
  return
      (lhs.storage_file_path == rhs.storage_file_path) &&
      (lhs.data_directory_path == rhs.data_directory_path);
}

inline bool operator!=(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native BenchmarkStoragePathsT; caller owns it.
inline BenchmarkStoragePathsT *BenchmarkStoragePaths::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<BenchmarkStoragePathsT>(new BenchmarkStoragePathsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's string fields into `_o`; absent fields leave the
// destination strings untouched.
inline void BenchmarkStoragePaths::UnPackTo(BenchmarkStoragePathsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = storage_file_path(); if (_e) _o->storage_file_path = _e->str(); }
  { auto _e = data_directory_path(); if (_e) _o->data_directory_path = _e->str(); }
}

// Serializes a native object; forwards to CreateBenchmarkStoragePaths.
inline ::flatbuffers::Offset<BenchmarkStoragePaths> BenchmarkStoragePaths::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkStoragePaths(_fbb, _o, _rehasher);
}

// Writes `_o` into `_fbb`; empty strings are omitted (offset 0).
inline ::flatbuffers::Offset<BenchmarkStoragePaths> CreateBenchmarkStoragePaths(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkStoragePathsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _storage_file_path = _o->storage_file_path.empty() ? 0 : _fbb.CreateString(_o->storage_file_path);
  auto _data_directory_path = _o->data_directory_path.empty() ? 0 : _fbb.CreateString(_o->data_directory_path);
  return tflite::CreateBenchmarkStoragePaths(
      _fbb,
      _storage_file_path,
      _data_directory_path);
}


// Field-wise equality for the native ValidationSettingsT (single scalar).
inline bool operator==(const ValidationSettingsT &lhs, const ValidationSettingsT &rhs) {
  return
      (lhs.per_test_timeout_ms == rhs.per_test_timeout_ms);
}

inline bool operator!=(const ValidationSettingsT &lhs, const ValidationSettingsT &rhs) {
    return !(lhs == rhs);
}


// Heap-allocates a native ValidationSettingsT; caller owns it.
inline ValidationSettingsT *ValidationSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<ValidationSettingsT>(new ValidationSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Copies this table's scalar field into an existing native object `_o`.
inline void ValidationSettings::UnPackTo(ValidationSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = per_test_timeout_ms(); _o->per_test_timeout_ms = _e; }
}

// Serializes a native object; forwards to CreateValidationSettings.
inline ::flatbuffers::Offset<ValidationSettings> ValidationSettings::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ValidationSettingsT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  return CreateValidationSettings(_fbb, _o, _rehasher);
}

// Writes the single scalar field of `_o` into `_fbb`.
inline ::flatbuffers::Offset<ValidationSettings> CreateValidationSettings(::flatbuffers::FlatBufferBuilder &_fbb, const ValidationSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ValidationSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _per_test_timeout_ms = _o->per_test_timeout_ms;
  return tflite::CreateValidationSettings(
      _fbb,
      _per_test_timeout_ms);
}


inline bool operator==(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs) {
  return
      (lhs.settings_to_test.size() == rhs.settings_to_test.size() && std::equal(lhs.settings_to_test.cbegin(), lhs.settings_to_test.cend(), rhs.settings_to_test.cbegin(), [](std::unique_ptr<tflite::TFLiteSettingsT> const &a, std::unique_ptr<tflite::TFLiteSettingsT> const &b) { return (a == b) || (a && b && *a == *b); })) &&
      ((lhs.model_file == rhs.model_file) || (lhs.model_file && rhs.model_file && *lhs.model_file == *rhs.model_file)) &&
      ((lhs.storage_paths == rhs.storage_paths) || (lhs.storage_paths && rhs.storage_paths && *lhs.storage_paths == *rhs.storage_paths)) &&
      ((lhs.validation_settings == rhs.validation_settings) || (lhs.validation_settings && rhs.validation_settings && *lhs.validation_settings == *rhs.validation_settings));
}

inline bool operator!=(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs) {
    return !(lhs == rhs);
}


inline MinibenchmarkSettingsT::MinibenchmarkSettingsT(const MinibenchmarkSettingsT &o)
      : model_file((o.model_file) ? new tflite::ModelFileT(*o.model_file) : nullptr),
        storage_paths((o.storage_paths) ? new tflite::BenchmarkStoragePathsT(*o.storage_paths) : nullptr),
        validation_settings((o.validation_settings) ? new tflite::ValidationSettingsT(*o.validation_settings) : nullptr) {
  settings_to_test.reserve(o.settings_to_test.size());
  for (const auto &settings_to_test_ : o.settings_to_test) { settings_to_test.emplace_back((settings_to_test_) ? new tflite::TFLiteSettingsT(*settings_to_test_) : nullptr); }
}

inline MinibenchmarkSettingsT &MinibenchmarkSettingsT::operator=(MinibenchmarkSettingsT o) FLATBUFFERS_NOEXCEPT {
  std::swap(settings_to_test, o.settings_to_test);
  std::swap(model_file, o.model_file);
  std::swap(storage_paths, o.storage_paths);
  std::swap(validation_settings, o.validation_settings);
  return *this;
}

inline MinibenchmarkSettingsT *MinibenchmarkSettings::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  auto _o = std::unique_ptr<MinibenchmarkSettingsT>(new MinibenchmarkSettingsT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}

// Deserializes this flatbuffer table into the caller-provided native object
// `_o`. Sub-objects already owned by `_o` are reused (unpacked-into in place);
// missing ones are allocated; fields absent from the buffer are cleared.
inline void MinibenchmarkSettings::UnPackTo(MinibenchmarkSettingsT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  // Vector of tables: resize to match the buffer, then unpack element-wise,
  // reusing any existing TFLiteSettingsT slots. An absent vector empties it.
  { auto _e = settings_to_test(); if (_e) { _o->settings_to_test.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->settings_to_test[_i]) { _e->Get(_i)->UnPackTo(_o->settings_to_test[_i].get(), _resolver); } else { _o->settings_to_test[_i] = std::unique_ptr<tflite::TFLiteSettingsT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->settings_to_test.resize(0); } }
  // Optional sub-table: unpack into the existing object if present, otherwise
  // allocate; reset the member when the field is absent from the buffer.
  { auto _e = model_file(); if (_e) { if(_o->model_file) { _e->UnPackTo(_o->model_file.get(), _resolver); } else { _o->model_file = std::unique_ptr<tflite::ModelFileT>(_e->UnPack(_resolver)); } } else if (_o->model_file) { _o->model_file.reset(); } }
  { auto _e = storage_paths(); if (_e) { if(_o->storage_paths) { _e->UnPackTo(_o->storage_paths.get(), _resolver); } else { _o->storage_paths = std::unique_ptr<tflite::BenchmarkStoragePathsT>(_e->UnPack(_resolver)); } } else if (_o->storage_paths) { _o->storage_paths.reset(); } }
  { auto _e = validation_settings(); if (_e) { if(_o->validation_settings) { _e->UnPackTo(_o->validation_settings.get(), _resolver); } else { _o->validation_settings = std::unique_ptr<tflite::ValidationSettingsT>(_e->UnPack(_resolver)); } } else if (_o->validation_settings) { _o->validation_settings.reset(); } }
}

inline ::flatbuffers::Offset<MinibenchmarkSettings> MinibenchmarkSettings::Pack(::flatbuffers::FlatBufferBuilder &_builder, const MinibenchmarkSettingsT* _obj, const ::flatbuffers::rehasher_function_t *_rehash_fn) {
  // Thin forwarding wrapper over the free Create function below.
  return CreateMinibenchmarkSettings(_builder, _obj, _rehash_fn);
}

// Serializes the native object `_o` into `_fbb`, returning the offset of the
// finished MinibenchmarkSettings table. Absent optional members serialize as
// a null (0) offset, i.e. the field is omitted from the buffer.
inline ::flatbuffers::Offset<MinibenchmarkSettings> CreateMinibenchmarkSettings(::flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // _VectorArgs smuggles the builder/object/rehasher into the captureless
  // lambda used by CreateVector below.
  struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const MinibenchmarkSettingsT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // Serialize each element of settings_to_test; an empty vector is omitted.
  auto _settings_to_test = _o->settings_to_test.size() ? _fbb.CreateVector<::flatbuffers::Offset<tflite::TFLiteSettings>> (_o->settings_to_test.size(), [](size_t i, _VectorArgs *__va) { return CreateTFLiteSettings(*__va->__fbb, __va->__o->settings_to_test[i].get(), __va->__rehasher); }, &_va ) : 0;
  // Optional sub-tables: serialize only when present.
  auto _model_file = _o->model_file ? CreateModelFile(_fbb, _o->model_file.get(), _rehasher) : 0;
  auto _storage_paths = _o->storage_paths ? CreateBenchmarkStoragePaths(_fbb, _o->storage_paths.get(), _rehasher) : 0;
  auto _validation_settings = _o->validation_settings ? CreateValidationSettings(_fbb, _o->validation_settings.get(), _rehasher) : 0;
  return tflite::CreateMinibenchmarkSettings(
      _fbb,
      _settings_to_test,
      _model_file,
      _storage_paths,
      _validation_settings);
}


inline bool operator==(const BenchmarkEventStorageT &lhs, const BenchmarkEventStorageT &rhs) {
  // Deep equality: two owned sub-objects compare equal when both are null
  // (pointer equality covers this) or when both exist and their pointees
  // compare equal. Short-circuits exactly as the generated chained form does.
  const bool model_ids_match =
      (lhs.model_id_group == rhs.model_id_group) ||
      (lhs.model_id_group && rhs.model_id_group && *lhs.model_id_group == *rhs.model_id_group);
  if (!model_ids_match) return false;
  return (lhs.benchmark_event == rhs.benchmark_event) ||
      (lhs.benchmark_event && rhs.benchmark_event && *lhs.benchmark_event == *rhs.benchmark_event);
}

inline bool operator!=(const BenchmarkEventStorageT &lhs, const BenchmarkEventStorageT &rhs) {
  // Defined as the negation of the deep-equality operator above.
  const bool equal = (lhs == rhs);
  return !equal;
}


// Deep copy constructor: owned sub-objects are cloned rather than shared, so
// the copy is independent of `o`. Null members stay null (the unique_ptr
// members default-construct to nullptr).
inline BenchmarkEventStorageT::BenchmarkEventStorageT(const BenchmarkEventStorageT &o) {
  if (o.model_id_group) {
    model_id_group.reset(new tflite::ModelIdGroupT(*o.model_id_group));
  }
  if (o.benchmark_event) {
    benchmark_event.reset(new tflite::BenchmarkEventT(*o.benchmark_event));
  }
}

inline BenchmarkEventStorageT &BenchmarkEventStorageT::operator=(BenchmarkEventStorageT other) FLATBUFFERS_NOEXCEPT {
  // Copy-and-swap assignment: `other` is a by-value copy; swapping members
  // adopts its state and the old state dies with `other`. Self-assignment safe.
  using std::swap;
  swap(model_id_group, other.model_id_group);
  swap(benchmark_event, other.benchmark_event);
  return *this;
}

inline BenchmarkEventStorageT *BenchmarkEventStorage::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const {
  // Allocate a fresh native object and fill it from this flatbuffer table.
  // The unique_ptr guards against a leak if UnPackTo throws; ownership is
  // handed to the caller via release().
  std::unique_ptr<BenchmarkEventStorageT> result(new BenchmarkEventStorageT());
  UnPackTo(result.get(), _resolver);
  return result.release();
}

// Deserializes this flatbuffer table into the caller-provided native object
// `_o`. An existing sub-object is reused (unpacked-into in place); a missing
// one is allocated; a field absent from the buffer resets the member.
inline void BenchmarkEventStorage::UnPackTo(BenchmarkEventStorageT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = model_id_group(); if (_e) { if(_o->model_id_group) { _e->UnPackTo(_o->model_id_group.get(), _resolver); } else { _o->model_id_group = std::unique_ptr<tflite::ModelIdGroupT>(_e->UnPack(_resolver)); } } else if (_o->model_id_group) { _o->model_id_group.reset(); } }
  { auto _e = benchmark_event(); if (_e) { if(_o->benchmark_event) { _e->UnPackTo(_o->benchmark_event.get(), _resolver); } else { _o->benchmark_event = std::unique_ptr<tflite::BenchmarkEventT>(_e->UnPack(_resolver)); } } else if (_o->benchmark_event) { _o->benchmark_event.reset(); } }
}

inline ::flatbuffers::Offset<BenchmarkEventStorage> BenchmarkEventStorage::Pack(::flatbuffers::FlatBufferBuilder &_builder, const BenchmarkEventStorageT* _obj, const ::flatbuffers::rehasher_function_t *_rehash_fn) {
  // Thin forwarding wrapper over the free Create function below.
  return CreateBenchmarkEventStorage(_builder, _obj, _rehash_fn);
}

// Serializes the native object `_o` into `_fbb`, returning the offset of the
// finished BenchmarkEventStorage table. Absent optional members serialize as
// a null (0) offset, i.e. the field is omitted from the buffer. (This table
// has no vector fields, so no CreateVector trampoline is needed.)
inline ::flatbuffers::Offset<BenchmarkEventStorage> CreateBenchmarkEventStorage(::flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventStorageT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) {
  // Serialize each optional sub-table first (children before parent, as
  // flatbuffers requires).
  const auto model_id_group_offset =
      _o->model_id_group ? CreateModelIdGroup(_fbb, _o->model_id_group.get(), _rehasher) : 0;
  const auto benchmark_event_offset =
      _o->benchmark_event ? CreateBenchmarkEvent(_fbb, _o->benchmark_event.get(), _rehasher) : 0;
  return tflite::CreateBenchmarkEventStorage(
      _fbb,
      model_id_group_offset,
      benchmark_event_offset);
}

}  // namespace tflite

#endif  // FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_
